diff options
469 files changed, 19442 insertions, 10689 deletions
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches index dcadffcab2dc..0a523c9a5ff4 100644 --- a/Documentation/SubmittingPatches +++ b/Documentation/SubmittingPatches | |||
@@ -84,18 +84,42 @@ is another popular alternative. | |||
84 | 84 | ||
85 | 2) Describe your changes. | 85 | 2) Describe your changes. |
86 | 86 | ||
87 | Describe the technical detail of the change(s) your patch includes. | 87 | Describe your problem. Whether your patch is a one-line bug fix or |
88 | 88 | 5000 lines of a new feature, there must be an underlying problem that | |
89 | Be as specific as possible. The WORST descriptions possible include | 89 | motivated you to do this work. Convince the reviewer that there is a |
90 | things like "update driver X", "bug fix for driver X", or "this patch | 90 | problem worth fixing and that it makes sense for them to read past the |
91 | includes updates for subsystem X. Please apply." | 91 | first paragraph. |
92 | |||
93 | Describe user-visible impact. Straight up crashes and lockups are | ||
94 | pretty convincing, but not all bugs are that blatant. Even if the | ||
95 | problem was spotted during code review, describe the impact you think | ||
96 | it can have on users. Keep in mind that the majority of Linux | ||
97 | installations run kernels from secondary stable trees or | ||
98 | vendor/product-specific trees that cherry-pick only specific patches | ||
99 | from upstream, so include anything that could help route your change | ||
100 | downstream: provoking circumstances, excerpts from dmesg, crash | ||
101 | descriptions, performance regressions, latency spikes, lockups, etc. | ||
102 | |||
103 | Quantify optimizations and trade-offs. If you claim improvements in | ||
104 | performance, memory consumption, stack footprint, or binary size, | ||
105 | include numbers that back them up. But also describe non-obvious | ||
106 | costs. Optimizations usually aren't free but trade-offs between CPU, | ||
107 | memory, and readability; or, when it comes to heuristics, between | ||
108 | different workloads. Describe the expected downsides of your | ||
109 | optimization so that the reviewer can weigh costs against benefits. | ||
110 | |||
111 | Once the problem is established, describe what you are actually doing | ||
112 | about it in technical detail. It's important to describe the change | ||
113 | in plain English for the reviewer to verify that the code is behaving | ||
114 | as you intend it to. | ||
92 | 115 | ||
93 | The maintainer will thank you if you write your patch description in a | 116 | The maintainer will thank you if you write your patch description in a |
94 | form which can be easily pulled into Linux's source code management | 117 | form which can be easily pulled into Linux's source code management |
95 | system, git, as a "commit log". See #15, below. | 118 | system, git, as a "commit log". See #15, below. |
96 | 119 | ||
97 | If your description starts to get long, that's a sign that you probably | 120 | Solve only one problem per patch. If your description starts to get |
98 | need to split up your patch. See #3, next. | 121 | long, that's a sign that you probably need to split up your patch. |
122 | See #3, next. | ||
99 | 123 | ||
100 | When you submit or resubmit a patch or patch series, include the | 124 | When you submit or resubmit a patch or patch series, include the |
101 | complete patch description and justification for it. Don't just | 125 | complete patch description and justification for it. Don't just |
diff --git a/Documentation/devicetree/bindings/i2c/i2c-efm32.txt b/Documentation/devicetree/bindings/i2c/i2c-efm32.txt index fc15ac519437..50b25c3da186 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-efm32.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-efm32.txt | |||
@@ -10,7 +10,7 @@ Required properties : | |||
10 | Recommended properties : | 10 | Recommended properties : |
11 | 11 | ||
12 | - clock-frequency : maximal I2C bus clock frequency in Hz. | 12 | - clock-frequency : maximal I2C bus clock frequency in Hz. |
13 | - efm32,location : Decides the location of the USART I/O pins. | 13 | - energymicro,location : Decides the location of the USART I/O pins. |
14 | Allowed range : [0 .. 6] | 14 | Allowed range : [0 .. 6] |
15 | 15 | ||
16 | Example: | 16 | Example: |
@@ -23,7 +23,7 @@ Example: | |||
23 | clocks = <&cmu clk_HFPERCLKI2C0>; | 23 | clocks = <&cmu clk_HFPERCLKI2C0>; |
24 | clock-frequency = <100000>; | 24 | clock-frequency = <100000>; |
25 | status = "ok"; | 25 | status = "ok"; |
26 | efm32,location = <3>; | 26 | energymicro,location = <3>; |
27 | 27 | ||
28 | eeprom@50 { | 28 | eeprom@50 { |
29 | compatible = "microchip,24c02"; | 29 | compatible = "microchip,24c02"; |
diff --git a/Documentation/filesystems/cifs/AUTHORS b/Documentation/filesystems/cifs/AUTHORS index ca4a67a0bb1e..c98800df677f 100644 --- a/Documentation/filesystems/cifs/AUTHORS +++ b/Documentation/filesystems/cifs/AUTHORS | |||
@@ -40,6 +40,7 @@ Gunter Kukkukk (testing and suggestions for support of old servers) | |||
40 | Igor Mammedov (DFS support) | 40 | Igor Mammedov (DFS support) |
41 | Jeff Layton (many, many fixes, as well as great work on the cifs Kerberos code) | 41 | Jeff Layton (many, many fixes, as well as great work on the cifs Kerberos code) |
42 | Scott Lovenberg | 42 | Scott Lovenberg |
43 | Pavel Shilovsky (for great work adding SMB2 support, and various SMB3 features) | ||
43 | 44 | ||
44 | Test case and Bug Report contributors | 45 | Test case and Bug Report contributors |
45 | ------------------------------------- | 46 | ------------------------------------- |
diff --git a/Documentation/filesystems/cifs/TODO b/Documentation/filesystems/cifs/TODO index 355abcdcda98..066ffddc3964 100644 --- a/Documentation/filesystems/cifs/TODO +++ b/Documentation/filesystems/cifs/TODO | |||
@@ -1,4 +1,4 @@ | |||
1 | Version 1.53 May 20, 2008 | 1 | Version 2.03 August 1, 2014 |
2 | 2 | ||
3 | A Partial List of Missing Features | 3 | A Partial List of Missing Features |
4 | ================================== | 4 | ================================== |
@@ -7,63 +7,49 @@ Contributions are welcome. There are plenty of opportunities | |||
7 | for visible, important contributions to this module. Here | 7 | for visible, important contributions to this module. Here |
8 | is a partial list of the known problems and missing features: | 8 | is a partial list of the known problems and missing features: |
9 | 9 | ||
10 | a) Support for SecurityDescriptors(Windows/CIFS ACLs) for chmod/chgrp/chown | 10 | a) SMB3 (and SMB3.02) missing optional features: |
11 | so that these operations can be supported to Windows servers | 11 | - RDMA |
12 | - multichannel (started) | ||
13 | - directory leases (improved metadata caching) | ||
14 | - T10 copy offload (copy chunk is only mechanism supported) | ||
15 | - encrypted shares | ||
12 | 16 | ||
13 | b) Mapping POSIX ACLs (and eventually NFSv4 ACLs) to CIFS | 17 | b) improved sparse file support |
14 | SecurityDescriptors | ||
15 | 18 | ||
16 | c) Better pam/winbind integration (e.g. to handle uid mapping | 19 | c) Directory entry caching relies on a 1 second timer, rather than |
17 | better) | ||
18 | |||
19 | d) Cleanup now unneeded SessSetup code in | ||
20 | fs/cifs/connect.c and add back in NTLMSSP code if any servers | ||
21 | need it | ||
22 | |||
23 | e) fix NTLMv2 signing when two mounts with different users to same | ||
24 | server. | ||
25 | |||
26 | f) Directory entry caching relies on a 1 second timer, rather than | ||
27 | using FindNotify or equivalent. - (started) | 20 | using FindNotify or equivalent. - (started) |
28 | 21 | ||
29 | g) quota support (needs minor kernel change since quota calls | 22 | d) quota support (needs minor kernel change since quota calls |
30 | to make it to network filesystems or deviceless filesystems) | 23 | to make it to network filesystems or deviceless filesystems) |
31 | 24 | ||
32 | h) investigate sync behavior (including syncpage) and check | 25 | e) improve support for very old servers (OS/2 and Win9x for example) |
33 | for proper behavior of intr/nointr | ||
34 | |||
35 | i) improve support for very old servers (OS/2 and Win9x for example) | ||
36 | Including support for changing the time remotely (utimes command). | 26 | Including support for changing the time remotely (utimes command). |
37 | 27 | ||
38 | j) hook lower into the sockets api (as NFS/SunRPC does) to avoid the | 28 | f) hook lower into the sockets api (as NFS/SunRPC does) to avoid the |
39 | extra copy in/out of the socket buffers in some cases. | 29 | extra copy in/out of the socket buffers in some cases. |
40 | 30 | ||
41 | k) Better optimize open (and pathbased setfilesize) to reduce the | 31 | g) Better optimize open (and pathbased setfilesize) to reduce the |
42 | oplock breaks coming from windows srv. Piggyback identical file | 32 | oplock breaks coming from windows srv. Piggyback identical file |
43 | opens on top of each other by incrementing reference count rather | 33 | opens on top of each other by incrementing reference count rather |
44 | than resending (helps reduce server resource utilization and avoid | 34 | than resending (helps reduce server resource utilization and avoid |
45 | spurious oplock breaks). | 35 | spurious oplock breaks). |
46 | 36 | ||
47 | l) Improve performance of readpages by sending more than one read | 37 | h) Add support for storing symlink info to Windows servers |
48 | at a time when 8 pages or more are requested. In conjuntion | ||
49 | add support for async_cifs_readpages. | ||
50 | |||
51 | m) Add support for storing symlink info to Windows servers | ||
52 | in the Extended Attribute format their SFU clients would recognize. | 38 | in the Extended Attribute format their SFU clients would recognize. |
53 | 39 | ||
54 | n) Finish fcntl D_NOTIFY support so kde and gnome file list windows | 40 | i) Finish inotify support so kde and gnome file list windows |
55 | will autorefresh (partially complete by Asser). Needs minor kernel | 41 | will autorefresh (partially complete by Asser). Needs minor kernel |
56 | vfs change to support removing D_NOTIFY on a file. | 42 | vfs change to support removing D_NOTIFY on a file. |
57 | 43 | ||
58 | o) Add GUI tool to configure /proc/fs/cifs settings and for display of | 44 | j) Add GUI tool to configure /proc/fs/cifs settings and for display of |
59 | the CIFS statistics (started) | 45 | the CIFS statistics (started) |
60 | 46 | ||
61 | p) implement support for security and trusted categories of xattrs | 47 | k) implement support for security and trusted categories of xattrs |
62 | (requires minor protocol extension) to enable better support for SELINUX | 48 | (requires minor protocol extension) to enable better support for SELINUX |
63 | 49 | ||
64 | q) Implement O_DIRECT flag on open (already supported on mount) | 50 | l) Implement O_DIRECT flag on open (already supported on mount) |
65 | 51 | ||
66 | r) Create UID mapping facility so server UIDs can be mapped on a per | 52 | m) Create UID mapping facility so server UIDs can be mapped on a per |
67 | mount or a per server basis to client UIDs or nobody if no mapping | 53 | mount or a per server basis to client UIDs or nobody if no mapping |
68 | exists. This is helpful when Unix extensions are negotiated to | 54 | exists. This is helpful when Unix extensions are negotiated to |
69 | allow better permission checking when UIDs differ on the server | 55 | allow better permission checking when UIDs differ on the server |
@@ -71,28 +57,29 @@ and client. Add new protocol request to the CIFS protocol | |||
71 | standard for asking the server for the corresponding name of a | 57 | standard for asking the server for the corresponding name of a |
72 | particular uid. | 58 | particular uid. |
73 | 59 | ||
74 | s) Add support for CIFS Unix and also the newer POSIX extensions to the | 60 | n) DOS attrs - returned as pseudo-xattr in Samba format (check VFAT and NTFS for this too) |
75 | server side for Samba 4. | 61 | |
62 | o) mount check for unmatched uids | ||
76 | 63 | ||
77 | t) In support for OS/2 (LANMAN 1.2 and LANMAN2.1 based SMB servers) | 64 | p) Add support for new vfs entry point for fallocate |
78 | need to add ability to set time to server (utimes command) | ||
79 | 65 | ||
80 | u) DOS attrs - returned as pseudo-xattr in Samba format (check VFAT and NTFS for this too) | 66 | q) Add tools to take advantage of cifs/smb3 specific ioctls and features |
67 | such as "CopyChunk" (fast server side file copy) | ||
81 | 68 | ||
82 | v) mount check for unmatched uids | 69 | r) encrypted file support |
83 | 70 | ||
84 | w) Add support for new vfs entry point for fallocate | 71 | s) improved stats gathering, tools (perhaps integration with nfsometer?) |
85 | 72 | ||
86 | x) Fix Samba 3 server to handle Linux kernel aio so dbench with lots of | 73 | t) allow setting more NTFS/SMB3 file attributes remotely (currently limited to compressed |
87 | processes can proceed better in parallel (on the server) | 74 | file attribute via chflags) |
88 | 75 | ||
89 | y) Fix Samba 3 to handle reads/writes over 127K (and remove the cifs mount | 76 | u) mount helper GUI (to simplify the various configuration options on mount) |
90 | restriction of wsize max being 127K) | ||
91 | 77 | ||
92 | KNOWN BUGS (updated April 24, 2007) | 78 | |
79 | KNOWN BUGS | ||
93 | ==================================== | 80 | ==================================== |
94 | See http://bugzilla.samba.org - search on product "CifsVFS" for | 81 | See http://bugzilla.samba.org - search on product "CifsVFS" for |
95 | current bug list. | 82 | current bug list. Also check http://bugzilla.kernel.org (Product = File System, Component = CIFS) |
96 | 83 | ||
97 | 1) existing symbolic links (Windows reparse points) are recognized but | 84 | 1) existing symbolic links (Windows reparse points) are recognized but |
98 | can not be created remotely. They are implemented for Samba and those that | 85 | can not be created remotely. They are implemented for Samba and those that |
@@ -100,30 +87,18 @@ support the CIFS Unix extensions, although earlier versions of Samba | |||
100 | overly restrict the pathnames. | 87 | overly restrict the pathnames. |
101 | 2) follow_link and readdir code does not follow dfs junctions | 88 | 2) follow_link and readdir code does not follow dfs junctions |
102 | but recognizes them | 89 | but recognizes them |
103 | 3) create of new files to FAT partitions on Windows servers can | ||
104 | succeed but still return access denied (appears to be Windows | ||
105 | server not cifs client problem) and has not been reproduced recently. | ||
106 | NTFS partitions do not have this problem. | ||
107 | 4) Unix/POSIX capabilities are reset after reconnection, and affect | ||
108 | a few fields in the tree connection but we do do not know which | ||
109 | superblocks to apply these changes to. We should probably walk | ||
110 | the list of superblocks to set these. Also need to check the | ||
111 | flags on the second mount to the same share, and see if we | ||
112 | can do the same trick that NFS does to remount duplicate shares. | ||
113 | 90 | ||
114 | Misc testing to do | 91 | Misc testing to do |
115 | ================== | 92 | ================== |
116 | 1) check out max path names and max path name components against various server | 93 | 1) check out max path names and max path name components against various server |
117 | types. Try nested symlinks (8 deep). Return max path name in stat -f information | 94 | types. Try nested symlinks (8 deep). Return max path name in stat -f information |
118 | 95 | ||
119 | 2) Modify file portion of ltp so it can run against a mounted network | 96 | 2) Improve xfstest's cifs enablement and adapt xfstests where needed to test |
120 | share and run it against cifs vfs in automated fashion. | 97 | cifs better |
121 | 98 | ||
122 | 3) Additional performance testing and optimization using iozone and similar - | 99 | 3) Additional performance testing and optimization using iozone and similar - |
123 | there are some easy changes that can be done to parallelize sequential writes, | 100 | there are some easy changes that can be done to parallelize sequential writes, |
124 | and when signing is disabled to request larger read sizes (larger than | 101 | and when signing is disabled to request larger read sizes (larger than |
125 | negotiated size) and send larger write sizes to modern servers. | 102 | negotiated size) and send larger write sizes to modern servers. |
126 | 103 | ||
127 | 4) More exhaustively test against less common servers. More testing | 104 | 4) More exhaustively test against less common servers |
128 | against Windows 9x, Windows ME servers. | ||
129 | |||
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801 index adf5e33e8312..e9c803ea306d 100644 --- a/Documentation/i2c/busses/i2c-i801 +++ b/Documentation/i2c/busses/i2c-i801 | |||
@@ -25,6 +25,7 @@ Supported adapters: | |||
25 | * Intel Avoton (SOC) | 25 | * Intel Avoton (SOC) |
26 | * Intel Wellsburg (PCH) | 26 | * Intel Wellsburg (PCH) |
27 | * Intel Coleto Creek (PCH) | 27 | * Intel Coleto Creek (PCH) |
28 | * Intel Wildcat Point (PCH) | ||
28 | * Intel Wildcat Point-LP (PCH) | 29 | * Intel Wildcat Point-LP (PCH) |
29 | * Intel BayTrail (SOC) | 30 | * Intel BayTrail (SOC) |
30 | Datasheets: Publicly available at the Intel website | 31 | Datasheets: Publicly available at the Intel website |
diff --git a/Documentation/i2c/i2c-stub b/Documentation/i2c/i2c-stub index fa4b669c166b..a16924fbd289 100644 --- a/Documentation/i2c/i2c-stub +++ b/Documentation/i2c/i2c-stub | |||
@@ -2,9 +2,9 @@ MODULE: i2c-stub | |||
2 | 2 | ||
3 | DESCRIPTION: | 3 | DESCRIPTION: |
4 | 4 | ||
5 | This module is a very simple fake I2C/SMBus driver. It implements five | 5 | This module is a very simple fake I2C/SMBus driver. It implements six |
6 | types of SMBus commands: write quick, (r/w) byte, (r/w) byte data, (r/w) | 6 | types of SMBus commands: write quick, (r/w) byte, (r/w) byte data, (r/w) |
7 | word data, and (r/w) I2C block data. | 7 | word data, (r/w) I2C block data, and (r/w) SMBus block data. |
8 | 8 | ||
9 | You need to provide chip addresses as a module parameter when loading this | 9 | You need to provide chip addresses as a module parameter when loading this |
10 | driver, which will then only react to SMBus commands to these addresses. | 10 | driver, which will then only react to SMBus commands to these addresses. |
@@ -19,6 +19,14 @@ A pointer register with auto-increment is implemented for all byte | |||
19 | operations. This allows for continuous byte reads like those supported by | 19 | operations. This allows for continuous byte reads like those supported by |
20 | EEPROMs, among others. | 20 | EEPROMs, among others. |
21 | 21 | ||
22 | SMBus block command support is disabled by default, and must be enabled | ||
23 | explicitly by setting the respective bits (0x03000000) in the functionality | ||
24 | module parameter. | ||
25 | |||
26 | SMBus block commands must be written to configure an SMBus command for | ||
27 | SMBus block operations. Writes can be partial. Block read commands always | ||
28 | return the number of bytes selected with the largest write so far. | ||
29 | |||
22 | The typical use-case is like this: | 30 | The typical use-case is like this: |
23 | 1. load this module | 31 | 1. load this module |
24 | 2. use i2cset (from the i2c-tools project) to pre-load some data | 32 | 2. use i2cset (from the i2c-tools project) to pre-load some data |
@@ -39,15 +47,18 @@ unsigned long functionality: | |||
39 | value 0x1f0000 would only enable the quick, byte and byte data | 47 | value 0x1f0000 would only enable the quick, byte and byte data |
40 | commands. | 48 | commands. |
41 | 49 | ||
50 | u8 bank_reg[10] | ||
51 | u8 bank_mask[10] | ||
52 | u8 bank_start[10] | ||
53 | u8 bank_end[10]: | ||
54 | Optional bank settings. They tell which bits in which register | ||
55 | select the active bank, as well as the range of banked registers. | ||
56 | |||
42 | CAVEATS: | 57 | CAVEATS: |
43 | 58 | ||
44 | If your target driver polls some byte or word waiting for it to change, the | 59 | If your target driver polls some byte or word waiting for it to change, the |
45 | stub could lock it up. Use i2cset to unlock it. | 60 | stub could lock it up. Use i2cset to unlock it. |
46 | 61 | ||
47 | If the hardware for your driver has banked registers (e.g. Winbond sensors | ||
48 | chips) this module will not work well - although it could be extended to | ||
49 | support that pretty easily. | ||
50 | |||
51 | If you spam it hard enough, printk can be lossy. This module really wants | 62 | If you spam it hard enough, printk can be lossy. This module really wants |
52 | something like relayfs. | 63 | something like relayfs. |
53 | 64 | ||
diff --git a/Documentation/ioctl/00-INDEX b/Documentation/ioctl/00-INDEX index d2fe4d4729ef..c1a925787950 100644 --- a/Documentation/ioctl/00-INDEX +++ b/Documentation/ioctl/00-INDEX | |||
@@ -1,5 +1,7 @@ | |||
1 | 00-INDEX | 1 | 00-INDEX |
2 | - this file | 2 | - this file |
3 | botching-up-ioctls.txt | ||
4 | - how to avoid botching up ioctls | ||
3 | cdrom.txt | 5 | cdrom.txt |
4 | - summary of CDROM ioctl calls | 6 | - summary of CDROM ioctl calls |
5 | hdio.txt | 7 | hdio.txt |
diff --git a/Documentation/ioctl/botching-up-ioctls.txt b/Documentation/ioctl/botching-up-ioctls.txt new file mode 100644 index 000000000000..45fe78c58019 --- /dev/null +++ b/Documentation/ioctl/botching-up-ioctls.txt | |||
@@ -0,0 +1,219 @@ | |||
1 | (How to avoid) Botching up ioctls | ||
2 | ================================= | ||
3 | |||
4 | From: http://blog.ffwll.ch/2013/11/botching-up-ioctls.html | ||
5 | |||
6 | By: Daniel Vetter, Copyright © 2013 Intel Corporation | ||
7 | |||
8 | One clear insight kernel graphics hackers gained in the past few years is that | ||
9 | trying to come up with a unified interface to manage the execution units and | ||
10 | memory on completely different GPUs is a futile effort. So nowadays every | ||
11 | driver has its own set of ioctls to allocate memory and submit work to the GPU. | ||
12 | Which is nice, since there's no more insanity in the form of fake-generic, but | ||
13 | actually only used once interfaces. But the clear downside is that there's much | ||
14 | more potential to screw things up. | ||
15 | |||
16 | To avoid repeating all the same mistakes again I've written up some of the | ||
17 | lessons learned while botching the job for the drm/i915 driver. Most of these | ||
18 | only cover technicalities and not the big-picture issues like what the command | ||
19 | submission ioctl exactly should look like. Learning these lessons is probably | ||
20 | something every GPU driver has to do on its own. | ||
21 | |||
22 | |||
23 | Prerequisites | ||
24 | ------------- | ||
25 | |||
26 | First the prerequisites. Without these you have already failed, because you | ||
27 | will need to add a 32-bit compat layer: | ||
28 | |||
29 | * Only use fixed sized integers. To avoid conflicts with typedefs in userspace | ||
30 | the kernel has special types like __u32, __s64. Use them. | ||
31 | |||
32 | * Align everything to the natural size and use explicit padding. 32-bit | ||
33 | platforms don't necessarily align 64-bit values to 64-bit boundaries, but | ||
34 | 64-bit platforms do. So we always need padding to the natural size to get | ||
35 | this right. | ||
36 | |||
37 | * Pad the entire struct to a multiple of 64-bits - the structure size will | ||
38 | otherwise differ on 32-bit versus 64-bit. Having a different structure size | ||
39 | hurts when passing arrays of structures to the kernel, or if the kernel | ||
40 | checks the structure size, which e.g. the drm core does. | ||
41 | |||
42 | * Pointers are __u64, cast from/to a uintptr_t on the userspace side and | ||
43 | from/to a void __user * in the kernel. Try really hard not to delay this | ||
44 | conversion or worse, fiddle the raw __u64 through your code since that | ||
45 | diminishes the checking tools like sparse can provide. | ||
46 | |||
47 | |||
48 | Basics | ||
49 | ------ | ||
50 | |||
51 | With the joys of writing a compat layer avoided we can take a look at the basic | ||
52 | fumbles. Neglecting these will make backward and forward compatibility a real | ||
53 | pain. And since getting things wrong on the first attempt is guaranteed you | ||
54 | will have a second iteration or at least an extension for any given interface. | ||
55 | |||
56 | * Have a clear way for userspace to figure out whether your new ioctl or ioctl | ||
57 | extension is supported on a given kernel. If you can't rely on old kernels | ||
58 | rejecting the new flags/modes or ioctls (since doing that was botched in the | ||
59 | past) then you need a driver feature flag or revision number somewhere. | ||
60 | |||
61 | * Have a plan for extending ioctls with new flags or new fields at the end of | ||
62 | the structure. The drm core checks the passed-in size for each ioctl call | ||
63 | and zero-extends any mismatches between kernel and userspace. That helps, | ||
64 | but isn't a complete solution since newer userspace on older kernels won't | ||
65 | notice that the newly added fields at the end get ignored. So this still | ||
66 | needs a new driver feature flag. | ||
67 | |||
68 | * Check all unused fields and flags and all the padding for whether it's 0, | ||
69 | and reject the ioctl if that's not the case. Otherwise your nice plan for | ||
70 | future extensions is going right down the gutters since someone will submit | ||
71 | an ioctl struct with random stack garbage in the yet unused parts. Which | ||
72 | then bakes in the ABI that those fields can never be used for anything else | ||
73 | but garbage. | ||
74 | |||
75 | * Have simple testcases for all of the above. | ||
76 | |||
77 | |||
78 | Fun with Error Paths | ||
79 | -------------------- | ||
80 | |||
81 | Nowadays we don't have any excuse left any more for drm drivers being neat | ||
82 | little root exploits. This means we both need full input validation and solid | ||
83 | error handling paths - GPUs will die eventually in the oddmost corner cases | ||
84 | anyway: | ||
85 | |||
86 | * The ioctl must check for array overflows. Also it needs to check for | ||
87 | over/underflows and clamping issues of integer values in general. The usual | ||
88 | example is sprite positioning values fed directly into the hardware with the | ||
89 | hardware just having 12 bits or so. Works nicely until some odd display | ||
90 | server doesn't bother with clamping itself and the cursor wraps around the | ||
91 | screen. | ||
92 | |||
93 | * Have simple testcases for every input validation failure case in your ioctl. | ||
94 | Check that the error code matches your expectations. And finally make sure | ||
95 | that you only test for one single error path in each subtest by submitting | ||
96 | otherwise perfectly valid data. Without this an earlier check might reject | ||
97 | the ioctl already and shadow the codepath you actually want to test, hiding | ||
98 | bugs and regressions. | ||
99 | |||
100 | * Make all your ioctls restartable. First X really loves signals and second | ||
101 | this will allow you to test 90% of all error handling paths by just | ||
102 | interrupting your main test suite constantly with signals. Thanks to X's | ||
103 | love for signals you'll get an excellent base coverage of all your error | ||
104 | paths pretty much for free for graphics drivers. Also, be consistent with | ||
105 | how you handle ioctl restarting - e.g. drm has a tiny drmIoctl helper in its | ||
106 | userspace library. The i915 driver botched this with the set_tiling ioctl, | ||
107 | now we're stuck forever with some arcane semantics in both the kernel and | ||
108 | userspace. | ||
109 | |||
110 | * If you can't make a given codepath restartable make a stuck task at least | ||
111 | killable. GPUs just die and your users won't like you more if you hang their | ||
112 | entire box (by means of an unkillable X process). If the state recovery is | ||
113 | still too tricky have a timeout or hangcheck safety net as a last-ditch | ||
114 | effort in case the hardware has gone bananas. | ||
115 | |||
116 | * Have testcases for the really tricky corner cases in your error recovery code | ||
117 | - it's way too easy to create a deadlock between your hangcheck code and | ||
118 | waiters. | ||
119 | |||
120 | |||
121 | Time, Waiting and Missing it | ||
122 | ---------------------------- | ||
123 | |||
124 | GPUs do most everything asynchronously, so we have a need to time operations and | ||
125 | wait for outstanding ones. This is really tricky business; at the moment none of | ||
126 | the ioctls supported by the drm/i915 get this fully right, which means there's | ||
127 | still tons more lessons to learn here. | ||
128 | |||
129 | * Use CLOCK_MONOTONIC as your reference time, always. It's what alsa, drm and | ||
130 | v4l use by default nowadays. But let userspace know which timestamps are | ||
131 | derived from different clock domains like your main system clock (provided | ||
132 | by the kernel) or some independent hardware counter somewhere else. Clocks | ||
133 | will mismatch if you look close enough, but if performance measuring tools | ||
134 | have this information they can at least compensate. If your userspace can | ||
135 | get at the raw values of some clocks (e.g. through in-command-stream | ||
136 | performance counter sampling instructions) consider exposing those also. | ||
137 | |||
138 | * Use __s64 seconds plus __u64 nanoseconds to specify time. It's not the most | ||
139 | convenient time specification, but it's mostly the standard. | ||
140 | |||
141 | * Check that input time values are normalized and reject them if not. Note | ||
142 | that the kernel native struct ktime has a signed integer for both seconds | ||
143 | and nanoseconds, so beware here. | ||
144 | |||
145 | * For timeouts, use absolute times. If you're a good fellow and made your | ||
146 | ioctl restartable relative timeouts tend to be too coarse and can | ||
147 | indefinitely extend your wait time due to rounding on each restart. | ||
148 | Especially if your reference clock is something really slow like the display | ||
149 | frame counter. With a spec lawyer hat on this isn't a bug since timeouts can | ||
150 | always be extended - but users will surely hate you if their neat animations | ||
151 | starts to stutter due to this. | ||
152 | |||
153 | * Consider ditching any synchronous wait ioctls with timeouts and just deliver | ||
154 | an asynchronous event on a pollable file descriptor. It fits much better | ||
155 | into event driven applications' main loop. | ||
156 | |||
157 | * Have testcases for corner-cases, especially whether the return values for | ||
158 | already-completed events, successful waits and timed-out waits are all sane | ||
159 | and suiting to your needs. | ||
160 | |||
161 | |||
162 | Leaking Resources, Not | ||
163 | ---------------------- | ||
164 | |||
165 | A full-blown drm driver essentially implements a little OS, but specialized to | ||
166 | the given GPU platforms. This means a driver needs to expose tons of handles | ||
167 | for different objects and other resources to userspace. Doing that right | ||
168 | entails its own little set of pitfalls: | ||
169 | |||
170 | * Always attach the lifetime of your dynamically created resources to the | ||
171 | lifetime of a file descriptor. Consider using a 1:1 mapping if your resource | ||
172 | needs to be shared across processes - fd-passing over unix domain sockets | ||
173 | also simplifies lifetime management for userspace. | ||
174 | |||
175 | * Always have O_CLOEXEC support. | ||
176 | |||
177 | * Ensure that you have sufficient insulation between different clients. By | ||
178 | default pick a private per-fd namespace which forces any sharing to be done | ||
179 | explicitly. Only go with a more global per-device namespace if the objects | ||
180 | are truly device-unique. One counterexample in the drm modeset interfaces is | ||
181 | that the per-device modeset objects like connectors share a namespace with | ||
182 | framebuffer objects, which mostly are not shared at all. A separate | ||
183 | namespace, private by default, for framebuffers would have been more | ||
184 | suitable. | ||
185 | |||
186 | * Think about uniqueness requirements for userspace handles. E.g. for most drm | ||
187 | drivers it's a userspace bug to submit the same object twice in the same | ||
188 | command submission ioctl. But then if objects are shareable userspace needs | ||
189 | to know whether it has seen an imported object from a different process | ||
190 | already or not. I haven't tried this myself yet due to lack of a new class | ||
191 | of objects, but consider using inode numbers on your shared file descriptors | ||
192 | as unique identifiers - it's how real files are told apart, too. | ||
193 | Unfortunately this requires a full-blown virtual filesystem in the kernel. | ||
194 | |||
195 | |||
196 | Last, but not Least | ||
197 | ------------------- | ||
198 | |||
199 | Not every problem needs a new ioctl: | ||
200 | |||
201 | * Think hard whether you really want a driver-private interface. Of course | ||
202 | it's much quicker to push a driver-private interface than engaging in | ||
203 | lengthy discussions for a more generic solution. And occasionally doing a | ||
204 | private interface to spearhead a new concept is what's required. But in the | ||
205 | end, once the generic interface comes around you'll end up maintaining two | ||
206 | interfaces. Indefinitely. | ||
207 | |||
208 | * Consider other interfaces than ioctls. A sysfs attribute is much better for | ||
209 | per-device settings, or for child objects with fairly static lifetimes (like | ||
210 | output connectors in drm with all the detection override attributes). Or | ||
211 | maybe only your testsuite needs this interface, and then debugfs with its | ||
212 | disclaimer of not having a stable ABI would be better. | ||
213 | |||
214 | Finally, the name of the game is to get it right on the first attempt, since if | ||
215 | your driver proves popular and your hardware platforms long-lived then you'll | ||
216 | be stuck with a given ioctl essentially forever. You can try to deprecate | ||
217 | horrible ioctls on newer iterations of your hardware, but generally it takes | ||
218 | years to accomplish this. And then again years until the last user able to | ||
219 | complain about regressions disappears, too. | ||
diff --git a/Documentation/laptops/freefall.c b/Documentation/laptops/freefall.c index d4173186555c..5e44b20b1848 100644 --- a/Documentation/laptops/freefall.c +++ b/Documentation/laptops/freefall.c | |||
@@ -29,16 +29,12 @@ static const char app_name[] = "FREE FALL"; | |||
29 | 29 | ||
30 | static int set_unload_heads_path(char *device) | 30 | static int set_unload_heads_path(char *device) |
31 | { | 31 | { |
32 | char devname[64]; | ||
33 | |||
34 | if (strlen(device) <= 5 || strncmp(device, "/dev/", 5) != 0) | 32 | if (strlen(device) <= 5 || strncmp(device, "/dev/", 5) != 0) |
35 | return -EINVAL; | 33 | return -EINVAL; |
36 | strncpy(devname, device + 5, sizeof(devname) - 1); | ||
37 | devname[sizeof(devname) - 1] = '\0'; | ||
38 | strncpy(device_path, device, sizeof(device_path) - 1); | 34 | strncpy(device_path, device, sizeof(device_path) - 1); |
39 | 35 | ||
40 | snprintf(unload_heads_path, sizeof(unload_heads_path) - 1, | 36 | snprintf(unload_heads_path, sizeof(unload_heads_path) - 1, |
41 | "/sys/block/%s/device/unload_heads", devname); | 37 | "/sys/block/%s/device/unload_heads", device+5); |
42 | return 0; | 38 | return 0; |
43 | } | 39 | } |
44 | 40 | ||
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h index 355cb470c2a4..372466b371bf 100644 --- a/arch/arc/include/asm/arcregs.h +++ b/arch/arc/include/asm/arcregs.h | |||
@@ -296,7 +296,7 @@ struct cpuinfo_arc_mmu { | |||
296 | }; | 296 | }; |
297 | 297 | ||
298 | struct cpuinfo_arc_cache { | 298 | struct cpuinfo_arc_cache { |
299 | unsigned int sz, line_len, assoc, ver; | 299 | unsigned int sz_k:8, line_len:8, assoc:4, ver:4, alias:1, vipt:1, pad:6; |
300 | }; | 300 | }; |
301 | 301 | ||
302 | struct cpuinfo_arc_ccm { | 302 | struct cpuinfo_arc_ccm { |
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h index fb4efb648971..f38652fb2ed7 100644 --- a/arch/arc/include/asm/irq.h +++ b/arch/arc/include/asm/irq.h | |||
@@ -16,9 +16,13 @@ | |||
16 | #define TIMER0_IRQ 3 | 16 | #define TIMER0_IRQ 3 |
17 | #define TIMER1_IRQ 4 | 17 | #define TIMER1_IRQ 4 |
18 | 18 | ||
19 | #include <linux/interrupt.h> | ||
19 | #include <asm-generic/irq.h> | 20 | #include <asm-generic/irq.h> |
20 | 21 | ||
21 | extern void arc_init_IRQ(void); | 22 | extern void arc_init_IRQ(void); |
22 | void arc_local_timer_setup(void); | 23 | void arc_local_timer_setup(void); |
24 | void arc_request_percpu_irq(int irq, int cpu, | ||
25 | irqreturn_t (*isr)(int irq, void *dev), | ||
26 | const char *irq_nm, void *percpu_dev); | ||
23 | 27 | ||
24 | #endif | 28 | #endif |
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h index cb7efc29f16f..587df8236e8b 100644 --- a/arch/arc/include/asm/irqflags.h +++ b/arch/arc/include/asm/irqflags.h | |||
@@ -131,24 +131,6 @@ static inline int arch_irqs_disabled(void) | |||
131 | return arch_irqs_disabled_flags(arch_local_save_flags()); | 131 | return arch_irqs_disabled_flags(arch_local_save_flags()); |
132 | } | 132 | } |
133 | 133 | ||
134 | static inline void arch_mask_irq(unsigned int irq) | ||
135 | { | ||
136 | unsigned int ienb; | ||
137 | |||
138 | ienb = read_aux_reg(AUX_IENABLE); | ||
139 | ienb &= ~(1 << irq); | ||
140 | write_aux_reg(AUX_IENABLE, ienb); | ||
141 | } | ||
142 | |||
143 | static inline void arch_unmask_irq(unsigned int irq) | ||
144 | { | ||
145 | unsigned int ienb; | ||
146 | |||
147 | ienb = read_aux_reg(AUX_IENABLE); | ||
148 | ienb |= (1 << irq); | ||
149 | write_aux_reg(AUX_IENABLE, ienb); | ||
150 | } | ||
151 | |||
152 | #else | 134 | #else |
153 | 135 | ||
154 | #ifdef CONFIG_TRACE_IRQFLAGS | 136 | #ifdef CONFIG_TRACE_IRQFLAGS |
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c index 7d653c0d0773..620ec2fe32a9 100644 --- a/arch/arc/kernel/irq.c +++ b/arch/arc/kernel/irq.c | |||
@@ -19,21 +19,16 @@ | |||
19 | 19 | ||
20 | /* | 20 | /* |
21 | * Early Hardware specific Interrupt setup | 21 | * Early Hardware specific Interrupt setup |
22 | * -Platform independent, needed for each CPU (not foldable into init_IRQ) | ||
22 | * -Called very early (start_kernel -> setup_arch -> setup_processor) | 23 | * -Called very early (start_kernel -> setup_arch -> setup_processor) |
23 | * -Platform Independent (must for any ARC700) | ||
24 | * -Needed for each CPU (hence not foldable into init_IRQ) | ||
25 | * | 24 | * |
26 | * what it does ? | 25 | * what it does ? |
27 | * -Disable all IRQs (on CPU side) | ||
28 | * -Optionally, setup the High priority Interrupts as Level 2 IRQs | 26 | * -Optionally, setup the High priority Interrupts as Level 2 IRQs |
29 | */ | 27 | */ |
30 | void arc_init_IRQ(void) | 28 | void arc_init_IRQ(void) |
31 | { | 29 | { |
32 | int level_mask = 0; | 30 | int level_mask = 0; |
33 | 31 | ||
34 | /* Disable all IRQs: enable them as devices request */ | ||
35 | write_aux_reg(AUX_IENABLE, 0); | ||
36 | |||
37 | /* setup any high priority Interrupts (Level2 in ARCompact jargon) */ | 32 | /* setup any high priority Interrupts (Level2 in ARCompact jargon) */ |
38 | level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3; | 33 | level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3; |
39 | level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5; | 34 | level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5; |
@@ -60,20 +55,28 @@ void arc_init_IRQ(void) | |||
60 | * below, per IRQ. | 55 | * below, per IRQ. |
61 | */ | 56 | */ |
62 | 57 | ||
63 | static void arc_mask_irq(struct irq_data *data) | 58 | static void arc_irq_mask(struct irq_data *data) |
64 | { | 59 | { |
65 | arch_mask_irq(data->irq); | 60 | unsigned int ienb; |
61 | |||
62 | ienb = read_aux_reg(AUX_IENABLE); | ||
63 | ienb &= ~(1 << data->irq); | ||
64 | write_aux_reg(AUX_IENABLE, ienb); | ||
66 | } | 65 | } |
67 | 66 | ||
68 | static void arc_unmask_irq(struct irq_data *data) | 67 | static void arc_irq_unmask(struct irq_data *data) |
69 | { | 68 | { |
70 | arch_unmask_irq(data->irq); | 69 | unsigned int ienb; |
70 | |||
71 | ienb = read_aux_reg(AUX_IENABLE); | ||
72 | ienb |= (1 << data->irq); | ||
73 | write_aux_reg(AUX_IENABLE, ienb); | ||
71 | } | 74 | } |
72 | 75 | ||
73 | static struct irq_chip onchip_intc = { | 76 | static struct irq_chip onchip_intc = { |
74 | .name = "ARC In-core Intc", | 77 | .name = "ARC In-core Intc", |
75 | .irq_mask = arc_mask_irq, | 78 | .irq_mask = arc_irq_mask, |
76 | .irq_unmask = arc_unmask_irq, | 79 | .irq_unmask = arc_irq_unmask, |
77 | }; | 80 | }; |
78 | 81 | ||
79 | static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq, | 82 | static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq, |
@@ -150,6 +153,32 @@ void arch_do_IRQ(unsigned int irq, struct pt_regs *regs) | |||
150 | set_irq_regs(old_regs); | 153 | set_irq_regs(old_regs); |
151 | } | 154 | } |
152 | 155 | ||
156 | void arc_request_percpu_irq(int irq, int cpu, | ||
157 | irqreturn_t (*isr)(int irq, void *dev), | ||
158 | const char *irq_nm, | ||
159 | void *percpu_dev) | ||
160 | { | ||
161 | /* Boot cpu calls request, all call enable */ | ||
162 | if (!cpu) { | ||
163 | int rc; | ||
164 | |||
165 | /* | ||
166 | * These 2 calls are essential to making percpu IRQ APIs work | ||
167 | * Ideally these details could be hidden in irq chip map function | ||
168 | * but the issue is IPIs IRQs being static (non-DT) and platform | ||
169 | * specific, so we can't identify them there. | ||
170 | */ | ||
171 | irq_set_percpu_devid(irq); | ||
172 | irq_modify_status(irq, IRQ_NOAUTOEN, 0); /* @irq, @clr, @set */ | ||
173 | |||
174 | rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev); | ||
175 | if (rc) | ||
176 | panic("Percpu IRQ request failed for %d\n", irq); | ||
177 | } | ||
178 | |||
179 | enable_percpu_irq(irq, 0); | ||
180 | } | ||
181 | |||
153 | /* | 182 | /* |
154 | * arch_local_irq_enable - Enable interrupts. | 183 | * arch_local_irq_enable - Enable interrupts. |
155 | * | 184 | * |
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index 7e95e1a86510..cb3142a2d40b 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c | |||
@@ -141,17 +141,13 @@ badframe: | |||
141 | /* | 141 | /* |
142 | * Determine which stack to use.. | 142 | * Determine which stack to use.. |
143 | */ | 143 | */ |
144 | static inline void __user *get_sigframe(struct k_sigaction *ka, | 144 | static inline void __user *get_sigframe(struct ksignal *ksig, |
145 | struct pt_regs *regs, | 145 | struct pt_regs *regs, |
146 | unsigned long framesize) | 146 | unsigned long framesize) |
147 | { | 147 | { |
148 | unsigned long sp = regs->sp; | 148 | unsigned long sp = sigsp(regs->sp, ksig); |
149 | void __user *frame; | 149 | void __user *frame; |
150 | 150 | ||
151 | /* This is the X/Open sanctioned signal stack switching */ | ||
152 | if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) | ||
153 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
154 | |||
155 | /* No matter what happens, 'sp' must be word | 151 | /* No matter what happens, 'sp' must be word |
156 | * aligned otherwise nasty things could happen | 152 | * aligned otherwise nasty things could happen |
157 | */ | 153 | */ |
@@ -179,14 +175,13 @@ static inline int map_sig(int sig) | |||
179 | } | 175 | } |
180 | 176 | ||
181 | static int | 177 | static int |
182 | setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info, | 178 | setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) |
183 | sigset_t *set, struct pt_regs *regs) | ||
184 | { | 179 | { |
185 | struct rt_sigframe __user *sf; | 180 | struct rt_sigframe __user *sf; |
186 | unsigned int magic = 0; | 181 | unsigned int magic = 0; |
187 | int err = 0; | 182 | int err = 0; |
188 | 183 | ||
189 | sf = get_sigframe(ka, regs, sizeof(struct rt_sigframe)); | 184 | sf = get_sigframe(ksig, regs, sizeof(struct rt_sigframe)); |
190 | if (!sf) | 185 | if (!sf) |
191 | return 1; | 186 | return 1; |
192 | 187 | ||
@@ -205,8 +200,8 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info, | |||
205 | * #2: struct siginfo | 200 | * #2: struct siginfo |
206 | * #3: struct ucontext (completely populated) | 201 | * #3: struct ucontext (completely populated) |
207 | */ | 202 | */ |
208 | if (unlikely(ka->sa.sa_flags & SA_SIGINFO)) { | 203 | if (unlikely(ksig->ka.sa.sa_flags & SA_SIGINFO)) { |
209 | err |= copy_siginfo_to_user(&sf->info, info); | 204 | err |= copy_siginfo_to_user(&sf->info, &ksig->info); |
210 | err |= __put_user(0, &sf->uc.uc_flags); | 205 | err |= __put_user(0, &sf->uc.uc_flags); |
211 | err |= __put_user(NULL, &sf->uc.uc_link); | 206 | err |= __put_user(NULL, &sf->uc.uc_link); |
212 | err |= __save_altstack(&sf->uc.uc_stack, regs->sp); | 207 | err |= __save_altstack(&sf->uc.uc_stack, regs->sp); |
@@ -227,16 +222,16 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info, | |||
227 | return err; | 222 | return err; |
228 | 223 | ||
229 | /* #1 arg to the user Signal handler */ | 224 | /* #1 arg to the user Signal handler */ |
230 | regs->r0 = map_sig(signo); | 225 | regs->r0 = map_sig(ksig->sig); |
231 | 226 | ||
232 | /* setup PC of user space signal handler */ | 227 | /* setup PC of user space signal handler */ |
233 | regs->ret = (unsigned long)ka->sa.sa_handler; | 228 | regs->ret = (unsigned long)ksig->ka.sa.sa_handler; |
234 | 229 | ||
235 | /* | 230 | /* |
236 | * handler returns using sigreturn stub provided already by userpsace | 231 | * handler returns using sigreturn stub provided already by userpsace |
237 | */ | 232 | */ |
238 | BUG_ON(!(ka->sa.sa_flags & SA_RESTORER)); | 233 | BUG_ON(!(ksig->ka.sa.sa_flags & SA_RESTORER)); |
239 | regs->blink = (unsigned long)ka->sa.sa_restorer; | 234 | regs->blink = (unsigned long)ksig->ka.sa.sa_restorer; |
240 | 235 | ||
241 | /* User Stack for signal handler will be above the frame just carved */ | 236 | /* User Stack for signal handler will be above the frame just carved */ |
242 | regs->sp = (unsigned long)sf; | 237 | regs->sp = (unsigned long)sf; |
@@ -298,38 +293,30 @@ static void arc_restart_syscall(struct k_sigaction *ka, struct pt_regs *regs) | |||
298 | * OK, we're invoking a handler | 293 | * OK, we're invoking a handler |
299 | */ | 294 | */ |
300 | static void | 295 | static void |
301 | handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | 296 | handle_signal(struct ksignal *ksig, struct pt_regs *regs) |
302 | struct pt_regs *regs) | ||
303 | { | 297 | { |
304 | sigset_t *oldset = sigmask_to_save(); | 298 | sigset_t *oldset = sigmask_to_save(); |
305 | int ret; | 299 | int ret; |
306 | 300 | ||
307 | /* Set up the stack frame */ | 301 | /* Set up the stack frame */ |
308 | ret = setup_rt_frame(sig, ka, info, oldset, regs); | 302 | ret = setup_rt_frame(ksig, oldset, regs); |
309 | 303 | ||
310 | if (ret) | 304 | signal_setup_done(ret, ksig, 0); |
311 | force_sigsegv(sig, current); | ||
312 | else | ||
313 | signal_delivered(sig, info, ka, regs, 0); | ||
314 | } | 305 | } |
315 | 306 | ||
316 | void do_signal(struct pt_regs *regs) | 307 | void do_signal(struct pt_regs *regs) |
317 | { | 308 | { |
318 | struct k_sigaction ka; | 309 | struct ksignal ksig; |
319 | siginfo_t info; | ||
320 | int signr; | ||
321 | int restart_scall; | 310 | int restart_scall; |
322 | 311 | ||
323 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | ||
324 | |||
325 | restart_scall = in_syscall(regs) && syscall_restartable(regs); | 312 | restart_scall = in_syscall(regs) && syscall_restartable(regs); |
326 | 313 | ||
327 | if (signr > 0) { | 314 | if (get_signal(&ksig)) { |
328 | if (restart_scall) { | 315 | if (restart_scall) { |
329 | arc_restart_syscall(&ka, regs); | 316 | arc_restart_syscall(&ksig.ka, regs); |
330 | syscall_wont_restart(regs); /* No more restarts */ | 317 | syscall_wont_restart(regs); /* No more restarts */ |
331 | } | 318 | } |
332 | handle_signal(signr, &ka, &info, regs); | 319 | handle_signal(&ksig, regs); |
333 | return; | 320 | return; |
334 | } | 321 | } |
335 | 322 | ||
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c index c802bb500602..dcd317c47d09 100644 --- a/arch/arc/kernel/smp.c +++ b/arch/arc/kernel/smp.c | |||
@@ -12,23 +12,15 @@ | |||
12 | * -- Initial Write (Borrowed heavily from ARM) | 12 | * -- Initial Write (Borrowed heavily from ARM) |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
18 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
19 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
20 | #include <linux/profile.h> | 18 | #include <linux/profile.h> |
21 | #include <linux/errno.h> | ||
22 | #include <linux/err.h> | ||
23 | #include <linux/mm.h> | 19 | #include <linux/mm.h> |
24 | #include <linux/cpu.h> | 20 | #include <linux/cpu.h> |
25 | #include <linux/smp.h> | ||
26 | #include <linux/irq.h> | 21 | #include <linux/irq.h> |
27 | #include <linux/delay.h> | ||
28 | #include <linux/atomic.h> | 22 | #include <linux/atomic.h> |
29 | #include <linux/percpu.h> | ||
30 | #include <linux/cpumask.h> | 23 | #include <linux/cpumask.h> |
31 | #include <linux/spinlock_types.h> | ||
32 | #include <linux/reboot.h> | 24 | #include <linux/reboot.h> |
33 | #include <asm/processor.h> | 25 | #include <asm/processor.h> |
34 | #include <asm/setup.h> | 26 | #include <asm/setup.h> |
@@ -136,7 +128,7 @@ void start_kernel_secondary(void) | |||
136 | pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu); | 128 | pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu); |
137 | 129 | ||
138 | if (machine_desc->init_smp) | 130 | if (machine_desc->init_smp) |
139 | machine_desc->init_smp(smp_processor_id()); | 131 | machine_desc->init_smp(cpu); |
140 | 132 | ||
141 | arc_local_timer_setup(); | 133 | arc_local_timer_setup(); |
142 | 134 | ||
@@ -338,18 +330,11 @@ irqreturn_t do_IPI(int irq, void *dev_id) | |||
338 | */ | 330 | */ |
339 | static DEFINE_PER_CPU(int, ipi_dev); | 331 | static DEFINE_PER_CPU(int, ipi_dev); |
340 | 332 | ||
341 | static struct irqaction arc_ipi_irq = { | ||
342 | .name = "IPI Interrupt", | ||
343 | .flags = IRQF_PERCPU, | ||
344 | .handler = do_IPI, | ||
345 | }; | ||
346 | |||
347 | int smp_ipi_irq_setup(int cpu, int irq) | 333 | int smp_ipi_irq_setup(int cpu, int irq) |
348 | { | 334 | { |
349 | if (!cpu) | 335 | int *dev = per_cpu_ptr(&ipi_dev, cpu); |
350 | return setup_irq(irq, &arc_ipi_irq); | 336 | |
351 | else | 337 | arc_request_percpu_irq(irq, cpu, do_IPI, "IPI Interrupt", dev); |
352 | arch_unmask_irq(irq); | ||
353 | 338 | ||
354 | return 0; | 339 | return 0; |
355 | } | 340 | } |
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c index 36c2aa99436f..dbe74f418019 100644 --- a/arch/arc/kernel/time.c +++ b/arch/arc/kernel/time.c | |||
@@ -144,12 +144,12 @@ static struct clocksource arc_counter = { | |||
144 | /********** Clock Event Device *********/ | 144 | /********** Clock Event Device *********/ |
145 | 145 | ||
146 | /* | 146 | /* |
147 | * Arm the timer to interrupt after @limit cycles | 147 | * Arm the timer to interrupt after @cycles |
148 | * The distinction for oneshot/periodic is done in arc_event_timer_ack() below | 148 | * The distinction for oneshot/periodic is done in arc_event_timer_ack() below |
149 | */ | 149 | */ |
150 | static void arc_timer_event_setup(unsigned int limit) | 150 | static void arc_timer_event_setup(unsigned int cycles) |
151 | { | 151 | { |
152 | write_aux_reg(ARC_REG_TIMER0_LIMIT, limit); | 152 | write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles); |
153 | write_aux_reg(ARC_REG_TIMER0_CNT, 0); /* start from 0 */ | 153 | write_aux_reg(ARC_REG_TIMER0_CNT, 0); /* start from 0 */ |
154 | 154 | ||
155 | write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH); | 155 | write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH); |
@@ -168,6 +168,10 @@ static void arc_clkevent_set_mode(enum clock_event_mode mode, | |||
168 | { | 168 | { |
169 | switch (mode) { | 169 | switch (mode) { |
170 | case CLOCK_EVT_MODE_PERIODIC: | 170 | case CLOCK_EVT_MODE_PERIODIC: |
171 | /* | ||
172 | * At X Hz, 1 sec = 1000ms -> X cycles; | ||
173 | * 10ms -> X / 100 cycles | ||
174 | */ | ||
171 | arc_timer_event_setup(arc_get_core_freq() / HZ); | 175 | arc_timer_event_setup(arc_get_core_freq() / HZ); |
172 | break; | 176 | break; |
173 | case CLOCK_EVT_MODE_ONESHOT: | 177 | case CLOCK_EVT_MODE_ONESHOT: |
@@ -210,12 +214,6 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id) | |||
210 | return IRQ_HANDLED; | 214 | return IRQ_HANDLED; |
211 | } | 215 | } |
212 | 216 | ||
213 | static struct irqaction arc_timer_irq = { | ||
214 | .name = "Timer0 (clock-evt-dev)", | ||
215 | .flags = IRQF_TIMER | IRQF_PERCPU, | ||
216 | .handler = timer_irq_handler, | ||
217 | }; | ||
218 | |||
219 | /* | 217 | /* |
220 | * Setup the local event timer for @cpu | 218 | * Setup the local event timer for @cpu |
221 | */ | 219 | */ |
@@ -228,15 +226,9 @@ void arc_local_timer_setup() | |||
228 | clockevents_config_and_register(evt, arc_get_core_freq(), | 226 | clockevents_config_and_register(evt, arc_get_core_freq(), |
229 | 0, ARC_TIMER_MAX); | 227 | 0, ARC_TIMER_MAX); |
230 | 228 | ||
231 | /* | 229 | /* setup the per-cpu timer IRQ handler - for all cpus */ |
232 | * setup the per-cpu timer IRQ handler - for all cpus | 230 | arc_request_percpu_irq(TIMER0_IRQ, cpu, timer_irq_handler, |
233 | * For non boot CPU explicitly unmask at intc | 231 | "Timer0 (per-cpu-tick)", evt); |
234 | * setup_irq() -> .. -> irq_startup() already does this on boot-cpu | ||
235 | */ | ||
236 | if (!cpu) | ||
237 | setup_irq(TIMER0_IRQ, &arc_timer_irq); | ||
238 | else | ||
239 | arch_unmask_irq(TIMER0_IRQ); | ||
240 | } | 232 | } |
241 | 233 | ||
242 | /* | 234 | /* |
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c index 353b202c37c9..4670afc3b971 100644 --- a/arch/arc/mm/cache_arc700.c +++ b/arch/arc/mm/cache_arc700.c | |||
@@ -77,21 +77,19 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len) | |||
77 | { | 77 | { |
78 | int n = 0; | 78 | int n = 0; |
79 | 79 | ||
80 | #define PR_CACHE(p, enb, str) \ | 80 | #define PR_CACHE(p, cfg, str) \ |
81 | { \ | ||
82 | if (!(p)->ver) \ | 81 | if (!(p)->ver) \ |
83 | n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \ | 82 | n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \ |
84 | else \ | 83 | else \ |
85 | n += scnprintf(buf + n, len - n, \ | 84 | n += scnprintf(buf + n, len - n, \ |
86 | str"\t\t: (%uK) VIPT, %dway set-asc, %ub Line %s\n", \ | 85 | str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n", \ |
87 | TO_KB((p)->sz), (p)->assoc, (p)->line_len, \ | 86 | (p)->sz_k, (p)->assoc, (p)->line_len, \ |
88 | enb ? "" : "DISABLED (kernel-build)"); \ | 87 | (p)->vipt ? "VIPT" : "PIPT", \ |
89 | } | 88 | (p)->alias ? " aliasing" : "", \ |
89 | IS_ENABLED(cfg) ? "" : " (not used)"); | ||
90 | 90 | ||
91 | PR_CACHE(&cpuinfo_arc700[c].icache, IS_ENABLED(CONFIG_ARC_HAS_ICACHE), | 91 | PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache"); |
92 | "I-Cache"); | 92 | PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache"); |
93 | PR_CACHE(&cpuinfo_arc700[c].dcache, IS_ENABLED(CONFIG_ARC_HAS_DCACHE), | ||
94 | "D-Cache"); | ||
95 | 93 | ||
96 | return buf; | 94 | return buf; |
97 | } | 95 | } |
@@ -116,20 +114,31 @@ void read_decode_cache_bcr(void) | |||
116 | p_ic = &cpuinfo_arc700[cpu].icache; | 114 | p_ic = &cpuinfo_arc700[cpu].icache; |
117 | READ_BCR(ARC_REG_IC_BCR, ibcr); | 115 | READ_BCR(ARC_REG_IC_BCR, ibcr); |
118 | 116 | ||
117 | if (!ibcr.ver) | ||
118 | goto dc_chk; | ||
119 | |||
119 | BUG_ON(ibcr.config != 3); | 120 | BUG_ON(ibcr.config != 3); |
120 | p_ic->assoc = 2; /* Fixed to 2w set assoc */ | 121 | p_ic->assoc = 2; /* Fixed to 2w set assoc */ |
121 | p_ic->line_len = 8 << ibcr.line_len; | 122 | p_ic->line_len = 8 << ibcr.line_len; |
122 | p_ic->sz = 0x200 << ibcr.sz; | 123 | p_ic->sz_k = 1 << (ibcr.sz - 1); |
123 | p_ic->ver = ibcr.ver; | 124 | p_ic->ver = ibcr.ver; |
125 | p_ic->vipt = 1; | ||
126 | p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1; | ||
124 | 127 | ||
128 | dc_chk: | ||
125 | p_dc = &cpuinfo_arc700[cpu].dcache; | 129 | p_dc = &cpuinfo_arc700[cpu].dcache; |
126 | READ_BCR(ARC_REG_DC_BCR, dbcr); | 130 | READ_BCR(ARC_REG_DC_BCR, dbcr); |
127 | 131 | ||
132 | if (!dbcr.ver) | ||
133 | return; | ||
134 | |||
128 | BUG_ON(dbcr.config != 2); | 135 | BUG_ON(dbcr.config != 2); |
129 | p_dc->assoc = 4; /* Fixed to 4w set assoc */ | 136 | p_dc->assoc = 4; /* Fixed to 4w set assoc */ |
130 | p_dc->line_len = 16 << dbcr.line_len; | 137 | p_dc->line_len = 16 << dbcr.line_len; |
131 | p_dc->sz = 0x200 << dbcr.sz; | 138 | p_dc->sz_k = 1 << (dbcr.sz - 1); |
132 | p_dc->ver = dbcr.ver; | 139 | p_dc->ver = dbcr.ver; |
140 | p_dc->vipt = 1; | ||
141 | p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1; | ||
133 | } | 142 | } |
134 | 143 | ||
135 | /* | 144 | /* |
@@ -142,14 +151,16 @@ void read_decode_cache_bcr(void) | |||
142 | void arc_cache_init(void) | 151 | void arc_cache_init(void) |
143 | { | 152 | { |
144 | unsigned int __maybe_unused cpu = smp_processor_id(); | 153 | unsigned int __maybe_unused cpu = smp_processor_id(); |
145 | struct cpuinfo_arc_cache __maybe_unused *ic, __maybe_unused *dc; | ||
146 | char str[256]; | 154 | char str[256]; |
147 | 155 | ||
148 | printk(arc_cache_mumbojumbo(0, str, sizeof(str))); | 156 | printk(arc_cache_mumbojumbo(0, str, sizeof(str))); |
149 | 157 | ||
150 | #ifdef CONFIG_ARC_HAS_ICACHE | 158 | if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) { |
151 | ic = &cpuinfo_arc700[cpu].icache; | 159 | struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache; |
152 | if (ic->ver) { | 160 | |
161 | if (!ic->ver) | ||
162 | panic("cache support enabled but non-existent cache\n"); | ||
163 | |||
153 | if (ic->line_len != L1_CACHE_BYTES) | 164 | if (ic->line_len != L1_CACHE_BYTES) |
154 | panic("ICache line [%d] != kernel Config [%d]", | 165 | panic("ICache line [%d] != kernel Config [%d]", |
155 | ic->line_len, L1_CACHE_BYTES); | 166 | ic->line_len, L1_CACHE_BYTES); |
@@ -158,26 +169,26 @@ void arc_cache_init(void) | |||
158 | panic("Cache ver [%d] doesn't match MMU ver [%d]\n", | 169 | panic("Cache ver [%d] doesn't match MMU ver [%d]\n", |
159 | ic->ver, CONFIG_ARC_MMU_VER); | 170 | ic->ver, CONFIG_ARC_MMU_VER); |
160 | } | 171 | } |
161 | #endif | ||
162 | 172 | ||
163 | #ifdef CONFIG_ARC_HAS_DCACHE | 173 | if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) { |
164 | dc = &cpuinfo_arc700[cpu].dcache; | 174 | struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache; |
165 | if (dc->ver) { | 175 | int handled; |
166 | unsigned int dcache_does_alias; | 176 | |
177 | if (!dc->ver) | ||
178 | panic("cache support enabled but non-existent cache\n"); | ||
167 | 179 | ||
168 | if (dc->line_len != L1_CACHE_BYTES) | 180 | if (dc->line_len != L1_CACHE_BYTES) |
169 | panic("DCache line [%d] != kernel Config [%d]", | 181 | panic("DCache line [%d] != kernel Config [%d]", |
170 | dc->line_len, L1_CACHE_BYTES); | 182 | dc->line_len, L1_CACHE_BYTES); |
171 | 183 | ||
172 | /* check for D-Cache aliasing */ | 184 | /* check for D-Cache aliasing */ |
173 | dcache_does_alias = (dc->sz / dc->assoc) > PAGE_SIZE; | 185 | handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING); |
174 | 186 | ||
175 | if (dcache_does_alias && !cache_is_vipt_aliasing()) | 187 | if (dc->alias && !handled) |
176 | panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n"); | 188 | panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n"); |
177 | else if (!dcache_does_alias && cache_is_vipt_aliasing()) | 189 | else if (!dc->alias && handled) |
178 | panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n"); | 190 | panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n"); |
179 | } | 191 | } |
180 | #endif | ||
181 | } | 192 | } |
182 | 193 | ||
183 | #define OP_INV 0x1 | 194 | #define OP_INV 0x1 |
@@ -255,10 +266,32 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr, | |||
255 | * Machine specific helpers for Entire D-Cache or Per Line ops | 266 | * Machine specific helpers for Entire D-Cache or Per Line ops |
256 | */ | 267 | */ |
257 | 268 | ||
258 | static inline void wait_for_flush(void) | 269 | static unsigned int __before_dc_op(const int op) |
270 | { | ||
271 | unsigned int reg = reg; | ||
272 | |||
273 | if (op == OP_FLUSH_N_INV) { | ||
274 | /* Dcache provides 2 cmd: FLUSH or INV | ||
275 | * INV inturn has sub-modes: DISCARD or FLUSH-BEFORE | ||
276 | * flush-n-inv is achieved by INV cmd but with IM=1 | ||
277 | * So toggle INV sub-mode depending on op request and default | ||
278 | */ | ||
279 | reg = read_aux_reg(ARC_REG_DC_CTRL); | ||
280 | write_aux_reg(ARC_REG_DC_CTRL, reg | DC_CTRL_INV_MODE_FLUSH) | ||
281 | ; | ||
282 | } | ||
283 | |||
284 | return reg; | ||
285 | } | ||
286 | |||
287 | static void __after_dc_op(const int op, unsigned int reg) | ||
259 | { | 288 | { |
260 | while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS) | 289 | if (op & OP_FLUSH) /* flush / flush-n-inv both wait */ |
261 | ; | 290 | while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS); |
291 | |||
292 | /* Switch back to default Invalidate mode */ | ||
293 | if (op == OP_FLUSH_N_INV) | ||
294 | write_aux_reg(ARC_REG_DC_CTRL, reg & ~DC_CTRL_INV_MODE_FLUSH); | ||
262 | } | 295 | } |
263 | 296 | ||
264 | /* | 297 | /* |
@@ -269,18 +302,10 @@ static inline void wait_for_flush(void) | |||
269 | */ | 302 | */ |
270 | static inline void __dc_entire_op(const int cacheop) | 303 | static inline void __dc_entire_op(const int cacheop) |
271 | { | 304 | { |
272 | unsigned int tmp = tmp; | 305 | unsigned int ctrl_reg; |
273 | int aux; | 306 | int aux; |
274 | 307 | ||
275 | if (cacheop == OP_FLUSH_N_INV) { | 308 | ctrl_reg = __before_dc_op(cacheop); |
276 | /* Dcache provides 2 cmd: FLUSH or INV | ||
277 | * INV inturn has sub-modes: DISCARD or FLUSH-BEFORE | ||
278 | * flush-n-inv is achieved by INV cmd but with IM=1 | ||
279 | * Default INV sub-mode is DISCARD, which needs to be toggled | ||
280 | */ | ||
281 | tmp = read_aux_reg(ARC_REG_DC_CTRL); | ||
282 | write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH); | ||
283 | } | ||
284 | 309 | ||
285 | if (cacheop & OP_INV) /* Inv or flush-n-inv use same cmd reg */ | 310 | if (cacheop & OP_INV) /* Inv or flush-n-inv use same cmd reg */ |
286 | aux = ARC_REG_DC_IVDC; | 311 | aux = ARC_REG_DC_IVDC; |
@@ -289,12 +314,7 @@ static inline void __dc_entire_op(const int cacheop) | |||
289 | 314 | ||
290 | write_aux_reg(aux, 0x1); | 315 | write_aux_reg(aux, 0x1); |
291 | 316 | ||
292 | if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */ | 317 | __after_dc_op(cacheop, ctrl_reg); |
293 | wait_for_flush(); | ||
294 | |||
295 | /* Switch back the DISCARD ONLY Invalidate mode */ | ||
296 | if (cacheop == OP_FLUSH_N_INV) | ||
297 | write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH); | ||
298 | } | 318 | } |
299 | 319 | ||
300 | /* For kernel mappings cache operation: index is same as paddr */ | 320 | /* For kernel mappings cache operation: index is same as paddr */ |
@@ -306,29 +326,16 @@ static inline void __dc_entire_op(const int cacheop) | |||
306 | static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr, | 326 | static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr, |
307 | unsigned long sz, const int cacheop) | 327 | unsigned long sz, const int cacheop) |
308 | { | 328 | { |
309 | unsigned long flags, tmp = tmp; | 329 | unsigned long flags; |
330 | unsigned int ctrl_reg; | ||
310 | 331 | ||
311 | local_irq_save(flags); | 332 | local_irq_save(flags); |
312 | 333 | ||
313 | if (cacheop == OP_FLUSH_N_INV) { | 334 | ctrl_reg = __before_dc_op(cacheop); |
314 | /* | ||
315 | * Dcache provides 2 cmd: FLUSH or INV | ||
316 | * INV inturn has sub-modes: DISCARD or FLUSH-BEFORE | ||
317 | * flush-n-inv is achieved by INV cmd but with IM=1 | ||
318 | * Default INV sub-mode is DISCARD, which needs to be toggled | ||
319 | */ | ||
320 | tmp = read_aux_reg(ARC_REG_DC_CTRL); | ||
321 | write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH); | ||
322 | } | ||
323 | 335 | ||
324 | __cache_line_loop(paddr, vaddr, sz, cacheop); | 336 | __cache_line_loop(paddr, vaddr, sz, cacheop); |
325 | 337 | ||
326 | if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */ | 338 | __after_dc_op(cacheop, ctrl_reg); |
327 | wait_for_flush(); | ||
328 | |||
329 | /* Switch back the DISCARD ONLY Invalidate mode */ | ||
330 | if (cacheop == OP_FLUSH_N_INV) | ||
331 | write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH); | ||
332 | 339 | ||
333 | local_irq_restore(flags); | 340 | local_irq_restore(flags); |
334 | } | 341 | } |
@@ -389,8 +396,16 @@ static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr, | |||
389 | /*********************************************************** | 396 | /*********************************************************** |
390 | * Machine specific helper for per line I-Cache invalidate. | 397 | * Machine specific helper for per line I-Cache invalidate. |
391 | */ | 398 | */ |
392 | static void __ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr, | 399 | |
393 | unsigned long sz) | 400 | static inline void __ic_entire_inv(void) |
401 | { | ||
402 | write_aux_reg(ARC_REG_IC_IVIC, 1); | ||
403 | read_aux_reg(ARC_REG_IC_CTRL); /* blocks */ | ||
404 | } | ||
405 | |||
406 | static inline void | ||
407 | __ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr, | ||
408 | unsigned long sz) | ||
394 | { | 409 | { |
395 | unsigned long flags; | 410 | unsigned long flags; |
396 | 411 | ||
@@ -399,30 +414,39 @@ static void __ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr, | |||
399 | local_irq_restore(flags); | 414 | local_irq_restore(flags); |
400 | } | 415 | } |
401 | 416 | ||
402 | static inline void __ic_entire_inv(void) | 417 | #ifndef CONFIG_SMP |
403 | { | 418 | |
404 | write_aux_reg(ARC_REG_IC_IVIC, 1); | 419 | #define __ic_line_inv_vaddr(p, v, s) __ic_line_inv_vaddr_local(p, v, s) |
405 | read_aux_reg(ARC_REG_IC_CTRL); /* blocks */ | ||
406 | } | ||
407 | 420 | ||
408 | struct ic_line_inv_vaddr_ipi { | 421 | #else |
422 | |||
423 | struct ic_inv_args { | ||
409 | unsigned long paddr, vaddr; | 424 | unsigned long paddr, vaddr; |
410 | int sz; | 425 | int sz; |
411 | }; | 426 | }; |
412 | 427 | ||
413 | static void __ic_line_inv_vaddr_helper(void *info) | 428 | static void __ic_line_inv_vaddr_helper(void *info) |
414 | { | 429 | { |
415 | struct ic_line_inv_vaddr_ipi *ic_inv = (struct ic_line_inv_vaddr_ipi*) info; | 430 | struct ic_inv *ic_inv_args = (struct ic_inv_args *) info; |
431 | |||
416 | __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz); | 432 | __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz); |
417 | } | 433 | } |
418 | 434 | ||
419 | static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr, | 435 | static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr, |
420 | unsigned long sz) | 436 | unsigned long sz) |
421 | { | 437 | { |
422 | struct ic_line_inv_vaddr_ipi ic_inv = { paddr, vaddr , sz}; | 438 | struct ic_inv_args ic_inv = { |
439 | .paddr = paddr, | ||
440 | .vaddr = vaddr, | ||
441 | .sz = sz | ||
442 | }; | ||
443 | |||
423 | on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1); | 444 | on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1); |
424 | } | 445 | } |
425 | #else | 446 | |
447 | #endif /* CONFIG_SMP */ | ||
448 | |||
449 | #else /* !CONFIG_ARC_HAS_ICACHE */ | ||
426 | 450 | ||
427 | #define __ic_entire_inv() | 451 | #define __ic_entire_inv() |
428 | #define __ic_line_inv_vaddr(pstart, vstart, sz) | 452 | #define __ic_line_inv_vaddr(pstart, vstart, sz) |
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index 9c69552350c4..6f7e3a68803a 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c | |||
@@ -159,7 +159,6 @@ good_area: | |||
159 | return; | 159 | return; |
160 | } | 160 | } |
161 | 161 | ||
162 | /* TBD: switch to pagefault_out_of_memory() */ | ||
163 | if (fault & VM_FAULT_OOM) | 162 | if (fault & VM_FAULT_OOM) |
164 | goto out_of_memory; | 163 | goto out_of_memory; |
165 | else if (fault & VM_FAULT_SIGBUS) | 164 | else if (fault & VM_FAULT_SIGBUS) |
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S index 79bfc81358c9..d572f1c2c724 100644 --- a/arch/arc/mm/tlbex.S +++ b/arch/arc/mm/tlbex.S | |||
@@ -220,9 +220,9 @@ ex_saved_reg1: | |||
220 | 220 | ||
221 | .macro CONV_PTE_TO_TLB | 221 | .macro CONV_PTE_TO_TLB |
222 | and r3, r0, PTE_BITS_RWX ; r w x | 222 | and r3, r0, PTE_BITS_RWX ; r w x |
223 | lsl r2, r3, 3 ; r w x 0 0 0 | 223 | lsl r2, r3, 3 ; r w x 0 0 0 (GLOBAL, kernel only) |
224 | and.f 0, r0, _PAGE_GLOBAL | 224 | and.f 0, r0, _PAGE_GLOBAL |
225 | or.z r2, r2, r3 ; r w x r w x | 225 | or.z r2, r2, r3 ; r w x r w x (!GLOBAL, user page) |
226 | 226 | ||
227 | and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE | 227 | and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE |
228 | or r3, r3, r2 | 228 | or r3, r3, r2 |
diff --git a/arch/arc/plat-arcfpga/Makefile b/arch/arc/plat-arcfpga/Makefile index 4d1bddc34b5b..66fd0ecd68b3 100644 --- a/arch/arc/plat-arcfpga/Makefile +++ b/arch/arc/plat-arcfpga/Makefile | |||
@@ -8,5 +8,5 @@ | |||
8 | 8 | ||
9 | KBUILD_CFLAGS += -Iarch/arc/plat-arcfpga/include | 9 | KBUILD_CFLAGS += -Iarch/arc/plat-arcfpga/include |
10 | 10 | ||
11 | obj-y := platform.o irq.o | 11 | obj-y := platform.o |
12 | obj-$(CONFIG_ISS_SMP_EXTN) += smp.o | 12 | obj-$(CONFIG_ISS_SMP_EXTN) += smp.o |
diff --git a/arch/arc/plat-arcfpga/include/plat/irq.h b/arch/arc/plat-arcfpga/include/plat/irq.h index 6adbc53c3a5b..2c9dea690ac4 100644 --- a/arch/arc/plat-arcfpga/include/plat/irq.h +++ b/arch/arc/plat-arcfpga/include/plat/irq.h | |||
@@ -24,6 +24,4 @@ | |||
24 | #define IDU_INTERRUPT_0 16 | 24 | #define IDU_INTERRUPT_0 16 |
25 | #endif | 25 | #endif |
26 | 26 | ||
27 | extern void __init plat_fpga_init_IRQ(void); | ||
28 | |||
29 | #endif | 27 | #endif |
diff --git a/arch/arc/plat-arcfpga/irq.c b/arch/arc/plat-arcfpga/irq.c deleted file mode 100644 index d2215fd889c2..000000000000 --- a/arch/arc/plat-arcfpga/irq.c +++ /dev/null | |||
@@ -1,25 +0,0 @@ | |||
1 | /* | ||
2 | * ARC FPGA Platform IRQ hookups | ||
3 | * | ||
4 | * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/interrupt.h> | ||
12 | #include <plat/irq.h> | ||
13 | |||
14 | void __init plat_fpga_init_IRQ(void) | ||
15 | { | ||
16 | /* | ||
17 | * SMP Hack because UART IRQ hardwired to cpu0 (boot-cpu) but if the | ||
18 | * request_irq() comes from any other CPU, the low level IRQ unamsking | ||
19 | * essential for getting Interrupts won't be enabled on cpu0, locking | ||
20 | * up the UART state machine. | ||
21 | */ | ||
22 | #ifdef CONFIG_SMP | ||
23 | arch_unmask_irq(UART0_IRQ); | ||
24 | #endif | ||
25 | } | ||
diff --git a/arch/arc/plat-arcfpga/platform.c b/arch/arc/plat-arcfpga/platform.c index b8d0d456627f..1038949a99a1 100644 --- a/arch/arc/plat-arcfpga/platform.c +++ b/arch/arc/plat-arcfpga/platform.c | |||
@@ -57,7 +57,6 @@ MACHINE_START(ANGEL4, "angel4") | |||
57 | .dt_compat = aa4_compat, | 57 | .dt_compat = aa4_compat, |
58 | .init_early = plat_fpga_early_init, | 58 | .init_early = plat_fpga_early_init, |
59 | .init_machine = plat_fpga_populate_dev, | 59 | .init_machine = plat_fpga_populate_dev, |
60 | .init_irq = plat_fpga_init_IRQ, | ||
61 | #ifdef CONFIG_ISS_SMP_EXTN | 60 | #ifdef CONFIG_ISS_SMP_EXTN |
62 | .init_smp = iss_model_init_smp, | 61 | .init_smp = iss_model_init_smp, |
63 | #endif | 62 | #endif |
@@ -72,7 +71,6 @@ MACHINE_START(ML509, "ml509") | |||
72 | .dt_compat = ml509_compat, | 71 | .dt_compat = ml509_compat, |
73 | .init_early = plat_fpga_early_init, | 72 | .init_early = plat_fpga_early_init, |
74 | .init_machine = plat_fpga_populate_dev, | 73 | .init_machine = plat_fpga_populate_dev, |
75 | .init_irq = plat_fpga_init_IRQ, | ||
76 | #ifdef CONFIG_SMP | 74 | #ifdef CONFIG_SMP |
77 | .init_smp = iss_model_init_smp, | 75 | .init_smp = iss_model_init_smp, |
78 | #endif | 76 | #endif |
@@ -87,5 +85,4 @@ MACHINE_START(NSIMOSCI, "nsimosci") | |||
87 | .dt_compat = nsimosci_compat, | 85 | .dt_compat = nsimosci_compat, |
88 | .init_early = NULL, | 86 | .init_early = NULL, |
89 | .init_machine = plat_fpga_populate_dev, | 87 | .init_machine = plat_fpga_populate_dev, |
90 | .init_irq = NULL, | ||
91 | MACHINE_END | 88 | MACHINE_END |
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 21ca0cebcab0..32640c431a08 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h | |||
@@ -19,7 +19,7 @@ | |||
19 | * This may need to be greater than __NR_last_syscall+1 in order to | 19 | * This may need to be greater than __NR_last_syscall+1 in order to |
20 | * account for the padding in the syscall table | 20 | * account for the padding in the syscall table |
21 | */ | 21 | */ |
22 | #define __NR_syscalls (384) | 22 | #define __NR_syscalls (388) |
23 | 23 | ||
24 | /* | 24 | /* |
25 | * *NOTE*: This is a ghost syscall private to the kernel. Only the | 25 | * *NOTE*: This is a ghost syscall private to the kernel. Only the |
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h index 767ea204334e..3aaa75cae90c 100644 --- a/arch/arm/include/uapi/asm/unistd.h +++ b/arch/arm/include/uapi/asm/unistd.h | |||
@@ -410,6 +410,8 @@ | |||
410 | #define __NR_sched_getattr (__NR_SYSCALL_BASE+381) | 410 | #define __NR_sched_getattr (__NR_SYSCALL_BASE+381) |
411 | #define __NR_renameat2 (__NR_SYSCALL_BASE+382) | 411 | #define __NR_renameat2 (__NR_SYSCALL_BASE+382) |
412 | #define __NR_seccomp (__NR_SYSCALL_BASE+383) | 412 | #define __NR_seccomp (__NR_SYSCALL_BASE+383) |
413 | #define __NR_getrandom (__NR_SYSCALL_BASE+384) | ||
414 | #define __NR_memfd_create (__NR_SYSCALL_BASE+385) | ||
413 | 415 | ||
414 | /* | 416 | /* |
415 | * The following SWIs are ARM private. | 417 | * The following SWIs are ARM private. |
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index bea85f97f363..9f899d8fdcca 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S | |||
@@ -393,6 +393,8 @@ | |||
393 | CALL(sys_sched_getattr) | 393 | CALL(sys_sched_getattr) |
394 | CALL(sys_renameat2) | 394 | CALL(sys_renameat2) |
395 | CALL(sys_seccomp) | 395 | CALL(sys_seccomp) |
396 | CALL(sys_getrandom) | ||
397 | /* 385 */ CALL(sys_memfd_create) | ||
396 | #ifndef syscalls_counted | 398 | #ifndef syscalls_counted |
397 | .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls | 399 | .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls |
398 | #define syscalls_counted | 400 | #define syscalls_counted |
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 7c4fada440f0..9388a3d479e1 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
@@ -47,6 +47,9 @@ | |||
47 | #include <asm/mach/arch.h> | 47 | #include <asm/mach/arch.h> |
48 | #include <asm/mpu.h> | 48 | #include <asm/mpu.h> |
49 | 49 | ||
50 | #define CREATE_TRACE_POINTS | ||
51 | #include <trace/events/ipi.h> | ||
52 | |||
50 | /* | 53 | /* |
51 | * as from 2.5, kernels no longer have an init_tasks structure | 54 | * as from 2.5, kernels no longer have an init_tasks structure |
52 | * so we need some other way of telling a new secondary core | 55 | * so we need some other way of telling a new secondary core |
@@ -430,38 +433,15 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
430 | } | 433 | } |
431 | } | 434 | } |
432 | 435 | ||
433 | static void (*smp_cross_call)(const struct cpumask *, unsigned int); | 436 | static void (*__smp_cross_call)(const struct cpumask *, unsigned int); |
434 | 437 | ||
435 | void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int)) | 438 | void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int)) |
436 | { | 439 | { |
437 | if (!smp_cross_call) | 440 | if (!__smp_cross_call) |
438 | smp_cross_call = fn; | 441 | __smp_cross_call = fn; |
439 | } | ||
440 | |||
441 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) | ||
442 | { | ||
443 | smp_cross_call(mask, IPI_CALL_FUNC); | ||
444 | } | ||
445 | |||
446 | void arch_send_wakeup_ipi_mask(const struct cpumask *mask) | ||
447 | { | ||
448 | smp_cross_call(mask, IPI_WAKEUP); | ||
449 | } | ||
450 | |||
451 | void arch_send_call_function_single_ipi(int cpu) | ||
452 | { | ||
453 | smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); | ||
454 | } | 442 | } |
455 | 443 | ||
456 | #ifdef CONFIG_IRQ_WORK | 444 | static const char *ipi_types[NR_IPI] __tracepoint_string = { |
457 | void arch_irq_work_raise(void) | ||
458 | { | ||
459 | if (is_smp()) | ||
460 | smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK); | ||
461 | } | ||
462 | #endif | ||
463 | |||
464 | static const char *ipi_types[NR_IPI] = { | ||
465 | #define S(x,s) [x] = s | 445 | #define S(x,s) [x] = s |
466 | S(IPI_WAKEUP, "CPU wakeup interrupts"), | 446 | S(IPI_WAKEUP, "CPU wakeup interrupts"), |
467 | S(IPI_TIMER, "Timer broadcast interrupts"), | 447 | S(IPI_TIMER, "Timer broadcast interrupts"), |
@@ -473,6 +453,12 @@ static const char *ipi_types[NR_IPI] = { | |||
473 | S(IPI_COMPLETION, "completion interrupts"), | 453 | S(IPI_COMPLETION, "completion interrupts"), |
474 | }; | 454 | }; |
475 | 455 | ||
456 | static void smp_cross_call(const struct cpumask *target, unsigned int ipinr) | ||
457 | { | ||
458 | trace_ipi_raise(target, ipi_types[ipinr]); | ||
459 | __smp_cross_call(target, ipinr); | ||
460 | } | ||
461 | |||
476 | void show_ipi_list(struct seq_file *p, int prec) | 462 | void show_ipi_list(struct seq_file *p, int prec) |
477 | { | 463 | { |
478 | unsigned int cpu, i; | 464 | unsigned int cpu, i; |
@@ -499,6 +485,29 @@ u64 smp_irq_stat_cpu(unsigned int cpu) | |||
499 | return sum; | 485 | return sum; |
500 | } | 486 | } |
501 | 487 | ||
488 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) | ||
489 | { | ||
490 | smp_cross_call(mask, IPI_CALL_FUNC); | ||
491 | } | ||
492 | |||
493 | void arch_send_wakeup_ipi_mask(const struct cpumask *mask) | ||
494 | { | ||
495 | smp_cross_call(mask, IPI_WAKEUP); | ||
496 | } | ||
497 | |||
498 | void arch_send_call_function_single_ipi(int cpu) | ||
499 | { | ||
500 | smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); | ||
501 | } | ||
502 | |||
503 | #ifdef CONFIG_IRQ_WORK | ||
504 | void arch_irq_work_raise(void) | ||
505 | { | ||
506 | if (is_smp()) | ||
507 | smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK); | ||
508 | } | ||
509 | #endif | ||
510 | |||
502 | #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST | 511 | #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST |
503 | void tick_broadcast(const struct cpumask *mask) | 512 | void tick_broadcast(const struct cpumask *mask) |
504 | { | 513 | { |
@@ -556,8 +565,10 @@ void handle_IPI(int ipinr, struct pt_regs *regs) | |||
556 | unsigned int cpu = smp_processor_id(); | 565 | unsigned int cpu = smp_processor_id(); |
557 | struct pt_regs *old_regs = set_irq_regs(regs); | 566 | struct pt_regs *old_regs = set_irq_regs(regs); |
558 | 567 | ||
559 | if (ipinr < NR_IPI) | 568 | if ((unsigned)ipinr < NR_IPI) { |
569 | trace_ipi_entry(ipi_types[ipinr]); | ||
560 | __inc_irq_stat(cpu, ipi_irqs[ipinr]); | 570 | __inc_irq_stat(cpu, ipi_irqs[ipinr]); |
571 | } | ||
561 | 572 | ||
562 | switch (ipinr) { | 573 | switch (ipinr) { |
563 | case IPI_WAKEUP: | 574 | case IPI_WAKEUP: |
@@ -612,6 +623,9 @@ void handle_IPI(int ipinr, struct pt_regs *regs) | |||
612 | cpu, ipinr); | 623 | cpu, ipinr); |
613 | break; | 624 | break; |
614 | } | 625 | } |
626 | |||
627 | if ((unsigned)ipinr < NR_IPI) | ||
628 | trace_ipi_exit(ipi_types[ipinr]); | ||
615 | set_irq_regs(old_regs); | 629 | set_irq_regs(old_regs); |
616 | } | 630 | } |
617 | 631 | ||
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S index e4c8acfc1323..1a24e9232ec8 100644 --- a/arch/arm/mm/proc-v7-3level.S +++ b/arch/arm/mm/proc-v7-3level.S | |||
@@ -146,12 +146,11 @@ ENDPROC(cpu_v7_set_pte_ext) | |||
146 | mov \tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits | 146 | mov \tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits |
147 | mov \ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT @ lower bits | 147 | mov \ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT @ lower bits |
148 | addls \ttbr1, \ttbr1, #TTBR1_OFFSET | 148 | addls \ttbr1, \ttbr1, #TTBR1_OFFSET |
149 | mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1 | 149 | adcls \tmp, \tmp, #0 |
150 | mcrr p15, 1, \ttbr1, \tmp, c2 @ load TTBR1 | ||
150 | mov \tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits | 151 | mov \tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits |
151 | mov \ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT @ lower bits | 152 | mov \ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT @ lower bits |
152 | mcrr p15, 0, \ttbr0, \zero, c2 @ load TTBR0 | 153 | mcrr p15, 0, \ttbr0, \tmp, c2 @ load TTBR0 |
153 | mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1 | ||
154 | mcrr p15, 0, \ttbr0, \zero, c2 @ load TTBR0 | ||
155 | .endm | 154 | .endm |
156 | 155 | ||
157 | /* | 156 | /* |
diff --git a/arch/arm64/include/asm/signal32.h b/arch/arm64/include/asm/signal32.h index 7c275e3b640f..eeaa97559bab 100644 --- a/arch/arm64/include/asm/signal32.h +++ b/arch/arm64/include/asm/signal32.h | |||
@@ -24,22 +24,21 @@ | |||
24 | 24 | ||
25 | extern const compat_ulong_t aarch32_sigret_code[6]; | 25 | extern const compat_ulong_t aarch32_sigret_code[6]; |
26 | 26 | ||
27 | int compat_setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, | 27 | int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set, |
28 | struct pt_regs *regs); | 28 | struct pt_regs *regs); |
29 | int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, | 29 | int compat_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, |
30 | sigset_t *set, struct pt_regs *regs); | 30 | struct pt_regs *regs); |
31 | 31 | ||
32 | void compat_setup_restart_syscall(struct pt_regs *regs); | 32 | void compat_setup_restart_syscall(struct pt_regs *regs); |
33 | #else | 33 | #else |
34 | 34 | ||
35 | static inline int compat_setup_frame(int usid, struct k_sigaction *ka, | 35 | static inline int compat_setup_frame(int usid, struct ksignal *ksig, |
36 | sigset_t *set, struct pt_regs *regs) | 36 | sigset_t *set, struct pt_regs *regs) |
37 | { | 37 | { |
38 | return -ENOSYS; | 38 | return -ENOSYS; |
39 | } | 39 | } |
40 | 40 | ||
41 | static inline int compat_setup_rt_frame(int usig, struct k_sigaction *ka, | 41 | static inline int compat_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, |
42 | siginfo_t *info, sigset_t *set, | ||
43 | struct pt_regs *regs) | 42 | struct pt_regs *regs) |
44 | { | 43 | { |
45 | return -ENOSYS; | 44 | return -ENOSYS; |
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 6357b9c6c90e..6fa792137eda 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c | |||
@@ -209,19 +209,13 @@ static int setup_sigframe(struct rt_sigframe __user *sf, | |||
209 | return err; | 209 | return err; |
210 | } | 210 | } |
211 | 211 | ||
212 | static struct rt_sigframe __user *get_sigframe(struct k_sigaction *ka, | 212 | static struct rt_sigframe __user *get_sigframe(struct ksignal *ksig, |
213 | struct pt_regs *regs) | 213 | struct pt_regs *regs) |
214 | { | 214 | { |
215 | unsigned long sp, sp_top; | 215 | unsigned long sp, sp_top; |
216 | struct rt_sigframe __user *frame; | 216 | struct rt_sigframe __user *frame; |
217 | 217 | ||
218 | sp = sp_top = regs->sp; | 218 | sp = sp_top = sigsp(regs->sp, ksig); |
219 | |||
220 | /* | ||
221 | * This is the X/Open sanctioned signal stack switching. | ||
222 | */ | ||
223 | if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) | ||
224 | sp = sp_top = current->sas_ss_sp + current->sas_ss_size; | ||
225 | 219 | ||
226 | sp = (sp - sizeof(struct rt_sigframe)) & ~15; | 220 | sp = (sp - sizeof(struct rt_sigframe)) & ~15; |
227 | frame = (struct rt_sigframe __user *)sp; | 221 | frame = (struct rt_sigframe __user *)sp; |
@@ -253,13 +247,13 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka, | |||
253 | regs->regs[30] = (unsigned long)sigtramp; | 247 | regs->regs[30] = (unsigned long)sigtramp; |
254 | } | 248 | } |
255 | 249 | ||
256 | static int setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, | 250 | static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, |
257 | sigset_t *set, struct pt_regs *regs) | 251 | struct pt_regs *regs) |
258 | { | 252 | { |
259 | struct rt_sigframe __user *frame; | 253 | struct rt_sigframe __user *frame; |
260 | int err = 0; | 254 | int err = 0; |
261 | 255 | ||
262 | frame = get_sigframe(ka, regs); | 256 | frame = get_sigframe(ksig, regs); |
263 | if (!frame) | 257 | if (!frame) |
264 | return 1; | 258 | return 1; |
265 | 259 | ||
@@ -269,9 +263,9 @@ static int setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, | |||
269 | err |= __save_altstack(&frame->uc.uc_stack, regs->sp); | 263 | err |= __save_altstack(&frame->uc.uc_stack, regs->sp); |
270 | err |= setup_sigframe(frame, regs, set); | 264 | err |= setup_sigframe(frame, regs, set); |
271 | if (err == 0) { | 265 | if (err == 0) { |
272 | setup_return(regs, ka, frame, usig); | 266 | setup_return(regs, &ksig->ka, frame, usig); |
273 | if (ka->sa.sa_flags & SA_SIGINFO) { | 267 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) { |
274 | err |= copy_siginfo_to_user(&frame->info, info); | 268 | err |= copy_siginfo_to_user(&frame->info, &ksig->info); |
275 | regs->regs[1] = (unsigned long)&frame->info; | 269 | regs->regs[1] = (unsigned long)&frame->info; |
276 | regs->regs[2] = (unsigned long)&frame->uc; | 270 | regs->regs[2] = (unsigned long)&frame->uc; |
277 | } | 271 | } |
@@ -291,13 +285,12 @@ static void setup_restart_syscall(struct pt_regs *regs) | |||
291 | /* | 285 | /* |
292 | * OK, we're invoking a handler | 286 | * OK, we're invoking a handler |
293 | */ | 287 | */ |
294 | static void handle_signal(unsigned long sig, struct k_sigaction *ka, | 288 | static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) |
295 | siginfo_t *info, struct pt_regs *regs) | ||
296 | { | 289 | { |
297 | struct thread_info *thread = current_thread_info(); | 290 | struct thread_info *thread = current_thread_info(); |
298 | struct task_struct *tsk = current; | 291 | struct task_struct *tsk = current; |
299 | sigset_t *oldset = sigmask_to_save(); | 292 | sigset_t *oldset = sigmask_to_save(); |
300 | int usig = sig; | 293 | int usig = ksig->sig; |
301 | int ret; | 294 | int ret; |
302 | 295 | ||
303 | /* | 296 | /* |
@@ -310,13 +303,12 @@ static void handle_signal(unsigned long sig, struct k_sigaction *ka, | |||
310 | * Set up the stack frame | 303 | * Set up the stack frame |
311 | */ | 304 | */ |
312 | if (is_compat_task()) { | 305 | if (is_compat_task()) { |
313 | if (ka->sa.sa_flags & SA_SIGINFO) | 306 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) |
314 | ret = compat_setup_rt_frame(usig, ka, info, oldset, | 307 | ret = compat_setup_rt_frame(usig, ksig, oldset, regs); |
315 | regs); | ||
316 | else | 308 | else |
317 | ret = compat_setup_frame(usig, ka, oldset, regs); | 309 | ret = compat_setup_frame(usig, ksig, oldset, regs); |
318 | } else { | 310 | } else { |
319 | ret = setup_rt_frame(usig, ka, info, oldset, regs); | 311 | ret = setup_rt_frame(usig, ksig, oldset, regs); |
320 | } | 312 | } |
321 | 313 | ||
322 | /* | 314 | /* |
@@ -324,18 +316,14 @@ static void handle_signal(unsigned long sig, struct k_sigaction *ka, | |||
324 | */ | 316 | */ |
325 | ret |= !valid_user_regs(®s->user_regs); | 317 | ret |= !valid_user_regs(®s->user_regs); |
326 | 318 | ||
327 | if (ret != 0) { | ||
328 | force_sigsegv(sig, tsk); | ||
329 | return; | ||
330 | } | ||
331 | |||
332 | /* | 319 | /* |
333 | * Fast forward the stepping logic so we step into the signal | 320 | * Fast forward the stepping logic so we step into the signal |
334 | * handler. | 321 | * handler. |
335 | */ | 322 | */ |
336 | user_fastforward_single_step(tsk); | 323 | if (!ret) |
324 | user_fastforward_single_step(tsk); | ||
337 | 325 | ||
338 | signal_delivered(sig, info, ka, regs, 0); | 326 | signal_setup_done(ret, ksig, 0); |
339 | } | 327 | } |
340 | 328 | ||
341 | /* | 329 | /* |
@@ -350,10 +338,9 @@ static void handle_signal(unsigned long sig, struct k_sigaction *ka, | |||
350 | static void do_signal(struct pt_regs *regs) | 338 | static void do_signal(struct pt_regs *regs) |
351 | { | 339 | { |
352 | unsigned long continue_addr = 0, restart_addr = 0; | 340 | unsigned long continue_addr = 0, restart_addr = 0; |
353 | struct k_sigaction ka; | 341 | int retval = 0; |
354 | siginfo_t info; | ||
355 | int signr, retval = 0; | ||
356 | int syscall = (int)regs->syscallno; | 342 | int syscall = (int)regs->syscallno; |
343 | struct ksignal ksig; | ||
357 | 344 | ||
358 | /* | 345 | /* |
359 | * If we were from a system call, check for system call restarting... | 346 | * If we were from a system call, check for system call restarting... |
@@ -387,8 +374,7 @@ static void do_signal(struct pt_regs *regs) | |||
387 | * Get the signal to deliver. When running under ptrace, at this point | 374 | * Get the signal to deliver. When running under ptrace, at this point |
388 | * the debugger may change all of our registers. | 375 | * the debugger may change all of our registers. |
389 | */ | 376 | */ |
390 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 377 | if (get_signal(&ksig)) { |
391 | if (signr > 0) { | ||
392 | /* | 378 | /* |
393 | * Depending on the signal settings, we may need to revert the | 379 | * Depending on the signal settings, we may need to revert the |
394 | * decision to restart the system call, but skip this if a | 380 | * decision to restart the system call, but skip this if a |
@@ -398,12 +384,12 @@ static void do_signal(struct pt_regs *regs) | |||
398 | (retval == -ERESTARTNOHAND || | 384 | (retval == -ERESTARTNOHAND || |
399 | retval == -ERESTART_RESTARTBLOCK || | 385 | retval == -ERESTART_RESTARTBLOCK || |
400 | (retval == -ERESTARTSYS && | 386 | (retval == -ERESTARTSYS && |
401 | !(ka.sa.sa_flags & SA_RESTART)))) { | 387 | !(ksig.ka.sa.sa_flags & SA_RESTART)))) { |
402 | regs->regs[0] = -EINTR; | 388 | regs->regs[0] = -EINTR; |
403 | regs->pc = continue_addr; | 389 | regs->pc = continue_addr; |
404 | } | 390 | } |
405 | 391 | ||
406 | handle_signal(signr, &ka, &info, regs); | 392 | handle_signal(&ksig, regs); |
407 | return; | 393 | return; |
408 | } | 394 | } |
409 | 395 | ||
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index c5ee208321c3..1b9ad02837cf 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c | |||
@@ -407,20 +407,14 @@ badframe: | |||
407 | return 0; | 407 | return 0; |
408 | } | 408 | } |
409 | 409 | ||
410 | static void __user *compat_get_sigframe(struct k_sigaction *ka, | 410 | static void __user *compat_get_sigframe(struct ksignal *ksig, |
411 | struct pt_regs *regs, | 411 | struct pt_regs *regs, |
412 | int framesize) | 412 | int framesize) |
413 | { | 413 | { |
414 | compat_ulong_t sp = regs->compat_sp; | 414 | compat_ulong_t sp = sigsp(regs->compat_sp, ksig); |
415 | void __user *frame; | 415 | void __user *frame; |
416 | 416 | ||
417 | /* | 417 | /* |
418 | * This is the X/Open sanctioned signal stack switching. | ||
419 | */ | ||
420 | if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) | ||
421 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
422 | |||
423 | /* | ||
424 | * ATPCS B01 mandates 8-byte alignment | 418 | * ATPCS B01 mandates 8-byte alignment |
425 | */ | 419 | */ |
426 | frame = compat_ptr((compat_uptr_t)((sp - framesize) & ~7)); | 420 | frame = compat_ptr((compat_uptr_t)((sp - framesize) & ~7)); |
@@ -520,18 +514,18 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf, | |||
520 | /* | 514 | /* |
521 | * 32-bit signal handling routines called from signal.c | 515 | * 32-bit signal handling routines called from signal.c |
522 | */ | 516 | */ |
523 | int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, | 517 | int compat_setup_rt_frame(int usig, struct ksignal *ksig, |
524 | sigset_t *set, struct pt_regs *regs) | 518 | sigset_t *set, struct pt_regs *regs) |
525 | { | 519 | { |
526 | struct compat_rt_sigframe __user *frame; | 520 | struct compat_rt_sigframe __user *frame; |
527 | int err = 0; | 521 | int err = 0; |
528 | 522 | ||
529 | frame = compat_get_sigframe(ka, regs, sizeof(*frame)); | 523 | frame = compat_get_sigframe(ksig, regs, sizeof(*frame)); |
530 | 524 | ||
531 | if (!frame) | 525 | if (!frame) |
532 | return 1; | 526 | return 1; |
533 | 527 | ||
534 | err |= copy_siginfo_to_user32(&frame->info, info); | 528 | err |= copy_siginfo_to_user32(&frame->info, &ksig->info); |
535 | 529 | ||
536 | __put_user_error(0, &frame->sig.uc.uc_flags, err); | 530 | __put_user_error(0, &frame->sig.uc.uc_flags, err); |
537 | __put_user_error(0, &frame->sig.uc.uc_link, err); | 531 | __put_user_error(0, &frame->sig.uc.uc_link, err); |
@@ -541,7 +535,7 @@ int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, | |||
541 | err |= compat_setup_sigframe(&frame->sig, regs, set); | 535 | err |= compat_setup_sigframe(&frame->sig, regs, set); |
542 | 536 | ||
543 | if (err == 0) { | 537 | if (err == 0) { |
544 | compat_setup_return(regs, ka, frame->sig.retcode, frame, usig); | 538 | compat_setup_return(regs, &ksig->ka, frame->sig.retcode, frame, usig); |
545 | regs->regs[1] = (compat_ulong_t)(unsigned long)&frame->info; | 539 | regs->regs[1] = (compat_ulong_t)(unsigned long)&frame->info; |
546 | regs->regs[2] = (compat_ulong_t)(unsigned long)&frame->sig.uc; | 540 | regs->regs[2] = (compat_ulong_t)(unsigned long)&frame->sig.uc; |
547 | } | 541 | } |
@@ -549,13 +543,13 @@ int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, | |||
549 | return err; | 543 | return err; |
550 | } | 544 | } |
551 | 545 | ||
552 | int compat_setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, | 546 | int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set, |
553 | struct pt_regs *regs) | 547 | struct pt_regs *regs) |
554 | { | 548 | { |
555 | struct compat_sigframe __user *frame; | 549 | struct compat_sigframe __user *frame; |
556 | int err = 0; | 550 | int err = 0; |
557 | 551 | ||
558 | frame = compat_get_sigframe(ka, regs, sizeof(*frame)); | 552 | frame = compat_get_sigframe(ksig, regs, sizeof(*frame)); |
559 | 553 | ||
560 | if (!frame) | 554 | if (!frame) |
561 | return 1; | 555 | return 1; |
@@ -564,7 +558,7 @@ int compat_setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, | |||
564 | 558 | ||
565 | err |= compat_setup_sigframe(frame, regs, set); | 559 | err |= compat_setup_sigframe(frame, regs, set); |
566 | if (err == 0) | 560 | if (err == 0) |
567 | compat_setup_return(regs, ka, frame->retcode, frame, usig); | 561 | compat_setup_return(regs, &ksig->ka, frame->retcode, frame, usig); |
568 | 562 | ||
569 | return err; | 563 | return err; |
570 | } | 564 | } |
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 3e2f5ebbf63e..474339718105 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c | |||
@@ -51,6 +51,9 @@ | |||
51 | #include <asm/tlbflush.h> | 51 | #include <asm/tlbflush.h> |
52 | #include <asm/ptrace.h> | 52 | #include <asm/ptrace.h> |
53 | 53 | ||
54 | #define CREATE_TRACE_POINTS | ||
55 | #include <trace/events/ipi.h> | ||
56 | |||
54 | /* | 57 | /* |
55 | * as from 2.5, kernels no longer have an init_tasks structure | 58 | * as from 2.5, kernels no longer have an init_tasks structure |
56 | * so we need some other way of telling a new secondary core | 59 | * so we need some other way of telling a new secondary core |
@@ -313,8 +316,6 @@ void __init smp_prepare_boot_cpu(void) | |||
313 | set_my_cpu_offset(per_cpu_offset(smp_processor_id())); | 316 | set_my_cpu_offset(per_cpu_offset(smp_processor_id())); |
314 | } | 317 | } |
315 | 318 | ||
316 | static void (*smp_cross_call)(const struct cpumask *, unsigned int); | ||
317 | |||
318 | /* | 319 | /* |
319 | * Enumerate the possible CPU set from the device tree and build the | 320 | * Enumerate the possible CPU set from the device tree and build the |
320 | * cpu logical map array containing MPIDR values related to logical | 321 | * cpu logical map array containing MPIDR values related to logical |
@@ -469,32 +470,15 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
469 | } | 470 | } |
470 | } | 471 | } |
471 | 472 | ||
473 | static void (*__smp_cross_call)(const struct cpumask *, unsigned int); | ||
472 | 474 | ||
473 | void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int)) | 475 | void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int)) |
474 | { | 476 | { |
475 | smp_cross_call = fn; | 477 | __smp_cross_call = fn; |
476 | } | 478 | } |
477 | 479 | ||
478 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) | 480 | static const char *ipi_types[NR_IPI] __tracepoint_string = { |
479 | { | 481 | #define S(x,s) [x] = s |
480 | smp_cross_call(mask, IPI_CALL_FUNC); | ||
481 | } | ||
482 | |||
483 | void arch_send_call_function_single_ipi(int cpu) | ||
484 | { | ||
485 | smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); | ||
486 | } | ||
487 | |||
488 | #ifdef CONFIG_IRQ_WORK | ||
489 | void arch_irq_work_raise(void) | ||
490 | { | ||
491 | if (smp_cross_call) | ||
492 | smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK); | ||
493 | } | ||
494 | #endif | ||
495 | |||
496 | static const char *ipi_types[NR_IPI] = { | ||
497 | #define S(x,s) [x - IPI_RESCHEDULE] = s | ||
498 | S(IPI_RESCHEDULE, "Rescheduling interrupts"), | 482 | S(IPI_RESCHEDULE, "Rescheduling interrupts"), |
499 | S(IPI_CALL_FUNC, "Function call interrupts"), | 483 | S(IPI_CALL_FUNC, "Function call interrupts"), |
500 | S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"), | 484 | S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"), |
@@ -503,12 +487,18 @@ static const char *ipi_types[NR_IPI] = { | |||
503 | S(IPI_IRQ_WORK, "IRQ work interrupts"), | 487 | S(IPI_IRQ_WORK, "IRQ work interrupts"), |
504 | }; | 488 | }; |
505 | 489 | ||
490 | static void smp_cross_call(const struct cpumask *target, unsigned int ipinr) | ||
491 | { | ||
492 | trace_ipi_raise(target, ipi_types[ipinr]); | ||
493 | __smp_cross_call(target, ipinr); | ||
494 | } | ||
495 | |||
506 | void show_ipi_list(struct seq_file *p, int prec) | 496 | void show_ipi_list(struct seq_file *p, int prec) |
507 | { | 497 | { |
508 | unsigned int cpu, i; | 498 | unsigned int cpu, i; |
509 | 499 | ||
510 | for (i = 0; i < NR_IPI; i++) { | 500 | for (i = 0; i < NR_IPI; i++) { |
511 | seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE, | 501 | seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, |
512 | prec >= 4 ? " " : ""); | 502 | prec >= 4 ? " " : ""); |
513 | for_each_online_cpu(cpu) | 503 | for_each_online_cpu(cpu) |
514 | seq_printf(p, "%10u ", | 504 | seq_printf(p, "%10u ", |
@@ -528,6 +518,24 @@ u64 smp_irq_stat_cpu(unsigned int cpu) | |||
528 | return sum; | 518 | return sum; |
529 | } | 519 | } |
530 | 520 | ||
521 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) | ||
522 | { | ||
523 | smp_cross_call(mask, IPI_CALL_FUNC); | ||
524 | } | ||
525 | |||
526 | void arch_send_call_function_single_ipi(int cpu) | ||
527 | { | ||
528 | smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); | ||
529 | } | ||
530 | |||
531 | #ifdef CONFIG_IRQ_WORK | ||
532 | void arch_irq_work_raise(void) | ||
533 | { | ||
534 | if (__smp_cross_call) | ||
535 | smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK); | ||
536 | } | ||
537 | #endif | ||
538 | |||
531 | static DEFINE_RAW_SPINLOCK(stop_lock); | 539 | static DEFINE_RAW_SPINLOCK(stop_lock); |
532 | 540 | ||
533 | /* | 541 | /* |
@@ -559,8 +567,10 @@ void handle_IPI(int ipinr, struct pt_regs *regs) | |||
559 | unsigned int cpu = smp_processor_id(); | 567 | unsigned int cpu = smp_processor_id(); |
560 | struct pt_regs *old_regs = set_irq_regs(regs); | 568 | struct pt_regs *old_regs = set_irq_regs(regs); |
561 | 569 | ||
562 | if (ipinr >= IPI_RESCHEDULE && ipinr < IPI_RESCHEDULE + NR_IPI) | 570 | if ((unsigned)ipinr < NR_IPI) { |
563 | __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_RESCHEDULE]); | 571 | trace_ipi_entry(ipi_types[ipinr]); |
572 | __inc_irq_stat(cpu, ipi_irqs[ipinr]); | ||
573 | } | ||
564 | 574 | ||
565 | switch (ipinr) { | 575 | switch (ipinr) { |
566 | case IPI_RESCHEDULE: | 576 | case IPI_RESCHEDULE: |
@@ -605,6 +615,9 @@ void handle_IPI(int ipinr, struct pt_regs *regs) | |||
605 | pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr); | 615 | pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr); |
606 | break; | 616 | break; |
607 | } | 617 | } |
618 | |||
619 | if ((unsigned)ipinr < NR_IPI) | ||
620 | trace_ipi_exit(ipi_types[ipinr]); | ||
608 | set_irq_regs(old_regs); | 621 | set_irq_regs(old_regs); |
609 | } | 622 | } |
610 | 623 | ||
diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c index b80c0b3d2bab..d309fbcc3bd6 100644 --- a/arch/avr32/kernel/signal.c +++ b/arch/avr32/kernel/signal.c | |||
@@ -127,24 +127,20 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) | |||
127 | } | 127 | } |
128 | 128 | ||
129 | static inline void __user * | 129 | static inline void __user * |
130 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize) | 130 | get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize) |
131 | { | 131 | { |
132 | unsigned long sp = regs->sp; | 132 | unsigned long sp = sigsp(regs->sp, ksig); |
133 | |||
134 | if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) | ||
135 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
136 | 133 | ||
137 | return (void __user *)((sp - framesize) & ~3); | 134 | return (void __user *)((sp - framesize) & ~3); |
138 | } | 135 | } |
139 | 136 | ||
140 | static int | 137 | static int |
141 | setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 138 | setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) |
142 | sigset_t *set, struct pt_regs *regs) | ||
143 | { | 139 | { |
144 | struct rt_sigframe __user *frame; | 140 | struct rt_sigframe __user *frame; |
145 | int err = 0; | 141 | int err = 0; |
146 | 142 | ||
147 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 143 | frame = get_sigframe(ksig, regs, sizeof(*frame)); |
148 | err = -EFAULT; | 144 | err = -EFAULT; |
149 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) | 145 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) |
150 | goto out; | 146 | goto out; |
@@ -164,7 +160,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
164 | err = __put_user(0x3008d733 | (__NR_rt_sigreturn << 20), | 160 | err = __put_user(0x3008d733 | (__NR_rt_sigreturn << 20), |
165 | &frame->retcode); | 161 | &frame->retcode); |
166 | 162 | ||
167 | err |= copy_siginfo_to_user(&frame->info, info); | 163 | err |= copy_siginfo_to_user(&frame->info, &ksig->info); |
168 | 164 | ||
169 | /* Set up the ucontext */ | 165 | /* Set up the ucontext */ |
170 | err |= __put_user(0, &frame->uc.uc_flags); | 166 | err |= __put_user(0, &frame->uc.uc_flags); |
@@ -176,12 +172,12 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
176 | if (err) | 172 | if (err) |
177 | goto out; | 173 | goto out; |
178 | 174 | ||
179 | regs->r12 = sig; | 175 | regs->r12 = ksig->sig; |
180 | regs->r11 = (unsigned long) &frame->info; | 176 | regs->r11 = (unsigned long) &frame->info; |
181 | regs->r10 = (unsigned long) &frame->uc; | 177 | regs->r10 = (unsigned long) &frame->uc; |
182 | regs->sp = (unsigned long) frame; | 178 | regs->sp = (unsigned long) frame; |
183 | if (ka->sa.sa_flags & SA_RESTORER) | 179 | if (ksig->ka.sa.sa_flags & SA_RESTORER) |
184 | regs->lr = (unsigned long)ka->sa.sa_restorer; | 180 | regs->lr = (unsigned long)ksig->ka.sa.sa_restorer; |
185 | else { | 181 | else { |
186 | printk(KERN_NOTICE "[%s:%d] did not set SA_RESTORER\n", | 182 | printk(KERN_NOTICE "[%s:%d] did not set SA_RESTORER\n", |
187 | current->comm, current->pid); | 183 | current->comm, current->pid); |
@@ -189,10 +185,10 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
189 | } | 185 | } |
190 | 186 | ||
191 | pr_debug("SIG deliver [%s:%d]: sig=%d sp=0x%lx pc=0x%lx->0x%p lr=0x%lx\n", | 187 | pr_debug("SIG deliver [%s:%d]: sig=%d sp=0x%lx pc=0x%lx->0x%p lr=0x%lx\n", |
192 | current->comm, current->pid, sig, regs->sp, | 188 | current->comm, current->pid, ksig->sig, regs->sp, |
193 | regs->pc, ka->sa.sa_handler, regs->lr); | 189 | regs->pc, ksig->ka.sa.sa_handler, regs->lr); |
194 | 190 | ||
195 | regs->pc = (unsigned long) ka->sa.sa_handler; | 191 | regs->pc = (unsigned long)ksig->ka.sa.sa_handler; |
196 | 192 | ||
197 | out: | 193 | out: |
198 | return err; | 194 | return err; |
@@ -208,15 +204,14 @@ static inline void setup_syscall_restart(struct pt_regs *regs) | |||
208 | } | 204 | } |
209 | 205 | ||
210 | static inline void | 206 | static inline void |
211 | handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | 207 | handle_signal(struct ksignal *ksig, struct pt_regs *regs, int syscall) |
212 | struct pt_regs *regs, int syscall) | ||
213 | { | 208 | { |
214 | int ret; | 209 | int ret; |
215 | 210 | ||
216 | /* | 211 | /* |
217 | * Set up the stack frame | 212 | * Set up the stack frame |
218 | */ | 213 | */ |
219 | ret = setup_rt_frame(sig, ka, info, sigmask_to_save(), regs); | 214 | ret = setup_rt_frame(ksig, sigmask_to_save(), regs); |
220 | 215 | ||
221 | /* | 216 | /* |
222 | * Check that the resulting registers are sane | 217 | * Check that the resulting registers are sane |
@@ -226,10 +221,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | |||
226 | /* | 221 | /* |
227 | * Block the signal if we were successful. | 222 | * Block the signal if we were successful. |
228 | */ | 223 | */ |
229 | if (ret != 0) | 224 | signal_setup_done(ret, ksig, 0); |
230 | force_sigsegv(sig, current); | ||
231 | else | ||
232 | signal_delivered(sig, info, ka, regs, 0); | ||
233 | } | 225 | } |
234 | 226 | ||
235 | /* | 227 | /* |
@@ -239,9 +231,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | |||
239 | */ | 231 | */ |
240 | static void do_signal(struct pt_regs *regs, int syscall) | 232 | static void do_signal(struct pt_regs *regs, int syscall) |
241 | { | 233 | { |
242 | siginfo_t info; | 234 | struct ksignal ksig; |
243 | int signr; | ||
244 | struct k_sigaction ka; | ||
245 | 235 | ||
246 | /* | 236 | /* |
247 | * We want the common case to go fast, which is why we may in | 237 | * We want the common case to go fast, which is why we may in |
@@ -251,18 +241,18 @@ static void do_signal(struct pt_regs *regs, int syscall) | |||
251 | if (!user_mode(regs)) | 241 | if (!user_mode(regs)) |
252 | return; | 242 | return; |
253 | 243 | ||
254 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 244 | get_signal(&ksig); |
255 | if (syscall) { | 245 | if (syscall) { |
256 | switch (regs->r12) { | 246 | switch (regs->r12) { |
257 | case -ERESTART_RESTARTBLOCK: | 247 | case -ERESTART_RESTARTBLOCK: |
258 | case -ERESTARTNOHAND: | 248 | case -ERESTARTNOHAND: |
259 | if (signr > 0) { | 249 | if (ksig.sig > 0) { |
260 | regs->r12 = -EINTR; | 250 | regs->r12 = -EINTR; |
261 | break; | 251 | break; |
262 | } | 252 | } |
263 | /* fall through */ | 253 | /* fall through */ |
264 | case -ERESTARTSYS: | 254 | case -ERESTARTSYS: |
265 | if (signr > 0 && !(ka.sa.sa_flags & SA_RESTART)) { | 255 | if (ksig.sig > 0 && !(ksig.ka.sa.sa_flags & SA_RESTART)) { |
266 | regs->r12 = -EINTR; | 256 | regs->r12 = -EINTR; |
267 | break; | 257 | break; |
268 | } | 258 | } |
@@ -272,13 +262,13 @@ static void do_signal(struct pt_regs *regs, int syscall) | |||
272 | } | 262 | } |
273 | } | 263 | } |
274 | 264 | ||
275 | if (signr == 0) { | 265 | if (!ksig.sig) { |
276 | /* No signal to deliver -- put the saved sigmask back */ | 266 | /* No signal to deliver -- put the saved sigmask back */ |
277 | restore_saved_sigmask(); | 267 | restore_saved_sigmask(); |
278 | return; | 268 | return; |
279 | } | 269 | } |
280 | 270 | ||
281 | handle_signal(signr, &ka, &info, regs, syscall); | 271 | handle_signal(&ksig, regs, syscall); |
282 | } | 272 | } |
283 | 273 | ||
284 | asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti) | 274 | asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti) |
diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c index b022af6c48f8..ef275571d885 100644 --- a/arch/blackfin/kernel/signal.c +++ b/arch/blackfin/kernel/signal.c | |||
@@ -135,40 +135,31 @@ static inline int rt_setup_sigcontext(struct sigcontext *sc, struct pt_regs *reg | |||
135 | return err; | 135 | return err; |
136 | } | 136 | } |
137 | 137 | ||
138 | static inline void *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, | 138 | static inline void *get_sigframe(struct ksignal *ksig, |
139 | size_t frame_size) | 139 | size_t frame_size) |
140 | { | 140 | { |
141 | unsigned long usp; | 141 | unsigned long usp = sigsp(rdusp(), ksig); |
142 | 142 | ||
143 | /* Default to using normal stack. */ | ||
144 | usp = rdusp(); | ||
145 | |||
146 | /* This is the X/Open sanctioned signal stack switching. */ | ||
147 | if (ka->sa.sa_flags & SA_ONSTACK) { | ||
148 | if (!on_sig_stack(usp)) | ||
149 | usp = current->sas_ss_sp + current->sas_ss_size; | ||
150 | } | ||
151 | return (void *)((usp - frame_size) & -8UL); | 143 | return (void *)((usp - frame_size) & -8UL); |
152 | } | 144 | } |
153 | 145 | ||
154 | static int | 146 | static int |
155 | setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info, | 147 | setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) |
156 | sigset_t * set, struct pt_regs *regs) | ||
157 | { | 148 | { |
158 | struct rt_sigframe *frame; | 149 | struct rt_sigframe *frame; |
159 | int err = 0; | 150 | int err = 0; |
160 | 151 | ||
161 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 152 | frame = get_sigframe(ksig, sizeof(*frame)); |
162 | 153 | ||
163 | err |= __put_user((current_thread_info()->exec_domain | 154 | err |= __put_user((current_thread_info()->exec_domain |
164 | && current_thread_info()->exec_domain->signal_invmap | 155 | && current_thread_info()->exec_domain->signal_invmap |
165 | && sig < 32 | 156 | && ksig->sig < 32 |
166 | ? current_thread_info()->exec_domain-> | 157 | ? current_thread_info()->exec_domain-> |
167 | signal_invmap[sig] : sig), &frame->sig); | 158 | signal_invmap[ksig->sig] : ksig->sig), &frame->sig); |
168 | 159 | ||
169 | err |= __put_user(&frame->info, &frame->pinfo); | 160 | err |= __put_user(&frame->info, &frame->pinfo); |
170 | err |= __put_user(&frame->uc, &frame->puc); | 161 | err |= __put_user(&frame->uc, &frame->puc); |
171 | err |= copy_siginfo_to_user(&frame->info, info); | 162 | err |= copy_siginfo_to_user(&frame->info, &ksig->info); |
172 | 163 | ||
173 | /* Create the ucontext. */ | 164 | /* Create the ucontext. */ |
174 | err |= __put_user(0, &frame->uc.uc_flags); | 165 | err |= __put_user(0, &frame->uc.uc_flags); |
@@ -183,7 +174,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info, | |||
183 | /* Set up registers for signal handler */ | 174 | /* Set up registers for signal handler */ |
184 | if (current->personality & FDPIC_FUNCPTRS) { | 175 | if (current->personality & FDPIC_FUNCPTRS) { |
185 | struct fdpic_func_descriptor __user *funcptr = | 176 | struct fdpic_func_descriptor __user *funcptr = |
186 | (struct fdpic_func_descriptor *) ka->sa.sa_handler; | 177 | (struct fdpic_func_descriptor *) ksig->ka.sa.sa_handler; |
187 | u32 pc, p3; | 178 | u32 pc, p3; |
188 | err |= __get_user(pc, &funcptr->text); | 179 | err |= __get_user(pc, &funcptr->text); |
189 | err |= __get_user(p3, &funcptr->GOT); | 180 | err |= __get_user(p3, &funcptr->GOT); |
@@ -192,7 +183,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info, | |||
192 | regs->pc = pc; | 183 | regs->pc = pc; |
193 | regs->p3 = p3; | 184 | regs->p3 = p3; |
194 | } else | 185 | } else |
195 | regs->pc = (unsigned long)ka->sa.sa_handler; | 186 | regs->pc = (unsigned long)ksig->ka.sa.sa_handler; |
196 | wrusp((unsigned long)frame); | 187 | wrusp((unsigned long)frame); |
197 | regs->rets = SIGRETURN_STUB; | 188 | regs->rets = SIGRETURN_STUB; |
198 | 189 | ||
@@ -237,20 +228,19 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) | |||
237 | * OK, we're invoking a handler | 228 | * OK, we're invoking a handler |
238 | */ | 229 | */ |
239 | static void | 230 | static void |
240 | handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka, | 231 | handle_signal(struct ksignal *ksig, struct pt_regs *regs) |
241 | struct pt_regs *regs) | ||
242 | { | 232 | { |
233 | int ret; | ||
234 | |||
243 | /* are we from a system call? to see pt_regs->orig_p0 */ | 235 | /* are we from a system call? to see pt_regs->orig_p0 */ |
244 | if (regs->orig_p0 >= 0) | 236 | if (regs->orig_p0 >= 0) |
245 | /* If so, check system call restarting.. */ | 237 | /* If so, check system call restarting.. */ |
246 | handle_restart(regs, ka, 1); | 238 | handle_restart(regs, &ksig->ka, 1); |
247 | 239 | ||
248 | /* set up the stack frame */ | 240 | /* set up the stack frame */ |
249 | if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs) < 0) | 241 | ret = setup_rt_frame(ksig, sigmask_to_save(), regs); |
250 | force_sigsegv(sig, current); | 242 | |
251 | else | 243 | signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP)); |
252 | signal_delivered(sig, info, ka, regs, | ||
253 | test_thread_flag(TIF_SINGLESTEP)); | ||
254 | } | 244 | } |
255 | 245 | ||
256 | /* | 246 | /* |
@@ -264,16 +254,13 @@ handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka, | |||
264 | */ | 254 | */ |
265 | asmlinkage void do_signal(struct pt_regs *regs) | 255 | asmlinkage void do_signal(struct pt_regs *regs) |
266 | { | 256 | { |
267 | siginfo_t info; | 257 | struct ksignal ksig; |
268 | int signr; | ||
269 | struct k_sigaction ka; | ||
270 | 258 | ||
271 | current->thread.esp0 = (unsigned long)regs; | 259 | current->thread.esp0 = (unsigned long)regs; |
272 | 260 | ||
273 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 261 | if (get_signal(&ksig)) { |
274 | if (signr > 0) { | ||
275 | /* Whee! Actually deliver the signal. */ | 262 | /* Whee! Actually deliver the signal. */ |
276 | handle_signal(signr, &info, &ka, regs); | 263 | handle_signal(&ksig, regs); |
277 | return; | 264 | return; |
278 | } | 265 | } |
279 | 266 | ||
diff --git a/arch/c6x/kernel/signal.c b/arch/c6x/kernel/signal.c index 3998b24e26f2..fe68226f6c4d 100644 --- a/arch/c6x/kernel/signal.c +++ b/arch/c6x/kernel/signal.c | |||
@@ -127,17 +127,11 @@ static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, | |||
127 | return err; | 127 | return err; |
128 | } | 128 | } |
129 | 129 | ||
130 | static inline void __user *get_sigframe(struct k_sigaction *ka, | 130 | static inline void __user *get_sigframe(struct ksignal *ksig, |
131 | struct pt_regs *regs, | 131 | struct pt_regs *regs, |
132 | unsigned long framesize) | 132 | unsigned long framesize) |
133 | { | 133 | { |
134 | unsigned long sp = regs->sp; | 134 | unsigned long sp = sigsp(regs->sp, ksig); |
135 | |||
136 | /* | ||
137 | * This is the X/Open sanctioned signal stack switching. | ||
138 | */ | ||
139 | if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags(sp) == 0) | ||
140 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
141 | 135 | ||
142 | /* | 136 | /* |
143 | * No matter what happens, 'sp' must be dword | 137 | * No matter what happens, 'sp' must be dword |
@@ -146,21 +140,21 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, | |||
146 | return (void __user *)((sp - framesize) & ~7); | 140 | return (void __user *)((sp - framesize) & ~7); |
147 | } | 141 | } |
148 | 142 | ||
149 | static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info, | 143 | static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, |
150 | sigset_t *set, struct pt_regs *regs) | 144 | struct pt_regs *regs) |
151 | { | 145 | { |
152 | struct rt_sigframe __user *frame; | 146 | struct rt_sigframe __user *frame; |
153 | unsigned long __user *retcode; | 147 | unsigned long __user *retcode; |
154 | int err = 0; | 148 | int err = 0; |
155 | 149 | ||
156 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 150 | frame = get_sigframe(ksig, regs, sizeof(*frame)); |
157 | 151 | ||
158 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 152 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
159 | goto segv_and_exit; | 153 | return -EFAULT; |
160 | 154 | ||
161 | err |= __put_user(&frame->info, &frame->pinfo); | 155 | err |= __put_user(&frame->info, &frame->pinfo); |
162 | err |= __put_user(&frame->uc, &frame->puc); | 156 | err |= __put_user(&frame->uc, &frame->puc); |
163 | err |= copy_siginfo_to_user(&frame->info, info); | 157 | err |= copy_siginfo_to_user(&frame->info, &ksig->info); |
164 | 158 | ||
165 | /* Clear all the bits of the ucontext we don't use. */ | 159 | /* Clear all the bits of the ucontext we don't use. */ |
166 | err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext)); | 160 | err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext)); |
@@ -188,7 +182,7 @@ static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info, | |||
188 | #undef COPY | 182 | #undef COPY |
189 | 183 | ||
190 | if (err) | 184 | if (err) |
191 | goto segv_and_exit; | 185 | return -EFAULT; |
192 | 186 | ||
193 | flush_icache_range((unsigned long) &frame->retcode, | 187 | flush_icache_range((unsigned long) &frame->retcode, |
194 | (unsigned long) &frame->retcode + RETCODE_SIZE); | 188 | (unsigned long) &frame->retcode + RETCODE_SIZE); |
@@ -198,10 +192,10 @@ static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info, | |||
198 | /* Change user context to branch to signal handler */ | 192 | /* Change user context to branch to signal handler */ |
199 | regs->sp = (unsigned long) frame - 8; | 193 | regs->sp = (unsigned long) frame - 8; |
200 | regs->b3 = (unsigned long) retcode; | 194 | regs->b3 = (unsigned long) retcode; |
201 | regs->pc = (unsigned long) ka->sa.sa_handler; | 195 | regs->pc = (unsigned long) ksig->ka.sa.sa_handler; |
202 | 196 | ||
203 | /* Give the signal number to the handler */ | 197 | /* Give the signal number to the handler */ |
204 | regs->a4 = signr; | 198 | regs->a4 = ksig->sig; |
205 | 199 | ||
206 | /* | 200 | /* |
207 | * For realtime signals we must also set the second and third | 201 | * For realtime signals we must also set the second and third |
@@ -212,10 +206,6 @@ static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info, | |||
212 | regs->a6 = (unsigned long)&frame->uc; | 206 | regs->a6 = (unsigned long)&frame->uc; |
213 | 207 | ||
214 | return 0; | 208 | return 0; |
215 | |||
216 | segv_and_exit: | ||
217 | force_sigsegv(signr, current); | ||
218 | return -EFAULT; | ||
219 | } | 209 | } |
220 | 210 | ||
221 | static inline void | 211 | static inline void |
@@ -245,10 +235,11 @@ do_restart: | |||
245 | /* | 235 | /* |
246 | * handle the actual delivery of a signal to userspace | 236 | * handle the actual delivery of a signal to userspace |
247 | */ | 237 | */ |
248 | static void handle_signal(int sig, | 238 | static void handle_signal(struct ksignal *ksig, struct pt_regs *regs, |
249 | siginfo_t *info, struct k_sigaction *ka, | 239 | int syscall) |
250 | struct pt_regs *regs, int syscall) | ||
251 | { | 240 | { |
241 | int ret; | ||
242 | |||
252 | /* Are we from a system call? */ | 243 | /* Are we from a system call? */ |
253 | if (syscall) { | 244 | if (syscall) { |
254 | /* If so, check system call restarting.. */ | 245 | /* If so, check system call restarting.. */ |
@@ -259,7 +250,7 @@ static void handle_signal(int sig, | |||
259 | break; | 250 | break; |
260 | 251 | ||
261 | case -ERESTARTSYS: | 252 | case -ERESTARTSYS: |
262 | if (!(ka->sa.sa_flags & SA_RESTART)) { | 253 | if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { |
263 | regs->a4 = -EINTR; | 254 | regs->a4 = -EINTR; |
264 | break; | 255 | break; |
265 | } | 256 | } |
@@ -272,9 +263,8 @@ static void handle_signal(int sig, | |||
272 | } | 263 | } |
273 | 264 | ||
274 | /* Set up the stack frame */ | 265 | /* Set up the stack frame */ |
275 | if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs) < 0) | 266 | ret = setup_rt_frame(ksig, sigmask_to_save(), regs); |
276 | return; | 267 | signal_setup_done(ret, ksig, 0); |
277 | signal_delivered(sig, info, ka, regs, 0); | ||
278 | } | 268 | } |
279 | 269 | ||
280 | /* | 270 | /* |
@@ -282,18 +272,15 @@ static void handle_signal(int sig, | |||
282 | */ | 272 | */ |
283 | static void do_signal(struct pt_regs *regs, int syscall) | 273 | static void do_signal(struct pt_regs *regs, int syscall) |
284 | { | 274 | { |
285 | struct k_sigaction ka; | 275 | struct ksignal ksig; |
286 | siginfo_t info; | ||
287 | int signr; | ||
288 | 276 | ||
289 | /* we want the common case to go fast, which is why we may in certain | 277 | /* we want the common case to go fast, which is why we may in certain |
290 | * cases get here from kernel mode */ | 278 | * cases get here from kernel mode */ |
291 | if (!user_mode(regs)) | 279 | if (!user_mode(regs)) |
292 | return; | 280 | return; |
293 | 281 | ||
294 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 282 | if (get_signal(&ksig)) { |
295 | if (signr > 0) { | 283 | handle_signal(&ksig, regs, syscall); |
296 | handle_signal(signr, &info, &ka, regs, syscall); | ||
297 | return; | 284 | return; |
298 | } | 285 | } |
299 | 286 | ||
diff --git a/arch/cris/arch-v10/kernel/signal.c b/arch/cris/arch-v10/kernel/signal.c index 61ce6273a895..9b32d338838b 100644 --- a/arch/cris/arch-v10/kernel/signal.c +++ b/arch/cris/arch-v10/kernel/signal.c | |||
@@ -203,15 +203,9 @@ static int setup_sigcontext(struct sigcontext __user *sc, | |||
203 | * - usually on the stack. */ | 203 | * - usually on the stack. */ |
204 | 204 | ||
205 | static inline void __user * | 205 | static inline void __user * |
206 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) | 206 | get_sigframe(struct ksignal *ksig, size_t frame_size) |
207 | { | 207 | { |
208 | unsigned long sp = rdusp(); | 208 | unsigned long sp = sigsp(rdusp(), ksig); |
209 | |||
210 | /* This is the X/Open sanctioned signal stack switching. */ | ||
211 | if (ka->sa.sa_flags & SA_ONSTACK) { | ||
212 | if (! on_sig_stack(sp)) | ||
213 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
214 | } | ||
215 | 209 | ||
216 | /* make sure the frame is dword-aligned */ | 210 | /* make sure the frame is dword-aligned */ |
217 | 211 | ||
@@ -228,33 +222,33 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) | |||
228 | * user-mode trampoline. | 222 | * user-mode trampoline. |
229 | */ | 223 | */ |
230 | 224 | ||
231 | static int setup_frame(int sig, struct k_sigaction *ka, | 225 | static int setup_frame(struct ksignal *ksig, sigset_t *set, |
232 | sigset_t *set, struct pt_regs *regs) | 226 | struct pt_regs *regs) |
233 | { | 227 | { |
234 | struct sigframe __user *frame; | 228 | struct sigframe __user *frame; |
235 | unsigned long return_ip; | 229 | unsigned long return_ip; |
236 | int err = 0; | 230 | int err = 0; |
237 | 231 | ||
238 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 232 | frame = get_sigframe(ksig, sizeof(*frame)); |
239 | 233 | ||
240 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 234 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
241 | goto give_sigsegv; | 235 | return -EFAULT; |
242 | 236 | ||
243 | err |= setup_sigcontext(&frame->sc, regs, set->sig[0]); | 237 | err |= setup_sigcontext(&frame->sc, regs, set->sig[0]); |
244 | if (err) | 238 | if (err) |
245 | goto give_sigsegv; | 239 | return -EFAULT; |
246 | 240 | ||
247 | if (_NSIG_WORDS > 1) { | 241 | if (_NSIG_WORDS > 1) { |
248 | err |= __copy_to_user(frame->extramask, &set->sig[1], | 242 | err |= __copy_to_user(frame->extramask, &set->sig[1], |
249 | sizeof(frame->extramask)); | 243 | sizeof(frame->extramask)); |
250 | } | 244 | } |
251 | if (err) | 245 | if (err) |
252 | goto give_sigsegv; | 246 | return -EFAULT; |
253 | 247 | ||
254 | /* Set up to return from userspace. If provided, use a stub | 248 | /* Set up to return from userspace. If provided, use a stub |
255 | already in userspace. */ | 249 | already in userspace. */ |
256 | if (ka->sa.sa_flags & SA_RESTORER) { | 250 | if (ksig->ka.sa.sa_flags & SA_RESTORER) { |
257 | return_ip = (unsigned long)ka->sa.sa_restorer; | 251 | return_ip = (unsigned long)ksig->ka.sa.sa_restorer; |
258 | } else { | 252 | } else { |
259 | /* trampoline - the desired return ip is the retcode itself */ | 253 | /* trampoline - the desired return ip is the retcode itself */ |
260 | return_ip = (unsigned long)&frame->retcode; | 254 | return_ip = (unsigned long)&frame->retcode; |
@@ -265,42 +259,38 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
265 | } | 259 | } |
266 | 260 | ||
267 | if (err) | 261 | if (err) |
268 | goto give_sigsegv; | 262 | return -EFAULT; |
269 | 263 | ||
270 | /* Set up registers for signal handler */ | 264 | /* Set up registers for signal handler */ |
271 | 265 | ||
272 | regs->irp = (unsigned long) ka->sa.sa_handler; /* what we enter NOW */ | 266 | regs->irp = (unsigned long) ksig->ka.sa.sa_handler; /* what we enter NOW */ |
273 | regs->srp = return_ip; /* what we enter LATER */ | 267 | regs->srp = return_ip; /* what we enter LATER */ |
274 | regs->r10 = sig; /* first argument is signo */ | 268 | regs->r10 = ksig->sig; /* first argument is signo */ |
275 | 269 | ||
276 | /* actually move the usp to reflect the stacked frame */ | 270 | /* actually move the usp to reflect the stacked frame */ |
277 | 271 | ||
278 | wrusp((unsigned long)frame); | 272 | wrusp((unsigned long)frame); |
279 | 273 | ||
280 | return 0; | 274 | return 0; |
281 | |||
282 | give_sigsegv: | ||
283 | force_sigsegv(sig, current); | ||
284 | return -EFAULT; | ||
285 | } | 275 | } |
286 | 276 | ||
287 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 277 | static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, |
288 | sigset_t *set, struct pt_regs *regs) | 278 | struct pt_regs *regs) |
289 | { | 279 | { |
290 | struct rt_sigframe __user *frame; | 280 | struct rt_sigframe __user *frame; |
291 | unsigned long return_ip; | 281 | unsigned long return_ip; |
292 | int err = 0; | 282 | int err = 0; |
293 | 283 | ||
294 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 284 | frame = get_sigframe(ksig, sizeof(*frame)); |
295 | 285 | ||
296 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 286 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
297 | goto give_sigsegv; | 287 | return -EFAULT; |
298 | 288 | ||
299 | err |= __put_user(&frame->info, &frame->pinfo); | 289 | err |= __put_user(&frame->info, &frame->pinfo); |
300 | err |= __put_user(&frame->uc, &frame->puc); | 290 | err |= __put_user(&frame->uc, &frame->puc); |
301 | err |= copy_siginfo_to_user(&frame->info, info); | 291 | err |= copy_siginfo_to_user(&frame->info, &ksig->info); |
302 | if (err) | 292 | if (err) |
303 | goto give_sigsegv; | 293 | return -EFAULT; |
304 | 294 | ||
305 | /* Clear all the bits of the ucontext we don't use. */ | 295 | /* Clear all the bits of the ucontext we don't use. */ |
306 | err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext)); | 296 | err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext)); |
@@ -312,12 +302,12 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
312 | err |= __save_altstack(&frame->uc.uc_stack, rdusp()); | 302 | err |= __save_altstack(&frame->uc.uc_stack, rdusp()); |
313 | 303 | ||
314 | if (err) | 304 | if (err) |
315 | goto give_sigsegv; | 305 | return -EFAULT; |
316 | 306 | ||
317 | /* Set up to return from userspace. If provided, use a stub | 307 | /* Set up to return from userspace. If provided, use a stub |
318 | already in userspace. */ | 308 | already in userspace. */ |
319 | if (ka->sa.sa_flags & SA_RESTORER) { | 309 | if (ksig->ka.sa.sa_flags & SA_RESTORER) { |
320 | return_ip = (unsigned long)ka->sa.sa_restorer; | 310 | return_ip = (unsigned long)ksig->ka.sa.sa_restorer; |
321 | } else { | 311 | } else { |
322 | /* trampoline - the desired return ip is the retcode itself */ | 312 | /* trampoline - the desired return ip is the retcode itself */ |
323 | return_ip = (unsigned long)&frame->retcode; | 313 | return_ip = (unsigned long)&frame->retcode; |
@@ -329,18 +319,18 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
329 | } | 319 | } |
330 | 320 | ||
331 | if (err) | 321 | if (err) |
332 | goto give_sigsegv; | 322 | return -EFAULT; |
333 | 323 | ||
334 | /* TODO what is the current->exec_domain stuff and invmap ? */ | 324 | /* TODO what is the current->exec_domain stuff and invmap ? */ |
335 | 325 | ||
336 | /* Set up registers for signal handler */ | 326 | /* Set up registers for signal handler */ |
337 | 327 | ||
338 | /* What we enter NOW */ | 328 | /* What we enter NOW */ |
339 | regs->irp = (unsigned long) ka->sa.sa_handler; | 329 | regs->irp = (unsigned long) ksig->ka.sa.sa_handler; |
340 | /* What we enter LATER */ | 330 | /* What we enter LATER */ |
341 | regs->srp = return_ip; | 331 | regs->srp = return_ip; |
342 | /* First argument is signo */ | 332 | /* First argument is signo */ |
343 | regs->r10 = sig; | 333 | regs->r10 = ksig->sig; |
344 | /* Second argument is (siginfo_t *) */ | 334 | /* Second argument is (siginfo_t *) */ |
345 | regs->r11 = (unsigned long)&frame->info; | 335 | regs->r11 = (unsigned long)&frame->info; |
346 | /* Third argument is unused */ | 336 | /* Third argument is unused */ |
@@ -350,19 +340,14 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
350 | wrusp((unsigned long)frame); | 340 | wrusp((unsigned long)frame); |
351 | 341 | ||
352 | return 0; | 342 | return 0; |
353 | |||
354 | give_sigsegv: | ||
355 | force_sigsegv(sig, current); | ||
356 | return -EFAULT; | ||
357 | } | 343 | } |
358 | 344 | ||
359 | /* | 345 | /* |
360 | * OK, we're invoking a handler | 346 | * OK, we're invoking a handler |
361 | */ | 347 | */ |
362 | 348 | ||
363 | static inline void handle_signal(int canrestart, unsigned long sig, | 349 | static inline void handle_signal(int canrestart, struct ksignal *ksig, |
364 | siginfo_t *info, struct k_sigaction *ka, | 350 | struct pt_regs *regs) |
365 | struct pt_regs *regs) | ||
366 | { | 351 | { |
367 | sigset_t *oldset = sigmask_to_save(); | 352 | sigset_t *oldset = sigmask_to_save(); |
368 | int ret; | 353 | int ret; |
@@ -383,7 +368,7 @@ static inline void handle_signal(int canrestart, unsigned long sig, | |||
383 | /* ERESTARTSYS means to restart the syscall if | 368 | /* ERESTARTSYS means to restart the syscall if |
384 | * there is no handler or the handler was | 369 | * there is no handler or the handler was |
385 | * registered with SA_RESTART */ | 370 | * registered with SA_RESTART */ |
386 | if (!(ka->sa.sa_flags & SA_RESTART)) { | 371 | if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { |
387 | regs->r10 = -EINTR; | 372 | regs->r10 = -EINTR; |
388 | break; | 373 | break; |
389 | } | 374 | } |
@@ -396,13 +381,12 @@ static inline void handle_signal(int canrestart, unsigned long sig, | |||
396 | } | 381 | } |
397 | 382 | ||
398 | /* Set up the stack frame */ | 383 | /* Set up the stack frame */ |
399 | if (ka->sa.sa_flags & SA_SIGINFO) | 384 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) |
400 | ret = setup_rt_frame(sig, ka, info, oldset, regs); | 385 | ret = setup_rt_frame(ksig, oldset, regs); |
401 | else | 386 | else |
402 | ret = setup_frame(sig, ka, oldset, regs); | 387 | ret = setup_frame(ksig, oldset, regs); |
403 | 388 | ||
404 | if (ret == 0) | 389 | signal_setup_done(ret, ksig, 0); |
405 | signal_delivered(sig, info, ka, regs, 0); | ||
406 | } | 390 | } |
407 | 391 | ||
408 | /* | 392 | /* |
@@ -419,9 +403,7 @@ static inline void handle_signal(int canrestart, unsigned long sig, | |||
419 | 403 | ||
420 | void do_signal(int canrestart, struct pt_regs *regs) | 404 | void do_signal(int canrestart, struct pt_regs *regs) |
421 | { | 405 | { |
422 | siginfo_t info; | 406 | struct ksignal ksig; |
423 | int signr; | ||
424 | struct k_sigaction ka; | ||
425 | 407 | ||
426 | /* | 408 | /* |
427 | * We want the common case to go fast, which | 409 | * We want the common case to go fast, which |
@@ -432,10 +414,9 @@ void do_signal(int canrestart, struct pt_regs *regs) | |||
432 | if (!user_mode(regs)) | 414 | if (!user_mode(regs)) |
433 | return; | 415 | return; |
434 | 416 | ||
435 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 417 | if (get_signal(&ksig)) { |
436 | if (signr > 0) { | ||
437 | /* Whee! Actually deliver the signal. */ | 418 | /* Whee! Actually deliver the signal. */ |
438 | handle_signal(canrestart, signr, &info, &ka, regs); | 419 | handle_signal(canrestart, &ksig, regs); |
439 | return; | 420 | return; |
440 | } | 421 | } |
441 | 422 | ||
diff --git a/arch/cris/arch-v32/kernel/signal.c b/arch/cris/arch-v32/kernel/signal.c index 01d1375c9004..78ce3b1c9bcb 100644 --- a/arch/cris/arch-v32/kernel/signal.c +++ b/arch/cris/arch-v32/kernel/signal.c | |||
@@ -189,17 +189,9 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, | |||
189 | 189 | ||
190 | /* Figure out where to put the new signal frame - usually on the stack. */ | 190 | /* Figure out where to put the new signal frame - usually on the stack. */ |
191 | static inline void __user * | 191 | static inline void __user * |
192 | get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) | 192 | get_sigframe(struct ksignal *ksig, size_t frame_size) |
193 | { | 193 | { |
194 | unsigned long sp; | 194 | unsigned long sp = sigsp(rdusp(), ksig); |
195 | |||
196 | sp = rdusp(); | ||
197 | |||
198 | /* This is the X/Open sanctioned signal stack switching. */ | ||
199 | if (ka->sa.sa_flags & SA_ONSTACK) { | ||
200 | if (!on_sig_stack(sp)) | ||
201 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
202 | } | ||
203 | 195 | ||
204 | /* Make sure the frame is dword-aligned. */ | 196 | /* Make sure the frame is dword-aligned. */ |
205 | sp &= ~3; | 197 | sp &= ~3; |
@@ -215,23 +207,22 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) | |||
215 | * trampoline. | 207 | * trampoline. |
216 | */ | 208 | */ |
217 | static int | 209 | static int |
218 | setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, | 210 | setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) |
219 | struct pt_regs * regs) | ||
220 | { | 211 | { |
221 | int err; | 212 | int err; |
222 | unsigned long return_ip; | 213 | unsigned long return_ip; |
223 | struct signal_frame __user *frame; | 214 | struct signal_frame __user *frame; |
224 | 215 | ||
225 | err = 0; | 216 | err = 0; |
226 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 217 | frame = get_sigframe(ksig, sizeof(*frame)); |
227 | 218 | ||
228 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 219 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
229 | goto give_sigsegv; | 220 | return -EFAULT; |
230 | 221 | ||
231 | err |= setup_sigcontext(&frame->sc, regs, set->sig[0]); | 222 | err |= setup_sigcontext(&frame->sc, regs, set->sig[0]); |
232 | 223 | ||
233 | if (err) | 224 | if (err) |
234 | goto give_sigsegv; | 225 | return -EFAULT; |
235 | 226 | ||
236 | if (_NSIG_WORDS > 1) { | 227 | if (_NSIG_WORDS > 1) { |
237 | err |= __copy_to_user(frame->extramask, &set->sig[1], | 228 | err |= __copy_to_user(frame->extramask, &set->sig[1], |
@@ -239,14 +230,14 @@ setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, | |||
239 | } | 230 | } |
240 | 231 | ||
241 | if (err) | 232 | if (err) |
242 | goto give_sigsegv; | 233 | return -EFAULT; |
243 | 234 | ||
244 | /* | 235 | /* |
245 | * Set up to return from user-space. If provided, use a stub | 236 | * Set up to return from user-space. If provided, use a stub |
246 | * already located in user-space. | 237 | * already located in user-space. |
247 | */ | 238 | */ |
248 | if (ka->sa.sa_flags & SA_RESTORER) { | 239 | if (ksig->ka.sa.sa_flags & SA_RESTORER) { |
249 | return_ip = (unsigned long)ka->sa.sa_restorer; | 240 | return_ip = (unsigned long)ksig->ka.sa.sa_restorer; |
250 | } else { | 241 | } else { |
251 | /* Trampoline - the desired return ip is in the signal return page. */ | 242 | /* Trampoline - the desired return ip is in the signal return page. */ |
252 | return_ip = cris_signal_return_page; | 243 | return_ip = cris_signal_return_page; |
@@ -264,7 +255,7 @@ setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, | |||
264 | } | 255 | } |
265 | 256 | ||
266 | if (err) | 257 | if (err) |
267 | goto give_sigsegv; | 258 | return -EFAULT; |
268 | 259 | ||
269 | /* | 260 | /* |
270 | * Set up registers for signal handler. | 261 | * Set up registers for signal handler. |
@@ -273,42 +264,37 @@ setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, | |||
273 | * Where the code enter later. | 264 | * Where the code enter later. |
274 | * First argument, signo. | 265 | * First argument, signo. |
275 | */ | 266 | */ |
276 | regs->erp = (unsigned long) ka->sa.sa_handler; | 267 | regs->erp = (unsigned long) ksig->ka.sa.sa_handler; |
277 | regs->srp = return_ip; | 268 | regs->srp = return_ip; |
278 | regs->r10 = sig; | 269 | regs->r10 = ksig->sig; |
279 | 270 | ||
280 | /* Actually move the USP to reflect the stacked frame. */ | 271 | /* Actually move the USP to reflect the stacked frame. */ |
281 | wrusp((unsigned long)frame); | 272 | wrusp((unsigned long)frame); |
282 | 273 | ||
283 | return 0; | 274 | return 0; |
284 | |||
285 | give_sigsegv: | ||
286 | force_sigsegv(sig, current); | ||
287 | return -EFAULT; | ||
288 | } | 275 | } |
289 | 276 | ||
290 | static int | 277 | static int |
291 | setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 278 | setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) |
292 | sigset_t *set, struct pt_regs * regs) | ||
293 | { | 279 | { |
294 | int err; | 280 | int err; |
295 | unsigned long return_ip; | 281 | unsigned long return_ip; |
296 | struct rt_signal_frame __user *frame; | 282 | struct rt_signal_frame __user *frame; |
297 | 283 | ||
298 | err = 0; | 284 | err = 0; |
299 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 285 | frame = get_sigframe(ksig, sizeof(*frame)); |
300 | 286 | ||
301 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 287 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
302 | goto give_sigsegv; | 288 | return -EFAULT; |
303 | 289 | ||
304 | /* TODO: what is the current->exec_domain stuff and invmap ? */ | 290 | /* TODO: what is the current->exec_domain stuff and invmap ? */ |
305 | 291 | ||
306 | err |= __put_user(&frame->info, &frame->pinfo); | 292 | err |= __put_user(&frame->info, &frame->pinfo); |
307 | err |= __put_user(&frame->uc, &frame->puc); | 293 | err |= __put_user(&frame->uc, &frame->puc); |
308 | err |= copy_siginfo_to_user(&frame->info, info); | 294 | err |= copy_siginfo_to_user(&frame->info, &ksig->info); |
309 | 295 | ||
310 | if (err) | 296 | if (err) |
311 | goto give_sigsegv; | 297 | return -EFAULT; |
312 | 298 | ||
313 | /* Clear all the bits of the ucontext we don't use. */ | 299 | /* Clear all the bits of the ucontext we don't use. */ |
314 | err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext)); | 300 | err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext)); |
@@ -317,14 +303,14 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
317 | err |= __save_altstack(&frame->uc.uc_stack, rdusp()); | 303 | err |= __save_altstack(&frame->uc.uc_stack, rdusp()); |
318 | 304 | ||
319 | if (err) | 305 | if (err) |
320 | goto give_sigsegv; | 306 | return -EFAULT; |
321 | 307 | ||
322 | /* | 308 | /* |
323 | * Set up to return from user-space. If provided, use a stub | 309 | * Set up to return from user-space. If provided, use a stub |
324 | * already located in user-space. | 310 | * already located in user-space. |
325 | */ | 311 | */ |
326 | if (ka->sa.sa_flags & SA_RESTORER) { | 312 | if (ksig->ka.sa.sa_flags & SA_RESTORER) { |
327 | return_ip = (unsigned long) ka->sa.sa_restorer; | 313 | return_ip = (unsigned long) ksig->ka.sa.sa_restorer; |
328 | } else { | 314 | } else { |
329 | /* Trampoline - the desired return ip is in the signal return page. */ | 315 | /* Trampoline - the desired return ip is in the signal return page. */ |
330 | return_ip = cris_signal_return_page + 6; | 316 | return_ip = cris_signal_return_page + 6; |
@@ -345,7 +331,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
345 | } | 331 | } |
346 | 332 | ||
347 | if (err) | 333 | if (err) |
348 | goto give_sigsegv; | 334 | return -EFAULT; |
349 | 335 | ||
350 | /* | 336 | /* |
351 | * Set up registers for signal handler. | 337 | * Set up registers for signal handler. |
@@ -356,9 +342,9 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
356 | * Second argument is (siginfo_t *). | 342 | * Second argument is (siginfo_t *). |
357 | * Third argument is unused. | 343 | * Third argument is unused. |
358 | */ | 344 | */ |
359 | regs->erp = (unsigned long) ka->sa.sa_handler; | 345 | regs->erp = (unsigned long) ksig->ka.sa.sa_handler; |
360 | regs->srp = return_ip; | 346 | regs->srp = return_ip; |
361 | regs->r10 = sig; | 347 | regs->r10 = ksig->sig; |
362 | regs->r11 = (unsigned long) &frame->info; | 348 | regs->r11 = (unsigned long) &frame->info; |
363 | regs->r12 = 0; | 349 | regs->r12 = 0; |
364 | 350 | ||
@@ -366,17 +352,11 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
366 | wrusp((unsigned long)frame); | 352 | wrusp((unsigned long)frame); |
367 | 353 | ||
368 | return 0; | 354 | return 0; |
369 | |||
370 | give_sigsegv: | ||
371 | force_sigsegv(sig, current); | ||
372 | return -EFAULT; | ||
373 | } | 355 | } |
374 | 356 | ||
375 | /* Invoke a signal handler to, well, handle the signal. */ | 357 | /* Invoke a signal handler to, well, handle the signal. */ |
376 | static inline void | 358 | static inline void |
377 | handle_signal(int canrestart, unsigned long sig, | 359 | handle_signal(int canrestart, struct ksignal *ksig, struct pt_regs *regs) |
378 | siginfo_t *info, struct k_sigaction *ka, | ||
379 | struct pt_regs * regs) | ||
380 | { | 360 | { |
381 | sigset_t *oldset = sigmask_to_save(); | 361 | sigset_t *oldset = sigmask_to_save(); |
382 | int ret; | 362 | int ret; |
@@ -404,7 +384,7 @@ handle_signal(int canrestart, unsigned long sig, | |||
404 | * there is no handler, or the handler | 384 | * there is no handler, or the handler |
405 | * was registered with SA_RESTART. | 385 | * was registered with SA_RESTART. |
406 | */ | 386 | */ |
407 | if (!(ka->sa.sa_flags & SA_RESTART)) { | 387 | if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { |
408 | regs->r10 = -EINTR; | 388 | regs->r10 = -EINTR; |
409 | break; | 389 | break; |
410 | } | 390 | } |
@@ -423,13 +403,12 @@ handle_signal(int canrestart, unsigned long sig, | |||
423 | } | 403 | } |
424 | 404 | ||
425 | /* Set up the stack frame. */ | 405 | /* Set up the stack frame. */ |
426 | if (ka->sa.sa_flags & SA_SIGINFO) | 406 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) |
427 | ret = setup_rt_frame(sig, ka, info, oldset, regs); | 407 | ret = setup_rt_frame(ksig, oldset, regs); |
428 | else | 408 | else |
429 | ret = setup_frame(sig, ka, oldset, regs); | 409 | ret = setup_frame(ksig, oldset, regs); |
430 | 410 | ||
431 | if (ret == 0) | 411 | signal_setup_done(ret, ksig, 0); |
432 | signal_delivered(sig, info, ka, regs, 0); | ||
433 | } | 412 | } |
434 | 413 | ||
435 | /* | 414 | /* |
@@ -446,9 +425,7 @@ handle_signal(int canrestart, unsigned long sig, | |||
446 | void | 425 | void |
447 | do_signal(int canrestart, struct pt_regs *regs) | 426 | do_signal(int canrestart, struct pt_regs *regs) |
448 | { | 427 | { |
449 | int signr; | 428 | struct ksignal ksig; |
450 | siginfo_t info; | ||
451 | struct k_sigaction ka; | ||
452 | 429 | ||
453 | /* | 430 | /* |
454 | * The common case should go fast, which is why this point is | 431 | * The common case should go fast, which is why this point is |
@@ -458,11 +435,9 @@ do_signal(int canrestart, struct pt_regs *regs) | |||
458 | if (!user_mode(regs)) | 435 | if (!user_mode(regs)) |
459 | return; | 436 | return; |
460 | 437 | ||
461 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 438 | if (get_signal(&ksig)) { |
462 | |||
463 | if (signr > 0) { | ||
464 | /* Whee! Actually deliver the signal. */ | 439 | /* Whee! Actually deliver the signal. */ |
465 | handle_signal(canrestart, signr, &info, &ka, regs); | 440 | handle_signal(canrestart, &ksig, regs); |
466 | return; | 441 | return; |
467 | } | 442 | } |
468 | 443 | ||
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c index d822700d4f15..dc3d59de0870 100644 --- a/arch/frv/kernel/signal.c +++ b/arch/frv/kernel/signal.c | |||
@@ -158,19 +158,10 @@ static int setup_sigcontext(struct sigcontext __user *sc, unsigned long mask) | |||
158 | /* | 158 | /* |
159 | * Determine which stack to use.. | 159 | * Determine which stack to use.. |
160 | */ | 160 | */ |
161 | static inline void __user *get_sigframe(struct k_sigaction *ka, | 161 | static inline void __user *get_sigframe(struct ksignal *ksig, |
162 | size_t frame_size) | 162 | size_t frame_size) |
163 | { | 163 | { |
164 | unsigned long sp; | 164 | unsigned long sp = sigsp(__frame->sp, ksig); |
165 | |||
166 | /* Default to using normal stack */ | ||
167 | sp = __frame->sp; | ||
168 | |||
169 | /* This is the X/Open sanctioned signal stack switching. */ | ||
170 | if (ka->sa.sa_flags & SA_ONSTACK) { | ||
171 | if (! sas_ss_flags(sp)) | ||
172 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
173 | } | ||
174 | 165 | ||
175 | return (void __user *) ((sp - frame_size) & ~7UL); | 166 | return (void __user *) ((sp - frame_size) & ~7UL); |
176 | 167 | ||
@@ -180,17 +171,17 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, | |||
180 | /* | 171 | /* |
181 | * | 172 | * |
182 | */ | 173 | */ |
183 | static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set) | 174 | static int setup_frame(struct ksignal *ksig, sigset_t *set) |
184 | { | 175 | { |
185 | struct sigframe __user *frame; | 176 | struct sigframe __user *frame; |
186 | int rsig; | 177 | int rsig, sig = ksig->sig; |
187 | 178 | ||
188 | set_fs(USER_DS); | 179 | set_fs(USER_DS); |
189 | 180 | ||
190 | frame = get_sigframe(ka, sizeof(*frame)); | 181 | frame = get_sigframe(ksig, sizeof(*frame)); |
191 | 182 | ||
192 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 183 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
193 | goto give_sigsegv; | 184 | return -EFAULT; |
194 | 185 | ||
195 | rsig = sig; | 186 | rsig = sig; |
196 | if (sig < 32 && | 187 | if (sig < 32 && |
@@ -199,22 +190,22 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set) | |||
199 | rsig = __current_thread_info->exec_domain->signal_invmap[sig]; | 190 | rsig = __current_thread_info->exec_domain->signal_invmap[sig]; |
200 | 191 | ||
201 | if (__put_user(rsig, &frame->sig) < 0) | 192 | if (__put_user(rsig, &frame->sig) < 0) |
202 | goto give_sigsegv; | 193 | return -EFAULT; |
203 | 194 | ||
204 | if (setup_sigcontext(&frame->sc, set->sig[0])) | 195 | if (setup_sigcontext(&frame->sc, set->sig[0])) |
205 | goto give_sigsegv; | 196 | return -EFAULT; |
206 | 197 | ||
207 | if (_NSIG_WORDS > 1) { | 198 | if (_NSIG_WORDS > 1) { |
208 | if (__copy_to_user(frame->extramask, &set->sig[1], | 199 | if (__copy_to_user(frame->extramask, &set->sig[1], |
209 | sizeof(frame->extramask))) | 200 | sizeof(frame->extramask))) |
210 | goto give_sigsegv; | 201 | return -EFAULT; |
211 | } | 202 | } |
212 | 203 | ||
213 | /* Set up to return from userspace. If provided, use a stub | 204 | /* Set up to return from userspace. If provided, use a stub |
214 | * already in userspace. */ | 205 | * already in userspace. */ |
215 | if (ka->sa.sa_flags & SA_RESTORER) { | 206 | if (ksig->ka.sa.sa_flags & SA_RESTORER) { |
216 | if (__put_user(ka->sa.sa_restorer, &frame->pretcode) < 0) | 207 | if (__put_user(ksig->ka.sa.sa_restorer, &frame->pretcode) < 0) |
217 | goto give_sigsegv; | 208 | return -EFAULT; |
218 | } | 209 | } |
219 | else { | 210 | else { |
220 | /* Set up the following code on the stack: | 211 | /* Set up the following code on the stack: |
@@ -224,7 +215,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set) | |||
224 | if (__put_user((__sigrestore_t)frame->retcode, &frame->pretcode) || | 215 | if (__put_user((__sigrestore_t)frame->retcode, &frame->pretcode) || |
225 | __put_user(0x8efc0000|__NR_sigreturn, &frame->retcode[0]) || | 216 | __put_user(0x8efc0000|__NR_sigreturn, &frame->retcode[0]) || |
226 | __put_user(0xc0700000, &frame->retcode[1])) | 217 | __put_user(0xc0700000, &frame->retcode[1])) |
227 | goto give_sigsegv; | 218 | return -EFAULT; |
228 | 219 | ||
229 | flush_icache_range((unsigned long) frame->retcode, | 220 | flush_icache_range((unsigned long) frame->retcode, |
230 | (unsigned long) (frame->retcode + 2)); | 221 | (unsigned long) (frame->retcode + 2)); |
@@ -233,14 +224,14 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set) | |||
233 | /* Set up registers for the signal handler */ | 224 | /* Set up registers for the signal handler */ |
234 | if (current->personality & FDPIC_FUNCPTRS) { | 225 | if (current->personality & FDPIC_FUNCPTRS) { |
235 | struct fdpic_func_descriptor __user *funcptr = | 226 | struct fdpic_func_descriptor __user *funcptr = |
236 | (struct fdpic_func_descriptor __user *) ka->sa.sa_handler; | 227 | (struct fdpic_func_descriptor __user *) ksig->ka.sa.sa_handler; |
237 | struct fdpic_func_descriptor desc; | 228 | struct fdpic_func_descriptor desc; |
238 | if (copy_from_user(&desc, funcptr, sizeof(desc))) | 229 | if (copy_from_user(&desc, funcptr, sizeof(desc))) |
239 | goto give_sigsegv; | 230 | return -EFAULT; |
240 | __frame->pc = desc.text; | 231 | __frame->pc = desc.text; |
241 | __frame->gr15 = desc.GOT; | 232 | __frame->gr15 = desc.GOT; |
242 | } else { | 233 | } else { |
243 | __frame->pc = (unsigned long) ka->sa.sa_handler; | 234 | __frame->pc = (unsigned long) ksig->ka.sa.sa_handler; |
244 | __frame->gr15 = 0; | 235 | __frame->gr15 = 0; |
245 | } | 236 | } |
246 | 237 | ||
@@ -255,29 +246,23 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set) | |||
255 | #endif | 246 | #endif |
256 | 247 | ||
257 | return 0; | 248 | return 0; |
258 | |||
259 | give_sigsegv: | ||
260 | force_sigsegv(sig, current); | ||
261 | return -EFAULT; | ||
262 | |||
263 | } /* end setup_frame() */ | 249 | } /* end setup_frame() */ |
264 | 250 | ||
265 | /*****************************************************************************/ | 251 | /*****************************************************************************/ |
266 | /* | 252 | /* |
267 | * | 253 | * |
268 | */ | 254 | */ |
269 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 255 | static int setup_rt_frame(struct ksignal *ksig, sigset_t *set) |
270 | sigset_t *set) | ||
271 | { | 256 | { |
272 | struct rt_sigframe __user *frame; | 257 | struct rt_sigframe __user *frame; |
273 | int rsig; | 258 | int rsig, sig = ksig->sig; |
274 | 259 | ||
275 | set_fs(USER_DS); | 260 | set_fs(USER_DS); |
276 | 261 | ||
277 | frame = get_sigframe(ka, sizeof(*frame)); | 262 | frame = get_sigframe(ksig, sizeof(*frame)); |
278 | 263 | ||
279 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 264 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
280 | goto give_sigsegv; | 265 | return -EFAULT; |
281 | 266 | ||
282 | rsig = sig; | 267 | rsig = sig; |
283 | if (sig < 32 && | 268 | if (sig < 32 && |
@@ -288,28 +273,28 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
288 | if (__put_user(rsig, &frame->sig) || | 273 | if (__put_user(rsig, &frame->sig) || |
289 | __put_user(&frame->info, &frame->pinfo) || | 274 | __put_user(&frame->info, &frame->pinfo) || |
290 | __put_user(&frame->uc, &frame->puc)) | 275 | __put_user(&frame->uc, &frame->puc)) |
291 | goto give_sigsegv; | 276 | return -EFAULT; |
292 | 277 | ||
293 | if (copy_siginfo_to_user(&frame->info, info)) | 278 | if (copy_siginfo_to_user(&frame->info, &ksig->info)) |
294 | goto give_sigsegv; | 279 | return -EFAULT; |
295 | 280 | ||
296 | /* Create the ucontext. */ | 281 | /* Create the ucontext. */ |
297 | if (__put_user(0, &frame->uc.uc_flags) || | 282 | if (__put_user(0, &frame->uc.uc_flags) || |
298 | __put_user(NULL, &frame->uc.uc_link) || | 283 | __put_user(NULL, &frame->uc.uc_link) || |
299 | __save_altstack(&frame->uc.uc_stack, __frame->sp)) | 284 | __save_altstack(&frame->uc.uc_stack, __frame->sp)) |
300 | goto give_sigsegv; | 285 | return -EFAULT; |
301 | 286 | ||
302 | if (setup_sigcontext(&frame->uc.uc_mcontext, set->sig[0])) | 287 | if (setup_sigcontext(&frame->uc.uc_mcontext, set->sig[0])) |
303 | goto give_sigsegv; | 288 | return -EFAULT; |
304 | 289 | ||
305 | if (__copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set))) | 290 | if (__copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set))) |
306 | goto give_sigsegv; | 291 | return -EFAULT; |
307 | 292 | ||
308 | /* Set up to return from userspace. If provided, use a stub | 293 | /* Set up to return from userspace. If provided, use a stub |
309 | * already in userspace. */ | 294 | * already in userspace. */ |
310 | if (ka->sa.sa_flags & SA_RESTORER) { | 295 | if (ksig->ka.sa.sa_flags & SA_RESTORER) { |
311 | if (__put_user(ka->sa.sa_restorer, &frame->pretcode)) | 296 | if (__put_user(ksig->ka.sa.sa_restorer, &frame->pretcode)) |
312 | goto give_sigsegv; | 297 | return -EFAULT; |
313 | } | 298 | } |
314 | else { | 299 | else { |
315 | /* Set up the following code on the stack: | 300 | /* Set up the following code on the stack: |
@@ -319,7 +304,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
319 | if (__put_user((__sigrestore_t)frame->retcode, &frame->pretcode) || | 304 | if (__put_user((__sigrestore_t)frame->retcode, &frame->pretcode) || |
320 | __put_user(0x8efc0000|__NR_rt_sigreturn, &frame->retcode[0]) || | 305 | __put_user(0x8efc0000|__NR_rt_sigreturn, &frame->retcode[0]) || |
321 | __put_user(0xc0700000, &frame->retcode[1])) | 306 | __put_user(0xc0700000, &frame->retcode[1])) |
322 | goto give_sigsegv; | 307 | return -EFAULT; |
323 | 308 | ||
324 | flush_icache_range((unsigned long) frame->retcode, | 309 | flush_icache_range((unsigned long) frame->retcode, |
325 | (unsigned long) (frame->retcode + 2)); | 310 | (unsigned long) (frame->retcode + 2)); |
@@ -328,14 +313,14 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
328 | /* Set up registers for signal handler */ | 313 | /* Set up registers for signal handler */ |
329 | if (current->personality & FDPIC_FUNCPTRS) { | 314 | if (current->personality & FDPIC_FUNCPTRS) { |
330 | struct fdpic_func_descriptor __user *funcptr = | 315 | struct fdpic_func_descriptor __user *funcptr = |
331 | (struct fdpic_func_descriptor __user *) ka->sa.sa_handler; | 316 | (struct fdpic_func_descriptor __user *) ksig->ka.sa.sa_handler; |
332 | struct fdpic_func_descriptor desc; | 317 | struct fdpic_func_descriptor desc; |
333 | if (copy_from_user(&desc, funcptr, sizeof(desc))) | 318 | if (copy_from_user(&desc, funcptr, sizeof(desc))) |
334 | goto give_sigsegv; | 319 | return -EFAULT; |
335 | __frame->pc = desc.text; | 320 | __frame->pc = desc.text; |
336 | __frame->gr15 = desc.GOT; | 321 | __frame->gr15 = desc.GOT; |
337 | } else { | 322 | } else { |
338 | __frame->pc = (unsigned long) ka->sa.sa_handler; | 323 | __frame->pc = (unsigned long) ksig->ka.sa.sa_handler; |
339 | __frame->gr15 = 0; | 324 | __frame->gr15 = 0; |
340 | } | 325 | } |
341 | 326 | ||
@@ -349,21 +334,15 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
349 | sig, current->comm, current->pid, frame, __frame->pc, | 334 | sig, current->comm, current->pid, frame, __frame->pc, |
350 | frame->pretcode); | 335 | frame->pretcode); |
351 | #endif | 336 | #endif |
352 | |||
353 | return 0; | 337 | return 0; |
354 | 338 | ||
355 | give_sigsegv: | ||
356 | force_sigsegv(sig, current); | ||
357 | return -EFAULT; | ||
358 | |||
359 | } /* end setup_rt_frame() */ | 339 | } /* end setup_rt_frame() */ |
360 | 340 | ||
361 | /*****************************************************************************/ | 341 | /*****************************************************************************/ |
362 | /* | 342 | /* |
363 | * OK, we're invoking a handler | 343 | * OK, we're invoking a handler |
364 | */ | 344 | */ |
365 | static void handle_signal(unsigned long sig, siginfo_t *info, | 345 | static void handle_signal(struct ksignal *ksig) |
366 | struct k_sigaction *ka) | ||
367 | { | 346 | { |
368 | sigset_t *oldset = sigmask_to_save(); | 347 | sigset_t *oldset = sigmask_to_save(); |
369 | int ret; | 348 | int ret; |
@@ -378,7 +357,7 @@ static void handle_signal(unsigned long sig, siginfo_t *info, | |||
378 | break; | 357 | break; |
379 | 358 | ||
380 | case -ERESTARTSYS: | 359 | case -ERESTARTSYS: |
381 | if (!(ka->sa.sa_flags & SA_RESTART)) { | 360 | if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { |
382 | __frame->gr8 = -EINTR; | 361 | __frame->gr8 = -EINTR; |
383 | break; | 362 | break; |
384 | } | 363 | } |
@@ -392,16 +371,12 @@ static void handle_signal(unsigned long sig, siginfo_t *info, | |||
392 | } | 371 | } |
393 | 372 | ||
394 | /* Set up the stack frame */ | 373 | /* Set up the stack frame */ |
395 | if (ka->sa.sa_flags & SA_SIGINFO) | 374 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) |
396 | ret = setup_rt_frame(sig, ka, info, oldset); | 375 | ret = setup_rt_frame(ksig, oldset); |
397 | else | 376 | else |
398 | ret = setup_frame(sig, ka, oldset); | 377 | ret = setup_frame(ksig, oldset); |
399 | |||
400 | if (ret) | ||
401 | return; | ||
402 | 378 | ||
403 | signal_delivered(sig, info, ka, __frame, | 379 | signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP)); |
404 | test_thread_flag(TIF_SINGLESTEP)); | ||
405 | } /* end handle_signal() */ | 380 | } /* end handle_signal() */ |
406 | 381 | ||
407 | /*****************************************************************************/ | 382 | /*****************************************************************************/ |
@@ -412,13 +387,10 @@ static void handle_signal(unsigned long sig, siginfo_t *info, | |||
412 | */ | 387 | */ |
413 | static void do_signal(void) | 388 | static void do_signal(void) |
414 | { | 389 | { |
415 | struct k_sigaction ka; | 390 | struct ksignal ksig; |
416 | siginfo_t info; | ||
417 | int signr; | ||
418 | 391 | ||
419 | signr = get_signal_to_deliver(&info, &ka, __frame, NULL); | 392 | if (get_signal(&ksig)) { |
420 | if (signr > 0) { | 393 | handle_signal(&ksig); |
421 | handle_signal(signr, &info, &ka); | ||
422 | return; | 394 | return; |
423 | } | 395 | } |
424 | 396 | ||
diff --git a/arch/hexagon/kernel/signal.c b/arch/hexagon/kernel/signal.c index d7c73874b515..eadd70e47e7e 100644 --- a/arch/hexagon/kernel/signal.c +++ b/arch/hexagon/kernel/signal.c | |||
@@ -36,18 +36,10 @@ struct rt_sigframe { | |||
36 | struct ucontext uc; | 36 | struct ucontext uc; |
37 | }; | 37 | }; |
38 | 38 | ||
39 | static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, | 39 | static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, |
40 | size_t frame_size) | 40 | size_t frame_size) |
41 | { | 41 | { |
42 | unsigned long sp = regs->r29; | 42 | unsigned long sp = sigsp(regs->r29, ksig); |
43 | |||
44 | /* check if we would overflow the alt stack */ | ||
45 | if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) | ||
46 | return (void __user __force *)-1UL; | ||
47 | |||
48 | /* Switch to signal stack if appropriate */ | ||
49 | if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags(sp) == 0)) | ||
50 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
51 | 43 | ||
52 | return (void __user *)((sp - frame_size) & ~(sizeof(long long) - 1)); | 44 | return (void __user *)((sp - frame_size) & ~(sizeof(long long) - 1)); |
53 | } | 45 | } |
@@ -112,20 +104,20 @@ static int restore_sigcontext(struct pt_regs *regs, | |||
112 | /* | 104 | /* |
113 | * Setup signal stack frame with siginfo structure | 105 | * Setup signal stack frame with siginfo structure |
114 | */ | 106 | */ |
115 | static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info, | 107 | static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, |
116 | sigset_t *set, struct pt_regs *regs) | 108 | struct pt_regs *regs) |
117 | { | 109 | { |
118 | int err = 0; | 110 | int err = 0; |
119 | struct rt_sigframe __user *frame; | 111 | struct rt_sigframe __user *frame; |
120 | struct hexagon_vdso *vdso = current->mm->context.vdso; | 112 | struct hexagon_vdso *vdso = current->mm->context.vdso; |
121 | 113 | ||
122 | frame = get_sigframe(ka, regs, sizeof(struct rt_sigframe)); | 114 | frame = get_sigframe(ksig, regs, sizeof(struct rt_sigframe)); |
123 | 115 | ||
124 | if (!access_ok(VERIFY_WRITE, frame, sizeof(struct rt_sigframe))) | 116 | if (!access_ok(VERIFY_WRITE, frame, sizeof(struct rt_sigframe))) |
125 | goto sigsegv; | 117 | return -EFAULT; |
126 | 118 | ||
127 | if (copy_siginfo_to_user(&frame->info, info)) | 119 | if (copy_siginfo_to_user(&frame->info, &ksig->info)) |
128 | goto sigsegv; | 120 | return -EFAULT; |
129 | 121 | ||
130 | /* The on-stack signal trampoline is no longer executed; | 122 | /* The on-stack signal trampoline is no longer executed; |
131 | * however, the libgcc signal frame unwinding code checks for | 123 | * however, the libgcc signal frame unwinding code checks for |
@@ -137,29 +129,26 @@ static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info, | |||
137 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | 129 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); |
138 | err |= __save_altstack(&frame->uc.uc_stack, user_stack_pointer(regs)); | 130 | err |= __save_altstack(&frame->uc.uc_stack, user_stack_pointer(regs)); |
139 | if (err) | 131 | if (err) |
140 | goto sigsegv; | 132 | return -EFAULT; |
141 | 133 | ||
142 | /* Load r0/r1 pair with signumber/siginfo pointer... */ | 134 | /* Load r0/r1 pair with signumber/siginfo pointer... */ |
143 | regs->r0100 = ((unsigned long long)((unsigned long)&frame->info) << 32) | 135 | regs->r0100 = ((unsigned long long)((unsigned long)&frame->info) << 32) |
144 | | (unsigned long long)signr; | 136 | | (unsigned long long)ksig->sig; |
145 | regs->r02 = (unsigned long) &frame->uc; | 137 | regs->r02 = (unsigned long) &frame->uc; |
146 | regs->r31 = (unsigned long) vdso->rt_signal_trampoline; | 138 | regs->r31 = (unsigned long) vdso->rt_signal_trampoline; |
147 | pt_psp(regs) = (unsigned long) frame; | 139 | pt_psp(regs) = (unsigned long) frame; |
148 | pt_set_elr(regs, (unsigned long)ka->sa.sa_handler); | 140 | pt_set_elr(regs, (unsigned long)ksig->ka.sa.sa_handler); |
149 | 141 | ||
150 | return 0; | 142 | return 0; |
151 | |||
152 | sigsegv: | ||
153 | force_sigsegv(signr, current); | ||
154 | return -EFAULT; | ||
155 | } | 143 | } |
156 | 144 | ||
157 | /* | 145 | /* |
158 | * Setup invocation of signal handler | 146 | * Setup invocation of signal handler |
159 | */ | 147 | */ |
160 | static void handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka, | 148 | static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) |
161 | struct pt_regs *regs) | ||
162 | { | 149 | { |
150 | int ret; | ||
151 | |||
163 | /* | 152 | /* |
164 | * If we're handling a signal that aborted a system call, | 153 | * If we're handling a signal that aborted a system call, |
165 | * set up the error return value before adding the signal | 154 | * set up the error return value before adding the signal |
@@ -173,7 +162,7 @@ static void handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka, | |||
173 | regs->r00 = -EINTR; | 162 | regs->r00 = -EINTR; |
174 | break; | 163 | break; |
175 | case -ERESTARTSYS: | 164 | case -ERESTARTSYS: |
176 | if (!(ka->sa.sa_flags & SA_RESTART)) { | 165 | if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { |
177 | regs->r00 = -EINTR; | 166 | regs->r00 = -EINTR; |
178 | break; | 167 | break; |
179 | } | 168 | } |
@@ -193,11 +182,9 @@ static void handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka, | |||
193 | * only set up the rt_frame flavor. | 182 | * only set up the rt_frame flavor. |
194 | */ | 183 | */ |
195 | /* If there was an error on setup, no signal was delivered. */ | 184 | /* If there was an error on setup, no signal was delivered. */ |
196 | if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs) < 0) | 185 | ret = setup_rt_frame(ksig, sigmask_to_save(), regs); |
197 | return; | ||
198 | 186 | ||
199 | signal_delivered(sig, info, ka, regs, | 187 | signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP)); |
200 | test_thread_flag(TIF_SINGLESTEP)); | ||
201 | } | 188 | } |
202 | 189 | ||
203 | /* | 190 | /* |
@@ -205,17 +192,13 @@ static void handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka, | |||
205 | */ | 192 | */ |
206 | void do_signal(struct pt_regs *regs) | 193 | void do_signal(struct pt_regs *regs) |
207 | { | 194 | { |
208 | struct k_sigaction sigact; | 195 | struct ksignal ksig; |
209 | siginfo_t info; | ||
210 | int signo; | ||
211 | 196 | ||
212 | if (!user_mode(regs)) | 197 | if (!user_mode(regs)) |
213 | return; | 198 | return; |
214 | 199 | ||
215 | signo = get_signal_to_deliver(&info, &sigact, regs, NULL); | 200 | if (get_signal(&ksig)) { |
216 | 201 | handle_signal(&ksig, regs); | |
217 | if (signo > 0) { | ||
218 | handle_signal(signo, &info, &sigact, regs); | ||
219 | return; | 202 | return; |
220 | } | 203 | } |
221 | 204 | ||
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index fb13dc5e8f8c..4254f5d3218c 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h | |||
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | 12 | ||
13 | 13 | ||
14 | #define NR_syscalls 315 /* length of syscall table */ | 14 | #define NR_syscalls 316 /* length of syscall table */ |
15 | 15 | ||
16 | /* | 16 | /* |
17 | * The following defines stop scripts/checksyscalls.sh from complaining about | 17 | * The following defines stop scripts/checksyscalls.sh from complaining about |
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h index 7de0a2d65da4..99801c3be914 100644 --- a/arch/ia64/include/uapi/asm/unistd.h +++ b/arch/ia64/include/uapi/asm/unistd.h | |||
@@ -328,5 +328,6 @@ | |||
328 | #define __NR_sched_setattr 1336 | 328 | #define __NR_sched_setattr 1336 |
329 | #define __NR_sched_getattr 1337 | 329 | #define __NR_sched_getattr 1337 |
330 | #define __NR_renameat2 1338 | 330 | #define __NR_renameat2 1338 |
331 | #define __NR_getrandom 1339 | ||
331 | 332 | ||
332 | #endif /* _UAPI_ASM_IA64_UNISTD_H */ | 333 | #endif /* _UAPI_ASM_IA64_UNISTD_H */ |
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index ba3d03503e84..4c13837a9269 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S | |||
@@ -1776,6 +1776,7 @@ sys_call_table: | |||
1776 | data8 sys_sched_setattr | 1776 | data8 sys_sched_setattr |
1777 | data8 sys_sched_getattr | 1777 | data8 sys_sched_getattr |
1778 | data8 sys_renameat2 | 1778 | data8 sys_renameat2 |
1779 | data8 sys_getrandom | ||
1779 | 1780 | ||
1780 | .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls | 1781 | .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls |
1781 | #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ | 1782 | #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ |
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index 33cab9a8adff..6d92170be457 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c | |||
@@ -309,12 +309,11 @@ force_sigsegv_info (int sig, void __user *addr) | |||
309 | si.si_uid = from_kuid_munged(current_user_ns(), current_uid()); | 309 | si.si_uid = from_kuid_munged(current_user_ns(), current_uid()); |
310 | si.si_addr = addr; | 310 | si.si_addr = addr; |
311 | force_sig_info(SIGSEGV, &si, current); | 311 | force_sig_info(SIGSEGV, &si, current); |
312 | return 0; | 312 | return 1; |
313 | } | 313 | } |
314 | 314 | ||
315 | static long | 315 | static long |
316 | setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, | 316 | setup_frame(struct ksignal *ksig, sigset_t *set, struct sigscratch *scr) |
317 | struct sigscratch *scr) | ||
318 | { | 317 | { |
319 | extern char __kernel_sigtramp[]; | 318 | extern char __kernel_sigtramp[]; |
320 | unsigned long tramp_addr, new_rbs = 0, new_sp; | 319 | unsigned long tramp_addr, new_rbs = 0, new_sp; |
@@ -323,7 +322,7 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, | |||
323 | 322 | ||
324 | new_sp = scr->pt.r12; | 323 | new_sp = scr->pt.r12; |
325 | tramp_addr = (unsigned long) __kernel_sigtramp; | 324 | tramp_addr = (unsigned long) __kernel_sigtramp; |
326 | if (ka->sa.sa_flags & SA_ONSTACK) { | 325 | if (ksig->ka.sa.sa_flags & SA_ONSTACK) { |
327 | int onstack = sas_ss_flags(new_sp); | 326 | int onstack = sas_ss_flags(new_sp); |
328 | 327 | ||
329 | if (onstack == 0) { | 328 | if (onstack == 0) { |
@@ -347,29 +346,29 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, | |||
347 | */ | 346 | */ |
348 | check_sp = (new_sp - sizeof(*frame)) & -STACK_ALIGN; | 347 | check_sp = (new_sp - sizeof(*frame)) & -STACK_ALIGN; |
349 | if (!likely(on_sig_stack(check_sp))) | 348 | if (!likely(on_sig_stack(check_sp))) |
350 | return force_sigsegv_info(sig, (void __user *) | 349 | return force_sigsegv_info(ksig->sig, (void __user *) |
351 | check_sp); | 350 | check_sp); |
352 | } | 351 | } |
353 | } | 352 | } |
354 | frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN); | 353 | frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN); |
355 | 354 | ||
356 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 355 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
357 | return force_sigsegv_info(sig, frame); | 356 | return force_sigsegv_info(ksig->sig, frame); |
358 | 357 | ||
359 | err = __put_user(sig, &frame->arg0); | 358 | err = __put_user(ksig->sig, &frame->arg0); |
360 | err |= __put_user(&frame->info, &frame->arg1); | 359 | err |= __put_user(&frame->info, &frame->arg1); |
361 | err |= __put_user(&frame->sc, &frame->arg2); | 360 | err |= __put_user(&frame->sc, &frame->arg2); |
362 | err |= __put_user(new_rbs, &frame->sc.sc_rbs_base); | 361 | err |= __put_user(new_rbs, &frame->sc.sc_rbs_base); |
363 | err |= __put_user(0, &frame->sc.sc_loadrs); /* initialize to zero */ | 362 | err |= __put_user(0, &frame->sc.sc_loadrs); /* initialize to zero */ |
364 | err |= __put_user(ka->sa.sa_handler, &frame->handler); | 363 | err |= __put_user(ksig->ka.sa.sa_handler, &frame->handler); |
365 | 364 | ||
366 | err |= copy_siginfo_to_user(&frame->info, info); | 365 | err |= copy_siginfo_to_user(&frame->info, &ksig->info); |
367 | 366 | ||
368 | err |= __save_altstack(&frame->sc.sc_stack, scr->pt.r12); | 367 | err |= __save_altstack(&frame->sc.sc_stack, scr->pt.r12); |
369 | err |= setup_sigcontext(&frame->sc, set, scr); | 368 | err |= setup_sigcontext(&frame->sc, set, scr); |
370 | 369 | ||
371 | if (unlikely(err)) | 370 | if (unlikely(err)) |
372 | return force_sigsegv_info(sig, frame); | 371 | return force_sigsegv_info(ksig->sig, frame); |
373 | 372 | ||
374 | scr->pt.r12 = (unsigned long) frame - 16; /* new stack pointer */ | 373 | scr->pt.r12 = (unsigned long) frame - 16; /* new stack pointer */ |
375 | scr->pt.ar_fpsr = FPSR_DEFAULT; /* reset fpsr for signal handler */ | 374 | scr->pt.ar_fpsr = FPSR_DEFAULT; /* reset fpsr for signal handler */ |
@@ -394,22 +393,20 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, | |||
394 | 393 | ||
395 | #if DEBUG_SIG | 394 | #if DEBUG_SIG |
396 | printk("SIG deliver (%s:%d): sig=%d sp=%lx ip=%lx handler=%p\n", | 395 | printk("SIG deliver (%s:%d): sig=%d sp=%lx ip=%lx handler=%p\n", |
397 | current->comm, current->pid, sig, scr->pt.r12, frame->sc.sc_ip, frame->handler); | 396 | current->comm, current->pid, ksig->sig, scr->pt.r12, frame->sc.sc_ip, frame->handler); |
398 | #endif | 397 | #endif |
399 | return 1; | 398 | return 0; |
400 | } | 399 | } |
401 | 400 | ||
402 | static long | 401 | static long |
403 | handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | 402 | handle_signal (struct ksignal *ksig, struct sigscratch *scr) |
404 | struct sigscratch *scr) | ||
405 | { | 403 | { |
406 | if (!setup_frame(sig, ka, info, sigmask_to_save(), scr)) | 404 | int ret = setup_frame(ksig, sigmask_to_save(), scr); |
407 | return 0; | ||
408 | 405 | ||
409 | signal_delivered(sig, info, ka, &scr->pt, | 406 | if (!ret) |
410 | test_thread_flag(TIF_SINGLESTEP)); | 407 | signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP)); |
411 | 408 | ||
412 | return 1; | 409 | return ret; |
413 | } | 410 | } |
414 | 411 | ||
415 | /* | 412 | /* |
@@ -419,17 +416,16 @@ handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | |||
419 | void | 416 | void |
420 | ia64_do_signal (struct sigscratch *scr, long in_syscall) | 417 | ia64_do_signal (struct sigscratch *scr, long in_syscall) |
421 | { | 418 | { |
422 | struct k_sigaction ka; | ||
423 | siginfo_t info; | ||
424 | long restart = in_syscall; | 419 | long restart = in_syscall; |
425 | long errno = scr->pt.r8; | 420 | long errno = scr->pt.r8; |
421 | struct ksignal ksig; | ||
426 | 422 | ||
427 | /* | 423 | /* |
428 | * This only loops in the rare cases of handle_signal() failing, in which case we | 424 | * This only loops in the rare cases of handle_signal() failing, in which case we |
429 | * need to push through a forced SIGSEGV. | 425 | * need to push through a forced SIGSEGV. |
430 | */ | 426 | */ |
431 | while (1) { | 427 | while (1) { |
432 | int signr = get_signal_to_deliver(&info, &ka, &scr->pt, NULL); | 428 | get_signal(&ksig); |
433 | 429 | ||
434 | /* | 430 | /* |
435 | * get_signal_to_deliver() may have run a debugger (via notify_parent()) | 431 | * get_signal_to_deliver() may have run a debugger (via notify_parent()) |
@@ -446,7 +442,7 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) | |||
446 | */ | 442 | */ |
447 | restart = 0; | 443 | restart = 0; |
448 | 444 | ||
449 | if (signr <= 0) | 445 | if (ksig.sig <= 0) |
450 | break; | 446 | break; |
451 | 447 | ||
452 | if (unlikely(restart)) { | 448 | if (unlikely(restart)) { |
@@ -458,7 +454,7 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) | |||
458 | break; | 454 | break; |
459 | 455 | ||
460 | case ERESTARTSYS: | 456 | case ERESTARTSYS: |
461 | if ((ka.sa.sa_flags & SA_RESTART) == 0) { | 457 | if ((ksig.ka.sa.sa_flags & SA_RESTART) == 0) { |
462 | scr->pt.r8 = EINTR; | 458 | scr->pt.r8 = EINTR; |
463 | /* note: scr->pt.r10 is already -1 */ | 459 | /* note: scr->pt.r10 is already -1 */ |
464 | break; | 460 | break; |
@@ -473,7 +469,7 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) | |||
473 | * Whee! Actually deliver the signal. If the delivery failed, we need to | 469 | * Whee! Actually deliver the signal. If the delivery failed, we need to |
474 | * continue to iterate in this loop so we can deliver the SIGSEGV... | 470 | * continue to iterate in this loop so we can deliver the SIGSEGV... |
475 | */ | 471 | */ |
476 | if (handle_signal(signr, &ka, &info, scr)) | 472 | if (handle_signal(&ksig, scr)) |
477 | return; | 473 | return; |
478 | } | 474 | } |
479 | 475 | ||
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c index d503568cb753..95408b8f130a 100644 --- a/arch/m32r/kernel/signal.c +++ b/arch/m32r/kernel/signal.c | |||
@@ -162,28 +162,22 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, | |||
162 | * Determine which stack to use.. | 162 | * Determine which stack to use.. |
163 | */ | 163 | */ |
164 | static inline void __user * | 164 | static inline void __user * |
165 | get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) | 165 | get_sigframe(struct ksignal *ksig, unsigned long sp, size_t frame_size) |
166 | { | 166 | { |
167 | /* This is the X/Open sanctioned signal stack switching. */ | 167 | return (void __user *)((sigsp(sp, ksig) - frame_size) & -8ul); |
168 | if (ka->sa.sa_flags & SA_ONSTACK) { | ||
169 | if (sas_ss_flags(sp) == 0) | ||
170 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
171 | } | ||
172 | |||
173 | return (void __user *)((sp - frame_size) & -8ul); | ||
174 | } | 168 | } |
175 | 169 | ||
176 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 170 | static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, |
177 | sigset_t *set, struct pt_regs *regs) | 171 | struct pt_regs *regs) |
178 | { | 172 | { |
179 | struct rt_sigframe __user *frame; | 173 | struct rt_sigframe __user *frame; |
180 | int err = 0; | 174 | int err = 0; |
181 | int signal; | 175 | int signal, sig = ksig->sig; |
182 | 176 | ||
183 | frame = get_sigframe(ka, regs->spu, sizeof(*frame)); | 177 | frame = get_sigframe(ksig, regs->spu, sizeof(*frame)); |
184 | 178 | ||
185 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 179 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
186 | goto give_sigsegv; | 180 | return -EFAULT; |
187 | 181 | ||
188 | signal = current_thread_info()->exec_domain | 182 | signal = current_thread_info()->exec_domain |
189 | && current_thread_info()->exec_domain->signal_invmap | 183 | && current_thread_info()->exec_domain->signal_invmap |
@@ -193,13 +187,13 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
193 | 187 | ||
194 | err |= __put_user(signal, &frame->sig); | 188 | err |= __put_user(signal, &frame->sig); |
195 | if (err) | 189 | if (err) |
196 | goto give_sigsegv; | 190 | return -EFAULT; |
197 | 191 | ||
198 | err |= __put_user(&frame->info, &frame->pinfo); | 192 | err |= __put_user(&frame->info, &frame->pinfo); |
199 | err |= __put_user(&frame->uc, &frame->puc); | 193 | err |= __put_user(&frame->uc, &frame->puc); |
200 | err |= copy_siginfo_to_user(&frame->info, info); | 194 | err |= copy_siginfo_to_user(&frame->info, &ksig->info); |
201 | if (err) | 195 | if (err) |
202 | goto give_sigsegv; | 196 | return -EFAULT; |
203 | 197 | ||
204 | /* Create the ucontext. */ | 198 | /* Create the ucontext. */ |
205 | err |= __put_user(0, &frame->uc.uc_flags); | 199 | err |= __put_user(0, &frame->uc.uc_flags); |
@@ -208,17 +202,17 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
208 | err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]); | 202 | err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]); |
209 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | 203 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); |
210 | if (err) | 204 | if (err) |
211 | goto give_sigsegv; | 205 | return -EFAULT; |
212 | 206 | ||
213 | /* Set up to return from userspace. */ | 207 | /* Set up to return from userspace. */ |
214 | regs->lr = (unsigned long)ka->sa.sa_restorer; | 208 | regs->lr = (unsigned long)ksig->ka.sa.sa_restorer; |
215 | 209 | ||
216 | /* Set up registers for signal handler */ | 210 | /* Set up registers for signal handler */ |
217 | regs->spu = (unsigned long)frame; | 211 | regs->spu = (unsigned long)frame; |
218 | regs->r0 = signal; /* Arg for signal handler */ | 212 | regs->r0 = signal; /* Arg for signal handler */ |
219 | regs->r1 = (unsigned long)&frame->info; | 213 | regs->r1 = (unsigned long)&frame->info; |
220 | regs->r2 = (unsigned long)&frame->uc; | 214 | regs->r2 = (unsigned long)&frame->uc; |
221 | regs->bpc = (unsigned long)ka->sa.sa_handler; | 215 | regs->bpc = (unsigned long)ksig->ka.sa.sa_handler; |
222 | 216 | ||
223 | set_fs(USER_DS); | 217 | set_fs(USER_DS); |
224 | 218 | ||
@@ -228,10 +222,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
228 | #endif | 222 | #endif |
229 | 223 | ||
230 | return 0; | 224 | return 0; |
231 | |||
232 | give_sigsegv: | ||
233 | force_sigsegv(sig, current); | ||
234 | return -EFAULT; | ||
235 | } | 225 | } |
236 | 226 | ||
237 | static int prev_insn(struct pt_regs *regs) | 227 | static int prev_insn(struct pt_regs *regs) |
@@ -252,9 +242,10 @@ static int prev_insn(struct pt_regs *regs) | |||
252 | */ | 242 | */ |
253 | 243 | ||
254 | static void | 244 | static void |
255 | handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | 245 | handle_signal(struct ksignal *ksig, struct pt_regs *regs) |
256 | struct pt_regs *regs) | ||
257 | { | 246 | { |
247 | int ret; | ||
248 | |||
258 | /* Are we from a system call? */ | 249 | /* Are we from a system call? */ |
259 | if (regs->syscall_nr >= 0) { | 250 | if (regs->syscall_nr >= 0) { |
260 | /* If so, check system call restarting.. */ | 251 | /* If so, check system call restarting.. */ |
@@ -265,7 +256,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | |||
265 | break; | 256 | break; |
266 | 257 | ||
267 | case -ERESTARTSYS: | 258 | case -ERESTARTSYS: |
268 | if (!(ka->sa.sa_flags & SA_RESTART)) { | 259 | if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { |
269 | regs->r0 = -EINTR; | 260 | regs->r0 = -EINTR; |
270 | break; | 261 | break; |
271 | } | 262 | } |
@@ -278,10 +269,9 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | |||
278 | } | 269 | } |
279 | 270 | ||
280 | /* Set up the stack frame */ | 271 | /* Set up the stack frame */ |
281 | if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs)) | 272 | ret = setup_rt_frame(ksig, sigmask_to_save(), regs); |
282 | return; | ||
283 | 273 | ||
284 | signal_delivered(sig, info, ka, regs, 0); | 274 | signal_setup_done(ret, ksig, 0); |
285 | } | 275 | } |
286 | 276 | ||
287 | /* | 277 | /* |
@@ -291,9 +281,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | |||
291 | */ | 281 | */ |
292 | static void do_signal(struct pt_regs *regs) | 282 | static void do_signal(struct pt_regs *regs) |
293 | { | 283 | { |
294 | siginfo_t info; | 284 | struct ksignal ksig; |
295 | int signr; | ||
296 | struct k_sigaction ka; | ||
297 | 285 | ||
298 | /* | 286 | /* |
299 | * We want the common case to go fast, which | 287 | * We want the common case to go fast, which |
@@ -304,8 +292,7 @@ static void do_signal(struct pt_regs *regs) | |||
304 | if (!user_mode(regs)) | 292 | if (!user_mode(regs)) |
305 | return; | 293 | return; |
306 | 294 | ||
307 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 295 | if (get_signal(&ksig)) { |
308 | if (signr > 0) { | ||
309 | /* Re-enable any watchpoints before delivering the | 296 | /* Re-enable any watchpoints before delivering the |
310 | * signal to user space. The processor register will | 297 | * signal to user space. The processor register will |
311 | * have been cleared if the watchpoint triggered | 298 | * have been cleared if the watchpoint triggered |
@@ -313,7 +300,7 @@ static void do_signal(struct pt_regs *regs) | |||
313 | */ | 300 | */ |
314 | 301 | ||
315 | /* Whee! Actually deliver the signal. */ | 302 | /* Whee! Actually deliver the signal. */ |
316 | handle_signal(signr, &ka, &info, regs); | 303 | handle_signal(&ksig, regs); |
317 | 304 | ||
318 | return; | 305 | return; |
319 | } | 306 | } |
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c index 57fd286e4b0b..967a8b7e1527 100644 --- a/arch/m68k/kernel/signal.c +++ b/arch/m68k/kernel/signal.c | |||
@@ -835,38 +835,30 @@ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs * | |||
835 | } | 835 | } |
836 | 836 | ||
837 | static inline void __user * | 837 | static inline void __user * |
838 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) | 838 | get_sigframe(struct ksignal *ksig, size_t frame_size) |
839 | { | 839 | { |
840 | unsigned long usp; | 840 | unsigned long usp = sigsp(rdusp(), ksig); |
841 | |||
842 | /* Default to using normal stack. */ | ||
843 | usp = rdusp(); | ||
844 | 841 | ||
845 | /* This is the X/Open sanctioned signal stack switching. */ | ||
846 | if (ka->sa.sa_flags & SA_ONSTACK) { | ||
847 | if (!sas_ss_flags(usp)) | ||
848 | usp = current->sas_ss_sp + current->sas_ss_size; | ||
849 | } | ||
850 | return (void __user *)((usp - frame_size) & -8UL); | 842 | return (void __user *)((usp - frame_size) & -8UL); |
851 | } | 843 | } |
852 | 844 | ||
853 | static int setup_frame (int sig, struct k_sigaction *ka, | 845 | static int setup_frame(struct ksignal *ksig, sigset_t *set, |
854 | sigset_t *set, struct pt_regs *regs) | 846 | struct pt_regs *regs) |
855 | { | 847 | { |
856 | struct sigframe __user *frame; | 848 | struct sigframe __user *frame; |
857 | int fsize = frame_extra_sizes(regs->format); | 849 | int fsize = frame_extra_sizes(regs->format); |
858 | struct sigcontext context; | 850 | struct sigcontext context; |
859 | int err = 0; | 851 | int err = 0, sig = ksig->sig; |
860 | 852 | ||
861 | if (fsize < 0) { | 853 | if (fsize < 0) { |
862 | #ifdef DEBUG | 854 | #ifdef DEBUG |
863 | printk ("setup_frame: Unknown frame format %#x\n", | 855 | printk ("setup_frame: Unknown frame format %#x\n", |
864 | regs->format); | 856 | regs->format); |
865 | #endif | 857 | #endif |
866 | goto give_sigsegv; | 858 | return -EFAULT; |
867 | } | 859 | } |
868 | 860 | ||
869 | frame = get_sigframe(ka, regs, sizeof(*frame) + fsize); | 861 | frame = get_sigframe(ksig, sizeof(*frame) + fsize); |
870 | 862 | ||
871 | if (fsize) | 863 | if (fsize) |
872 | err |= copy_to_user (frame + 1, regs + 1, fsize); | 864 | err |= copy_to_user (frame + 1, regs + 1, fsize); |
@@ -899,7 +891,7 @@ static int setup_frame (int sig, struct k_sigaction *ka, | |||
899 | #endif | 891 | #endif |
900 | 892 | ||
901 | if (err) | 893 | if (err) |
902 | goto give_sigsegv; | 894 | return -EFAULT; |
903 | 895 | ||
904 | push_cache ((unsigned long) &frame->retcode); | 896 | push_cache ((unsigned long) &frame->retcode); |
905 | 897 | ||
@@ -908,7 +900,7 @@ static int setup_frame (int sig, struct k_sigaction *ka, | |||
908 | * to destroy is successfully copied to sigframe. | 900 | * to destroy is successfully copied to sigframe. |
909 | */ | 901 | */ |
910 | wrusp ((unsigned long) frame); | 902 | wrusp ((unsigned long) frame); |
911 | regs->pc = (unsigned long) ka->sa.sa_handler; | 903 | regs->pc = (unsigned long) ksig->ka.sa.sa_handler; |
912 | adjustformat(regs); | 904 | adjustformat(regs); |
913 | 905 | ||
914 | /* | 906 | /* |
@@ -934,28 +926,24 @@ static int setup_frame (int sig, struct k_sigaction *ka, | |||
934 | tregs->sr = regs->sr; | 926 | tregs->sr = regs->sr; |
935 | } | 927 | } |
936 | return 0; | 928 | return 0; |
937 | |||
938 | give_sigsegv: | ||
939 | force_sigsegv(sig, current); | ||
940 | return err; | ||
941 | } | 929 | } |
942 | 930 | ||
943 | static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info, | 931 | static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, |
944 | sigset_t *set, struct pt_regs *regs) | 932 | struct pt_regs *regs) |
945 | { | 933 | { |
946 | struct rt_sigframe __user *frame; | 934 | struct rt_sigframe __user *frame; |
947 | int fsize = frame_extra_sizes(regs->format); | 935 | int fsize = frame_extra_sizes(regs->format); |
948 | int err = 0; | 936 | int err = 0, sig = ksig->sig; |
949 | 937 | ||
950 | if (fsize < 0) { | 938 | if (fsize < 0) { |
951 | #ifdef DEBUG | 939 | #ifdef DEBUG |
952 | printk ("setup_frame: Unknown frame format %#x\n", | 940 | printk ("setup_frame: Unknown frame format %#x\n", |
953 | regs->format); | 941 | regs->format); |
954 | #endif | 942 | #endif |
955 | goto give_sigsegv; | 943 | return -EFAULT; |
956 | } | 944 | } |
957 | 945 | ||
958 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 946 | frame = get_sigframe(ksig, sizeof(*frame)); |
959 | 947 | ||
960 | if (fsize) | 948 | if (fsize) |
961 | err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize); | 949 | err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize); |
@@ -968,7 +956,7 @@ static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info, | |||
968 | &frame->sig); | 956 | &frame->sig); |
969 | err |= __put_user(&frame->info, &frame->pinfo); | 957 | err |= __put_user(&frame->info, &frame->pinfo); |
970 | err |= __put_user(&frame->uc, &frame->puc); | 958 | err |= __put_user(&frame->uc, &frame->puc); |
971 | err |= copy_siginfo_to_user(&frame->info, info); | 959 | err |= copy_siginfo_to_user(&frame->info, &ksig->info); |
972 | 960 | ||
973 | /* Create the ucontext. */ | 961 | /* Create the ucontext. */ |
974 | err |= __put_user(0, &frame->uc.uc_flags); | 962 | err |= __put_user(0, &frame->uc.uc_flags); |
@@ -996,7 +984,7 @@ static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info, | |||
996 | #endif /* CONFIG_MMU */ | 984 | #endif /* CONFIG_MMU */ |
997 | 985 | ||
998 | if (err) | 986 | if (err) |
999 | goto give_sigsegv; | 987 | return -EFAULT; |
1000 | 988 | ||
1001 | push_cache ((unsigned long) &frame->retcode); | 989 | push_cache ((unsigned long) &frame->retcode); |
1002 | 990 | ||
@@ -1005,7 +993,7 @@ static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info, | |||
1005 | * to destroy is successfully copied to sigframe. | 993 | * to destroy is successfully copied to sigframe. |
1006 | */ | 994 | */ |
1007 | wrusp ((unsigned long) frame); | 995 | wrusp ((unsigned long) frame); |
1008 | regs->pc = (unsigned long) ka->sa.sa_handler; | 996 | regs->pc = (unsigned long) ksig->ka.sa.sa_handler; |
1009 | adjustformat(regs); | 997 | adjustformat(regs); |
1010 | 998 | ||
1011 | /* | 999 | /* |
@@ -1031,10 +1019,6 @@ static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info, | |||
1031 | tregs->sr = regs->sr; | 1019 | tregs->sr = regs->sr; |
1032 | } | 1020 | } |
1033 | return 0; | 1021 | return 0; |
1034 | |||
1035 | give_sigsegv: | ||
1036 | force_sigsegv(sig, current); | ||
1037 | return err; | ||
1038 | } | 1022 | } |
1039 | 1023 | ||
1040 | static inline void | 1024 | static inline void |
@@ -1074,26 +1058,22 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) | |||
1074 | * OK, we're invoking a handler | 1058 | * OK, we're invoking a handler |
1075 | */ | 1059 | */ |
1076 | static void | 1060 | static void |
1077 | handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info, | 1061 | handle_signal(struct ksignal *ksig, struct pt_regs *regs) |
1078 | struct pt_regs *regs) | ||
1079 | { | 1062 | { |
1080 | sigset_t *oldset = sigmask_to_save(); | 1063 | sigset_t *oldset = sigmask_to_save(); |
1081 | int err; | 1064 | int err; |
1082 | /* are we from a system call? */ | 1065 | /* are we from a system call? */ |
1083 | if (regs->orig_d0 >= 0) | 1066 | if (regs->orig_d0 >= 0) |
1084 | /* If so, check system call restarting.. */ | 1067 | /* If so, check system call restarting.. */ |
1085 | handle_restart(regs, ka, 1); | 1068 | handle_restart(regs, &ksig->ka, 1); |
1086 | 1069 | ||
1087 | /* set up the stack frame */ | 1070 | /* set up the stack frame */ |
1088 | if (ka->sa.sa_flags & SA_SIGINFO) | 1071 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) |
1089 | err = setup_rt_frame(sig, ka, info, oldset, regs); | 1072 | err = setup_rt_frame(ksig, oldset, regs); |
1090 | else | 1073 | else |
1091 | err = setup_frame(sig, ka, oldset, regs); | 1074 | err = setup_frame(ksig, oldset, regs); |
1092 | |||
1093 | if (err) | ||
1094 | return; | ||
1095 | 1075 | ||
1096 | signal_delivered(sig, info, ka, regs, 0); | 1076 | signal_setup_done(err, ksig, 0); |
1097 | 1077 | ||
1098 | if (test_thread_flag(TIF_DELAYED_TRACE)) { | 1078 | if (test_thread_flag(TIF_DELAYED_TRACE)) { |
1099 | regs->sr &= ~0x8000; | 1079 | regs->sr &= ~0x8000; |
@@ -1108,16 +1088,13 @@ handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
1108 | */ | 1088 | */ |
1109 | static void do_signal(struct pt_regs *regs) | 1089 | static void do_signal(struct pt_regs *regs) |
1110 | { | 1090 | { |
1111 | siginfo_t info; | 1091 | struct ksignal ksig; |
1112 | struct k_sigaction ka; | ||
1113 | int signr; | ||
1114 | 1092 | ||
1115 | current->thread.esp0 = (unsigned long) regs; | 1093 | current->thread.esp0 = (unsigned long) regs; |
1116 | 1094 | ||
1117 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 1095 | if (get_signal(&ksig)) { |
1118 | if (signr > 0) { | ||
1119 | /* Whee! Actually deliver the signal. */ | 1096 | /* Whee! Actually deliver the signal. */ |
1120 | handle_signal(signr, &ka, &info, regs); | 1097 | handle_signal(&ksig, regs); |
1121 | return; | 1098 | return; |
1122 | } | 1099 | } |
1123 | 1100 | ||
diff --git a/arch/metag/kernel/signal.c b/arch/metag/kernel/signal.c index b9e4a82d2bd4..0d100d5c1407 100644 --- a/arch/metag/kernel/signal.c +++ b/arch/metag/kernel/signal.c | |||
@@ -140,13 +140,9 @@ static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, | |||
140 | /* | 140 | /* |
141 | * Determine which stack to use.. | 141 | * Determine which stack to use.. |
142 | */ | 142 | */ |
143 | static void __user *get_sigframe(struct k_sigaction *ka, unsigned long sp, | 143 | static void __user *get_sigframe(struct ksignal *ksig, unsigned long sp) |
144 | size_t frame_size) | ||
145 | { | 144 | { |
146 | /* Meta stacks grows upwards */ | 145 | sp = sigsp(sp, ksig); |
147 | if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags(sp) == 0)) | ||
148 | sp = current->sas_ss_sp; | ||
149 | |||
150 | sp = (sp + 7) & ~7; /* 8byte align stack */ | 146 | sp = (sp + 7) & ~7; /* 8byte align stack */ |
151 | 147 | ||
152 | return (void __user *)sp; | 148 | return (void __user *)sp; |
@@ -159,7 +155,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, | |||
159 | int err; | 155 | int err; |
160 | unsigned long code; | 156 | unsigned long code; |
161 | 157 | ||
162 | frame = get_sigframe(&ksig->ka, regs->REG_SP, sizeof(*frame)); | 158 | frame = get_sigframe(ksig, regs->REG_SP); |
163 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 159 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
164 | return -EFAULT; | 160 | return -EFAULT; |
165 | 161 | ||
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c index 49a07a4d76d0..8955a3829cf0 100644 --- a/arch/microblaze/kernel/signal.c +++ b/arch/microblaze/kernel/signal.c | |||
@@ -145,22 +145,19 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, | |||
145 | * Determine which stack to use.. | 145 | * Determine which stack to use.. |
146 | */ | 146 | */ |
147 | static inline void __user * | 147 | static inline void __user * |
148 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) | 148 | get_sigframe(struct ksignal *ksig, struct pt_regs *regs, size_t frame_size) |
149 | { | 149 | { |
150 | /* Default to using normal stack */ | 150 | /* Default to using normal stack */ |
151 | unsigned long sp = regs->r1; | 151 | unsigned long sp = sigsp(regs->r1, ksig); |
152 | |||
153 | if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && !on_sig_stack(sp)) | ||
154 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
155 | 152 | ||
156 | return (void __user *)((sp - frame_size) & -8UL); | 153 | return (void __user *)((sp - frame_size) & -8UL); |
157 | } | 154 | } |
158 | 155 | ||
159 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 156 | static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, |
160 | sigset_t *set, struct pt_regs *regs) | 157 | struct pt_regs *regs) |
161 | { | 158 | { |
162 | struct rt_sigframe __user *frame; | 159 | struct rt_sigframe __user *frame; |
163 | int err = 0; | 160 | int err = 0, sig = ksig->sig; |
164 | int signal; | 161 | int signal; |
165 | unsigned long address = 0; | 162 | unsigned long address = 0; |
166 | #ifdef CONFIG_MMU | 163 | #ifdef CONFIG_MMU |
@@ -168,10 +165,10 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
168 | pte_t *ptep; | 165 | pte_t *ptep; |
169 | #endif | 166 | #endif |
170 | 167 | ||
171 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 168 | frame = get_sigframe(ksig, regs, sizeof(*frame)); |
172 | 169 | ||
173 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 170 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
174 | goto give_sigsegv; | 171 | return -EFAULT; |
175 | 172 | ||
176 | signal = current_thread_info()->exec_domain | 173 | signal = current_thread_info()->exec_domain |
177 | && current_thread_info()->exec_domain->signal_invmap | 174 | && current_thread_info()->exec_domain->signal_invmap |
@@ -179,8 +176,8 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
179 | ? current_thread_info()->exec_domain->signal_invmap[sig] | 176 | ? current_thread_info()->exec_domain->signal_invmap[sig] |
180 | : sig; | 177 | : sig; |
181 | 178 | ||
182 | if (info) | 179 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) |
183 | err |= copy_siginfo_to_user(&frame->info, info); | 180 | err |= copy_siginfo_to_user(&frame->info, &ksig->info); |
184 | 181 | ||
185 | /* Create the ucontext. */ | 182 | /* Create the ucontext. */ |
186 | err |= __put_user(0, &frame->uc.uc_flags); | 183 | err |= __put_user(0, &frame->uc.uc_flags); |
@@ -227,7 +224,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
227 | flush_dcache_range(address, address + 8); | 224 | flush_dcache_range(address, address + 8); |
228 | #endif | 225 | #endif |
229 | if (err) | 226 | if (err) |
230 | goto give_sigsegv; | 227 | return -EFAULT; |
231 | 228 | ||
232 | /* Set up registers for signal handler */ | 229 | /* Set up registers for signal handler */ |
233 | regs->r1 = (unsigned long) frame; | 230 | regs->r1 = (unsigned long) frame; |
@@ -237,7 +234,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
237 | regs->r6 = (unsigned long) &frame->info; /* arg 1: siginfo */ | 234 | regs->r6 = (unsigned long) &frame->info; /* arg 1: siginfo */ |
238 | regs->r7 = (unsigned long) &frame->uc; /* arg2: ucontext */ | 235 | regs->r7 = (unsigned long) &frame->uc; /* arg2: ucontext */ |
239 | /* Offset to handle microblaze rtid r14, 0 */ | 236 | /* Offset to handle microblaze rtid r14, 0 */ |
240 | regs->pc = (unsigned long)ka->sa.sa_handler; | 237 | regs->pc = (unsigned long)ksig->ka.sa.sa_handler; |
241 | 238 | ||
242 | set_fs(USER_DS); | 239 | set_fs(USER_DS); |
243 | 240 | ||
@@ -247,10 +244,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
247 | #endif | 244 | #endif |
248 | 245 | ||
249 | return 0; | 246 | return 0; |
250 | |||
251 | give_sigsegv: | ||
252 | force_sigsegv(sig, current); | ||
253 | return -EFAULT; | ||
254 | } | 247 | } |
255 | 248 | ||
256 | /* Handle restarting system calls */ | 249 | /* Handle restarting system calls */ |
@@ -283,23 +276,15 @@ do_restart: | |||
283 | */ | 276 | */ |
284 | 277 | ||
285 | static void | 278 | static void |
286 | handle_signal(unsigned long sig, struct k_sigaction *ka, | 279 | handle_signal(struct ksignal *ksig, struct pt_regs *regs) |
287 | siginfo_t *info, struct pt_regs *regs) | ||
288 | { | 280 | { |
289 | sigset_t *oldset = sigmask_to_save(); | 281 | sigset_t *oldset = sigmask_to_save(); |
290 | int ret; | 282 | int ret; |
291 | 283 | ||
292 | /* Set up the stack frame */ | 284 | /* Set up the stack frame */ |
293 | if (ka->sa.sa_flags & SA_SIGINFO) | 285 | ret = setup_rt_frame(ksig, oldset, regs); |
294 | ret = setup_rt_frame(sig, ka, info, oldset, regs); | ||
295 | else | ||
296 | ret = setup_rt_frame(sig, ka, NULL, oldset, regs); | ||
297 | 286 | ||
298 | if (ret) | 287 | signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP)); |
299 | return; | ||
300 | |||
301 | signal_delivered(sig, info, ka, regs, | ||
302 | test_thread_flag(TIF_SINGLESTEP)); | ||
303 | } | 288 | } |
304 | 289 | ||
305 | /* | 290 | /* |
@@ -313,21 +298,19 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, | |||
313 | */ | 298 | */ |
314 | static void do_signal(struct pt_regs *regs, int in_syscall) | 299 | static void do_signal(struct pt_regs *regs, int in_syscall) |
315 | { | 300 | { |
316 | siginfo_t info; | 301 | struct ksignal ksig; |
317 | int signr; | 302 | |
318 | struct k_sigaction ka; | ||
319 | #ifdef DEBUG_SIG | 303 | #ifdef DEBUG_SIG |
320 | pr_info("do signal: %p %d\n", regs, in_syscall); | 304 | pr_info("do signal: %p %d\n", regs, in_syscall); |
321 | pr_info("do signal2: %lx %lx %ld [%lx]\n", regs->pc, regs->r1, | 305 | pr_info("do signal2: %lx %lx %ld [%lx]\n", regs->pc, regs->r1, |
322 | regs->r12, current_thread_info()->flags); | 306 | regs->r12, current_thread_info()->flags); |
323 | #endif | 307 | #endif |
324 | 308 | ||
325 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 309 | if (get_signal(&ksig)) { |
326 | if (signr > 0) { | ||
327 | /* Whee! Actually deliver the signal. */ | 310 | /* Whee! Actually deliver the signal. */ |
328 | if (in_syscall) | 311 | if (in_syscall) |
329 | handle_restart(regs, &ka, 1); | 312 | handle_restart(regs, &ksig.ka, 1); |
330 | handle_signal(signr, &ka, &info, regs); | 313 | handle_signal(&ksig, regs); |
331 | return; | 314 | return; |
332 | } | 315 | } |
333 | 316 | ||
diff --git a/arch/mips/include/asm/abi.h b/arch/mips/include/asm/abi.h index 909bb6984866..7186bb51b89b 100644 --- a/arch/mips/include/asm/abi.h +++ b/arch/mips/include/asm/abi.h | |||
@@ -13,13 +13,11 @@ | |||
13 | #include <asm/siginfo.h> | 13 | #include <asm/siginfo.h> |
14 | 14 | ||
15 | struct mips_abi { | 15 | struct mips_abi { |
16 | int (* const setup_frame)(void *sig_return, struct k_sigaction *ka, | 16 | int (* const setup_frame)(void *sig_return, struct ksignal *ksig, |
17 | struct pt_regs *regs, int signr, | 17 | struct pt_regs *regs, sigset_t *set); |
18 | sigset_t *set); | ||
19 | const unsigned long signal_return_offset; | 18 | const unsigned long signal_return_offset; |
20 | int (* const setup_rt_frame)(void *sig_return, struct k_sigaction *ka, | 19 | int (* const setup_rt_frame)(void *sig_return, struct ksignal *ksig, |
21 | struct pt_regs *regs, int signr, | 20 | struct pt_regs *regs, sigset_t *set); |
22 | sigset_t *set, siginfo_t *info); | ||
23 | const unsigned long rt_signal_return_offset; | 21 | const unsigned long rt_signal_return_offset; |
24 | const unsigned long restart; | 22 | const unsigned long restart; |
25 | }; | 23 | }; |
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h index 9c60d09e62a7..06805e09bcd3 100644 --- a/arch/mips/kernel/signal-common.h +++ b/arch/mips/kernel/signal-common.h | |||
@@ -22,7 +22,7 @@ | |||
22 | /* | 22 | /* |
23 | * Determine which stack to use.. | 23 | * Determine which stack to use.. |
24 | */ | 24 | */ |
25 | extern void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, | 25 | extern void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, |
26 | size_t frame_size); | 26 | size_t frame_size); |
27 | /* Check and clear pending FPU exceptions in saved CSR */ | 27 | /* Check and clear pending FPU exceptions in saved CSR */ |
28 | extern int fpcsr_pending(unsigned int __user *fpcsr); | 28 | extern int fpcsr_pending(unsigned int __user *fpcsr); |
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 9e60d117e41e..1d57605e4615 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c | |||
@@ -280,7 +280,7 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) | |||
280 | return err; | 280 | return err; |
281 | } | 281 | } |
282 | 282 | ||
283 | void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, | 283 | void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, |
284 | size_t frame_size) | 284 | size_t frame_size) |
285 | { | 285 | { |
286 | unsigned long sp; | 286 | unsigned long sp; |
@@ -295,9 +295,7 @@ void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, | |||
295 | */ | 295 | */ |
296 | sp -= 32; | 296 | sp -= 32; |
297 | 297 | ||
298 | /* This is the X/Open sanctioned signal stack switching. */ | 298 | sp = sigsp(sp, ksig); |
299 | if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0)) | ||
300 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
301 | 299 | ||
302 | return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK)); | 300 | return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK)); |
303 | } | 301 | } |
@@ -428,20 +426,20 @@ badframe: | |||
428 | } | 426 | } |
429 | 427 | ||
430 | #ifdef CONFIG_TRAD_SIGNALS | 428 | #ifdef CONFIG_TRAD_SIGNALS |
431 | static int setup_frame(void *sig_return, struct k_sigaction *ka, | 429 | static int setup_frame(void *sig_return, struct ksignal *ksig, |
432 | struct pt_regs *regs, int signr, sigset_t *set) | 430 | struct pt_regs *regs, sigset_t *set) |
433 | { | 431 | { |
434 | struct sigframe __user *frame; | 432 | struct sigframe __user *frame; |
435 | int err = 0; | 433 | int err = 0; |
436 | 434 | ||
437 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 435 | frame = get_sigframe(ksig, regs, sizeof(*frame)); |
438 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) | 436 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) |
439 | goto give_sigsegv; | 437 | return -EFAULT; |
440 | 438 | ||
441 | err |= setup_sigcontext(regs, &frame->sf_sc); | 439 | err |= setup_sigcontext(regs, &frame->sf_sc); |
442 | err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set)); | 440 | err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set)); |
443 | if (err) | 441 | if (err) |
444 | goto give_sigsegv; | 442 | return -EFAULT; |
445 | 443 | ||
446 | /* | 444 | /* |
447 | * Arguments to signal handler: | 445 | * Arguments to signal handler: |
@@ -453,37 +451,32 @@ static int setup_frame(void *sig_return, struct k_sigaction *ka, | |||
453 | * $25 and c0_epc point to the signal handler, $29 points to the | 451 | * $25 and c0_epc point to the signal handler, $29 points to the |
454 | * struct sigframe. | 452 | * struct sigframe. |
455 | */ | 453 | */ |
456 | regs->regs[ 4] = signr; | 454 | regs->regs[ 4] = ksig->sig; |
457 | regs->regs[ 5] = 0; | 455 | regs->regs[ 5] = 0; |
458 | regs->regs[ 6] = (unsigned long) &frame->sf_sc; | 456 | regs->regs[ 6] = (unsigned long) &frame->sf_sc; |
459 | regs->regs[29] = (unsigned long) frame; | 457 | regs->regs[29] = (unsigned long) frame; |
460 | regs->regs[31] = (unsigned long) sig_return; | 458 | regs->regs[31] = (unsigned long) sig_return; |
461 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; | 459 | regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler; |
462 | 460 | ||
463 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", | 461 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", |
464 | current->comm, current->pid, | 462 | current->comm, current->pid, |
465 | frame, regs->cp0_epc, regs->regs[31]); | 463 | frame, regs->cp0_epc, regs->regs[31]); |
466 | return 0; | 464 | return 0; |
467 | |||
468 | give_sigsegv: | ||
469 | force_sigsegv(signr, current); | ||
470 | return -EFAULT; | ||
471 | } | 465 | } |
472 | #endif | 466 | #endif |
473 | 467 | ||
474 | static int setup_rt_frame(void *sig_return, struct k_sigaction *ka, | 468 | static int setup_rt_frame(void *sig_return, struct ksignal *ksig, |
475 | struct pt_regs *regs, int signr, sigset_t *set, | 469 | struct pt_regs *regs, sigset_t *set) |
476 | siginfo_t *info) | ||
477 | { | 470 | { |
478 | struct rt_sigframe __user *frame; | 471 | struct rt_sigframe __user *frame; |
479 | int err = 0; | 472 | int err = 0; |
480 | 473 | ||
481 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 474 | frame = get_sigframe(ksig, regs, sizeof(*frame)); |
482 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) | 475 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) |
483 | goto give_sigsegv; | 476 | return -EFAULT; |
484 | 477 | ||
485 | /* Create siginfo. */ | 478 | /* Create siginfo. */ |
486 | err |= copy_siginfo_to_user(&frame->rs_info, info); | 479 | err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info); |
487 | 480 | ||
488 | /* Create the ucontext. */ | 481 | /* Create the ucontext. */ |
489 | err |= __put_user(0, &frame->rs_uc.uc_flags); | 482 | err |= __put_user(0, &frame->rs_uc.uc_flags); |
@@ -493,7 +486,7 @@ static int setup_rt_frame(void *sig_return, struct k_sigaction *ka, | |||
493 | err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set)); | 486 | err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set)); |
494 | 487 | ||
495 | if (err) | 488 | if (err) |
496 | goto give_sigsegv; | 489 | return -EFAULT; |
497 | 490 | ||
498 | /* | 491 | /* |
499 | * Arguments to signal handler: | 492 | * Arguments to signal handler: |
@@ -505,22 +498,18 @@ static int setup_rt_frame(void *sig_return, struct k_sigaction *ka, | |||
505 | * $25 and c0_epc point to the signal handler, $29 points to | 498 | * $25 and c0_epc point to the signal handler, $29 points to |
506 | * the struct rt_sigframe. | 499 | * the struct rt_sigframe. |
507 | */ | 500 | */ |
508 | regs->regs[ 4] = signr; | 501 | regs->regs[ 4] = ksig->sig; |
509 | regs->regs[ 5] = (unsigned long) &frame->rs_info; | 502 | regs->regs[ 5] = (unsigned long) &frame->rs_info; |
510 | regs->regs[ 6] = (unsigned long) &frame->rs_uc; | 503 | regs->regs[ 6] = (unsigned long) &frame->rs_uc; |
511 | regs->regs[29] = (unsigned long) frame; | 504 | regs->regs[29] = (unsigned long) frame; |
512 | regs->regs[31] = (unsigned long) sig_return; | 505 | regs->regs[31] = (unsigned long) sig_return; |
513 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; | 506 | regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler; |
514 | 507 | ||
515 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", | 508 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", |
516 | current->comm, current->pid, | 509 | current->comm, current->pid, |
517 | frame, regs->cp0_epc, regs->regs[31]); | 510 | frame, regs->cp0_epc, regs->regs[31]); |
518 | 511 | ||
519 | return 0; | 512 | return 0; |
520 | |||
521 | give_sigsegv: | ||
522 | force_sigsegv(signr, current); | ||
523 | return -EFAULT; | ||
524 | } | 513 | } |
525 | 514 | ||
526 | struct mips_abi mips_abi = { | 515 | struct mips_abi mips_abi = { |
@@ -534,8 +523,7 @@ struct mips_abi mips_abi = { | |||
534 | .restart = __NR_restart_syscall | 523 | .restart = __NR_restart_syscall |
535 | }; | 524 | }; |
536 | 525 | ||
537 | static void handle_signal(unsigned long sig, siginfo_t *info, | 526 | static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) |
538 | struct k_sigaction *ka, struct pt_regs *regs) | ||
539 | { | 527 | { |
540 | sigset_t *oldset = sigmask_to_save(); | 528 | sigset_t *oldset = sigmask_to_save(); |
541 | int ret; | 529 | int ret; |
@@ -557,7 +545,7 @@ static void handle_signal(unsigned long sig, siginfo_t *info, | |||
557 | regs->regs[2] = EINTR; | 545 | regs->regs[2] = EINTR; |
558 | break; | 546 | break; |
559 | case ERESTARTSYS: | 547 | case ERESTARTSYS: |
560 | if (!(ka->sa.sa_flags & SA_RESTART)) { | 548 | if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { |
561 | regs->regs[2] = EINTR; | 549 | regs->regs[2] = EINTR; |
562 | break; | 550 | break; |
563 | } | 551 | } |
@@ -571,29 +559,23 @@ static void handle_signal(unsigned long sig, siginfo_t *info, | |||
571 | regs->regs[0] = 0; /* Don't deal with this again. */ | 559 | regs->regs[0] = 0; /* Don't deal with this again. */ |
572 | } | 560 | } |
573 | 561 | ||
574 | if (sig_uses_siginfo(ka)) | 562 | if (sig_uses_siginfo(&ksig->ka)) |
575 | ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset, | 563 | ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset, |
576 | ka, regs, sig, oldset, info); | 564 | ksig, regs, oldset); |
577 | else | 565 | else |
578 | ret = abi->setup_frame(vdso + abi->signal_return_offset, | 566 | ret = abi->setup_frame(vdso + abi->signal_return_offset, ksig, |
579 | ka, regs, sig, oldset); | 567 | regs, oldset); |
580 | |||
581 | if (ret) | ||
582 | return; | ||
583 | 568 | ||
584 | signal_delivered(sig, info, ka, regs, 0); | 569 | signal_setup_done(ret, ksig, 0); |
585 | } | 570 | } |
586 | 571 | ||
587 | static void do_signal(struct pt_regs *regs) | 572 | static void do_signal(struct pt_regs *regs) |
588 | { | 573 | { |
589 | struct k_sigaction ka; | 574 | struct ksignal ksig; |
590 | siginfo_t info; | ||
591 | int signr; | ||
592 | 575 | ||
593 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 576 | if (get_signal(&ksig)) { |
594 | if (signr > 0) { | ||
595 | /* Whee! Actually deliver the signal. */ | 577 | /* Whee! Actually deliver the signal. */ |
596 | handle_signal(signr, &info, &ka, regs); | 578 | handle_signal(&ksig, regs); |
597 | return; | 579 | return; |
598 | } | 580 | } |
599 | 581 | ||
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index bae2e6ee2109..d69179c0d49d 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c | |||
@@ -490,21 +490,21 @@ badframe: | |||
490 | force_sig(SIGSEGV, current); | 490 | force_sig(SIGSEGV, current); |
491 | } | 491 | } |
492 | 492 | ||
493 | static int setup_frame_32(void *sig_return, struct k_sigaction *ka, | 493 | static int setup_frame_32(void *sig_return, struct ksignal *ksig, |
494 | struct pt_regs *regs, int signr, sigset_t *set) | 494 | struct pt_regs *regs, sigset_t *set) |
495 | { | 495 | { |
496 | struct sigframe32 __user *frame; | 496 | struct sigframe32 __user *frame; |
497 | int err = 0; | 497 | int err = 0; |
498 | 498 | ||
499 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 499 | frame = get_sigframe(ksig, regs, sizeof(*frame)); |
500 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) | 500 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) |
501 | goto give_sigsegv; | 501 | return -EFAULT; |
502 | 502 | ||
503 | err |= setup_sigcontext32(regs, &frame->sf_sc); | 503 | err |= setup_sigcontext32(regs, &frame->sf_sc); |
504 | err |= __copy_conv_sigset_to_user(&frame->sf_mask, set); | 504 | err |= __copy_conv_sigset_to_user(&frame->sf_mask, set); |
505 | 505 | ||
506 | if (err) | 506 | if (err) |
507 | goto give_sigsegv; | 507 | return -EFAULT; |
508 | 508 | ||
509 | /* | 509 | /* |
510 | * Arguments to signal handler: | 510 | * Arguments to signal handler: |
@@ -516,37 +516,32 @@ static int setup_frame_32(void *sig_return, struct k_sigaction *ka, | |||
516 | * $25 and c0_epc point to the signal handler, $29 points to the | 516 | * $25 and c0_epc point to the signal handler, $29 points to the |
517 | * struct sigframe. | 517 | * struct sigframe. |
518 | */ | 518 | */ |
519 | regs->regs[ 4] = signr; | 519 | regs->regs[ 4] = ksig->sig; |
520 | regs->regs[ 5] = 0; | 520 | regs->regs[ 5] = 0; |
521 | regs->regs[ 6] = (unsigned long) &frame->sf_sc; | 521 | regs->regs[ 6] = (unsigned long) &frame->sf_sc; |
522 | regs->regs[29] = (unsigned long) frame; | 522 | regs->regs[29] = (unsigned long) frame; |
523 | regs->regs[31] = (unsigned long) sig_return; | 523 | regs->regs[31] = (unsigned long) sig_return; |
524 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; | 524 | regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler; |
525 | 525 | ||
526 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", | 526 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", |
527 | current->comm, current->pid, | 527 | current->comm, current->pid, |
528 | frame, regs->cp0_epc, regs->regs[31]); | 528 | frame, regs->cp0_epc, regs->regs[31]); |
529 | 529 | ||
530 | return 0; | 530 | return 0; |
531 | |||
532 | give_sigsegv: | ||
533 | force_sigsegv(signr, current); | ||
534 | return -EFAULT; | ||
535 | } | 531 | } |
536 | 532 | ||
537 | static int setup_rt_frame_32(void *sig_return, struct k_sigaction *ka, | 533 | static int setup_rt_frame_32(void *sig_return, struct ksignal *ksig, |
538 | struct pt_regs *regs, int signr, sigset_t *set, | 534 | struct pt_regs *regs, sigset_t *set) |
539 | siginfo_t *info) | ||
540 | { | 535 | { |
541 | struct rt_sigframe32 __user *frame; | 536 | struct rt_sigframe32 __user *frame; |
542 | int err = 0; | 537 | int err = 0; |
543 | 538 | ||
544 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 539 | frame = get_sigframe(ksig, regs, sizeof(*frame)); |
545 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) | 540 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) |
546 | goto give_sigsegv; | 541 | return -EFAULT; |
547 | 542 | ||
548 | /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */ | 543 | /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */ |
549 | err |= copy_siginfo_to_user32(&frame->rs_info, info); | 544 | err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info); |
550 | 545 | ||
551 | /* Create the ucontext. */ | 546 | /* Create the ucontext. */ |
552 | err |= __put_user(0, &frame->rs_uc.uc_flags); | 547 | err |= __put_user(0, &frame->rs_uc.uc_flags); |
@@ -556,7 +551,7 @@ static int setup_rt_frame_32(void *sig_return, struct k_sigaction *ka, | |||
556 | err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set); | 551 | err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set); |
557 | 552 | ||
558 | if (err) | 553 | if (err) |
559 | goto give_sigsegv; | 554 | return -EFAULT; |
560 | 555 | ||
561 | /* | 556 | /* |
562 | * Arguments to signal handler: | 557 | * Arguments to signal handler: |
@@ -568,22 +563,18 @@ static int setup_rt_frame_32(void *sig_return, struct k_sigaction *ka, | |||
568 | * $25 and c0_epc point to the signal handler, $29 points to | 563 | * $25 and c0_epc point to the signal handler, $29 points to |
569 | * the struct rt_sigframe32. | 564 | * the struct rt_sigframe32. |
570 | */ | 565 | */ |
571 | regs->regs[ 4] = signr; | 566 | regs->regs[ 4] = ksig->sig; |
572 | regs->regs[ 5] = (unsigned long) &frame->rs_info; | 567 | regs->regs[ 5] = (unsigned long) &frame->rs_info; |
573 | regs->regs[ 6] = (unsigned long) &frame->rs_uc; | 568 | regs->regs[ 6] = (unsigned long) &frame->rs_uc; |
574 | regs->regs[29] = (unsigned long) frame; | 569 | regs->regs[29] = (unsigned long) frame; |
575 | regs->regs[31] = (unsigned long) sig_return; | 570 | regs->regs[31] = (unsigned long) sig_return; |
576 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; | 571 | regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler; |
577 | 572 | ||
578 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", | 573 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", |
579 | current->comm, current->pid, | 574 | current->comm, current->pid, |
580 | frame, regs->cp0_epc, regs->regs[31]); | 575 | frame, regs->cp0_epc, regs->regs[31]); |
581 | 576 | ||
582 | return 0; | 577 | return 0; |
583 | |||
584 | give_sigsegv: | ||
585 | force_sigsegv(signr, current); | ||
586 | return -EFAULT; | ||
587 | } | 578 | } |
588 | 579 | ||
589 | /* | 580 | /* |
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index b2241bb9cac1..f1d4751eead0 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c | |||
@@ -102,18 +102,18 @@ badframe: | |||
102 | force_sig(SIGSEGV, current); | 102 | force_sig(SIGSEGV, current); |
103 | } | 103 | } |
104 | 104 | ||
105 | static int setup_rt_frame_n32(void *sig_return, struct k_sigaction *ka, | 105 | static int setup_rt_frame_n32(void *sig_return, struct ksignal *ksig, |
106 | struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info) | 106 | struct pt_regs *regs, sigset_t *set) |
107 | { | 107 | { |
108 | struct rt_sigframe_n32 __user *frame; | 108 | struct rt_sigframe_n32 __user *frame; |
109 | int err = 0; | 109 | int err = 0; |
110 | 110 | ||
111 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 111 | frame = get_sigframe(ksig, regs, sizeof(*frame)); |
112 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) | 112 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) |
113 | goto give_sigsegv; | 113 | return -EFAULT; |
114 | 114 | ||
115 | /* Create siginfo. */ | 115 | /* Create siginfo. */ |
116 | err |= copy_siginfo_to_user32(&frame->rs_info, info); | 116 | err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info); |
117 | 117 | ||
118 | /* Create the ucontext. */ | 118 | /* Create the ucontext. */ |
119 | err |= __put_user(0, &frame->rs_uc.uc_flags); | 119 | err |= __put_user(0, &frame->rs_uc.uc_flags); |
@@ -123,7 +123,7 @@ static int setup_rt_frame_n32(void *sig_return, struct k_sigaction *ka, | |||
123 | err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set); | 123 | err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set); |
124 | 124 | ||
125 | if (err) | 125 | if (err) |
126 | goto give_sigsegv; | 126 | return -EFAULT; |
127 | 127 | ||
128 | /* | 128 | /* |
129 | * Arguments to signal handler: | 129 | * Arguments to signal handler: |
@@ -135,22 +135,18 @@ static int setup_rt_frame_n32(void *sig_return, struct k_sigaction *ka, | |||
135 | * $25 and c0_epc point to the signal handler, $29 points to | 135 | * $25 and c0_epc point to the signal handler, $29 points to |
136 | * the struct rt_sigframe. | 136 | * the struct rt_sigframe. |
137 | */ | 137 | */ |
138 | regs->regs[ 4] = signr; | 138 | regs->regs[ 4] = ksig->sig; |
139 | regs->regs[ 5] = (unsigned long) &frame->rs_info; | 139 | regs->regs[ 5] = (unsigned long) &frame->rs_info; |
140 | regs->regs[ 6] = (unsigned long) &frame->rs_uc; | 140 | regs->regs[ 6] = (unsigned long) &frame->rs_uc; |
141 | regs->regs[29] = (unsigned long) frame; | 141 | regs->regs[29] = (unsigned long) frame; |
142 | regs->regs[31] = (unsigned long) sig_return; | 142 | regs->regs[31] = (unsigned long) sig_return; |
143 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; | 143 | regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler; |
144 | 144 | ||
145 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", | 145 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", |
146 | current->comm, current->pid, | 146 | current->comm, current->pid, |
147 | frame, regs->cp0_epc, regs->regs[31]); | 147 | frame, regs->cp0_epc, regs->regs[31]); |
148 | 148 | ||
149 | return 0; | 149 | return 0; |
150 | |||
151 | give_sigsegv: | ||
152 | force_sigsegv(signr, current); | ||
153 | return -EFAULT; | ||
154 | } | 150 | } |
155 | 151 | ||
156 | struct mips_abi mips_abi_n32 = { | 152 | struct mips_abi mips_abi_n32 = { |
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c index 9dfac5cd16e6..a6c0858592c3 100644 --- a/arch/mn10300/kernel/signal.c +++ b/arch/mn10300/kernel/signal.c | |||
@@ -186,20 +186,11 @@ static int setup_sigcontext(struct sigcontext __user *sc, | |||
186 | /* | 186 | /* |
187 | * determine which stack to use.. | 187 | * determine which stack to use.. |
188 | */ | 188 | */ |
189 | static inline void __user *get_sigframe(struct k_sigaction *ka, | 189 | static inline void __user *get_sigframe(struct ksignal *ksig, |
190 | struct pt_regs *regs, | 190 | struct pt_regs *regs, |
191 | size_t frame_size) | 191 | size_t frame_size) |
192 | { | 192 | { |
193 | unsigned long sp; | 193 | unsigned long sp = sigsp(regs->sp, ksig); |
194 | |||
195 | /* default to using normal stack */ | ||
196 | sp = regs->sp; | ||
197 | |||
198 | /* this is the X/Open sanctioned signal stack switching. */ | ||
199 | if (ka->sa.sa_flags & SA_ONSTACK) { | ||
200 | if (sas_ss_flags(sp) == 0) | ||
201 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
202 | } | ||
203 | 194 | ||
204 | return (void __user *) ((sp - frame_size) & ~7UL); | 195 | return (void __user *) ((sp - frame_size) & ~7UL); |
205 | } | 196 | } |
@@ -207,16 +198,16 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, | |||
207 | /* | 198 | /* |
208 | * set up a normal signal frame | 199 | * set up a normal signal frame |
209 | */ | 200 | */ |
210 | static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, | 201 | static int setup_frame(struct ksignal *ksig, sigset_t *set, |
211 | struct pt_regs *regs) | 202 | struct pt_regs *regs) |
212 | { | 203 | { |
213 | struct sigframe __user *frame; | 204 | struct sigframe __user *frame; |
214 | int rsig; | 205 | int rsig, sig = ksig->sig; |
215 | 206 | ||
216 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 207 | frame = get_sigframe(ksig, regs, sizeof(*frame)); |
217 | 208 | ||
218 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 209 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
219 | goto give_sigsegv; | 210 | return -EFAULT; |
220 | 211 | ||
221 | rsig = sig; | 212 | rsig = sig; |
222 | if (sig < 32 && | 213 | if (sig < 32 && |
@@ -226,40 +217,40 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, | |||
226 | 217 | ||
227 | if (__put_user(rsig, &frame->sig) < 0 || | 218 | if (__put_user(rsig, &frame->sig) < 0 || |
228 | __put_user(&frame->sc, &frame->psc) < 0) | 219 | __put_user(&frame->sc, &frame->psc) < 0) |
229 | goto give_sigsegv; | 220 | return -EFAULT; |
230 | 221 | ||
231 | if (setup_sigcontext(&frame->sc, &frame->fpuctx, regs, set->sig[0])) | 222 | if (setup_sigcontext(&frame->sc, &frame->fpuctx, regs, set->sig[0])) |
232 | goto give_sigsegv; | 223 | return -EFAULT; |
233 | 224 | ||
234 | if (_NSIG_WORDS > 1) { | 225 | if (_NSIG_WORDS > 1) { |
235 | if (__copy_to_user(frame->extramask, &set->sig[1], | 226 | if (__copy_to_user(frame->extramask, &set->sig[1], |
236 | sizeof(frame->extramask))) | 227 | sizeof(frame->extramask))) |
237 | goto give_sigsegv; | 228 | return -EFAULT; |
238 | } | 229 | } |
239 | 230 | ||
240 | /* set up to return from userspace. If provided, use a stub already in | 231 | /* set up to return from userspace. If provided, use a stub already in |
241 | * userspace */ | 232 | * userspace */ |
242 | if (ka->sa.sa_flags & SA_RESTORER) { | 233 | if (ksig->ka.sa.sa_flags & SA_RESTORER) { |
243 | if (__put_user(ka->sa.sa_restorer, &frame->pretcode)) | 234 | if (__put_user(ksig->ka.sa.sa_restorer, &frame->pretcode)) |
244 | goto give_sigsegv; | 235 | return -EFAULT; |
245 | } else { | 236 | } else { |
246 | if (__put_user((void (*)(void))frame->retcode, | 237 | if (__put_user((void (*)(void))frame->retcode, |
247 | &frame->pretcode)) | 238 | &frame->pretcode)) |
248 | goto give_sigsegv; | 239 | return -EFAULT; |
249 | /* this is mov $,d0; syscall 0 */ | 240 | /* this is mov $,d0; syscall 0 */ |
250 | if (__put_user(0x2c, (char *)(frame->retcode + 0)) || | 241 | if (__put_user(0x2c, (char *)(frame->retcode + 0)) || |
251 | __put_user(__NR_sigreturn, (char *)(frame->retcode + 1)) || | 242 | __put_user(__NR_sigreturn, (char *)(frame->retcode + 1)) || |
252 | __put_user(0x00, (char *)(frame->retcode + 2)) || | 243 | __put_user(0x00, (char *)(frame->retcode + 2)) || |
253 | __put_user(0xf0, (char *)(frame->retcode + 3)) || | 244 | __put_user(0xf0, (char *)(frame->retcode + 3)) || |
254 | __put_user(0xe0, (char *)(frame->retcode + 4))) | 245 | __put_user(0xe0, (char *)(frame->retcode + 4))) |
255 | goto give_sigsegv; | 246 | return -EFAULT; |
256 | flush_icache_range((unsigned long) frame->retcode, | 247 | flush_icache_range((unsigned long) frame->retcode, |
257 | (unsigned long) frame->retcode + 5); | 248 | (unsigned long) frame->retcode + 5); |
258 | } | 249 | } |
259 | 250 | ||
260 | /* set up registers for signal handler */ | 251 | /* set up registers for signal handler */ |
261 | regs->sp = (unsigned long) frame; | 252 | regs->sp = (unsigned long) frame; |
262 | regs->pc = (unsigned long) ka->sa.sa_handler; | 253 | regs->pc = (unsigned long) ksig->ka.sa.sa_handler; |
263 | regs->d0 = sig; | 254 | regs->d0 = sig; |
264 | regs->d1 = (unsigned long) &frame->sc; | 255 | regs->d1 = (unsigned long) &frame->sc; |
265 | 256 | ||
@@ -270,25 +261,21 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, | |||
270 | #endif | 261 | #endif |
271 | 262 | ||
272 | return 0; | 263 | return 0; |
273 | |||
274 | give_sigsegv: | ||
275 | force_sigsegv(sig, current); | ||
276 | return -EFAULT; | ||
277 | } | 264 | } |
278 | 265 | ||
279 | /* | 266 | /* |
280 | * set up a realtime signal frame | 267 | * set up a realtime signal frame |
281 | */ | 268 | */ |
282 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 269 | static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, |
283 | sigset_t *set, struct pt_regs *regs) | 270 | struct pt_regs *regs) |
284 | { | 271 | { |
285 | struct rt_sigframe __user *frame; | 272 | struct rt_sigframe __user *frame; |
286 | int rsig; | 273 | int rsig, sig = ksig->sig; |
287 | 274 | ||
288 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 275 | frame = get_sigframe(ksig, regs, sizeof(*frame)); |
289 | 276 | ||
290 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 277 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
291 | goto give_sigsegv; | 278 | return -EFAULT; |
292 | 279 | ||
293 | rsig = sig; | 280 | rsig = sig; |
294 | if (sig < 32 && | 281 | if (sig < 32 && |
@@ -299,8 +286,8 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
299 | if (__put_user(rsig, &frame->sig) || | 286 | if (__put_user(rsig, &frame->sig) || |
300 | __put_user(&frame->info, &frame->pinfo) || | 287 | __put_user(&frame->info, &frame->pinfo) || |
301 | __put_user(&frame->uc, &frame->puc) || | 288 | __put_user(&frame->uc, &frame->puc) || |
302 | copy_siginfo_to_user(&frame->info, info)) | 289 | copy_siginfo_to_user(&frame->info, &ksig->info)) |
303 | goto give_sigsegv; | 290 | return -EFAULT; |
304 | 291 | ||
305 | /* create the ucontext. */ | 292 | /* create the ucontext. */ |
306 | if (__put_user(0, &frame->uc.uc_flags) || | 293 | if (__put_user(0, &frame->uc.uc_flags) || |
@@ -309,13 +296,14 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
309 | setup_sigcontext(&frame->uc.uc_mcontext, | 296 | setup_sigcontext(&frame->uc.uc_mcontext, |
310 | &frame->fpuctx, regs, set->sig[0]) || | 297 | &frame->fpuctx, regs, set->sig[0]) || |
311 | __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set))) | 298 | __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set))) |
312 | goto give_sigsegv; | 299 | return -EFAULT; |
313 | 300 | ||
314 | /* set up to return from userspace. If provided, use a stub already in | 301 | /* set up to return from userspace. If provided, use a stub already in |
315 | * userspace */ | 302 | * userspace */ |
316 | if (ka->sa.sa_flags & SA_RESTORER) { | 303 | if (ksig->ka.sa.sa_flags & SA_RESTORER) { |
317 | if (__put_user(ka->sa.sa_restorer, &frame->pretcode)) | 304 | if (__put_user(ksig->ka.sa.sa_restorer, &frame->pretcode)) |
318 | goto give_sigsegv; | 305 | return -EFAULT; |
306 | |||
319 | } else { | 307 | } else { |
320 | if (__put_user((void(*)(void))frame->retcode, | 308 | if (__put_user((void(*)(void))frame->retcode, |
321 | &frame->pretcode) || | 309 | &frame->pretcode) || |
@@ -326,7 +314,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
326 | __put_user(0x00, (char *)(frame->retcode + 2)) || | 314 | __put_user(0x00, (char *)(frame->retcode + 2)) || |
327 | __put_user(0xf0, (char *)(frame->retcode + 3)) || | 315 | __put_user(0xf0, (char *)(frame->retcode + 3)) || |
328 | __put_user(0xe0, (char *)(frame->retcode + 4))) | 316 | __put_user(0xe0, (char *)(frame->retcode + 4))) |
329 | goto give_sigsegv; | 317 | return -EFAULT; |
330 | 318 | ||
331 | flush_icache_range((u_long) frame->retcode, | 319 | flush_icache_range((u_long) frame->retcode, |
332 | (u_long) frame->retcode + 5); | 320 | (u_long) frame->retcode + 5); |
@@ -334,7 +322,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
334 | 322 | ||
335 | /* Set up registers for signal handler */ | 323 | /* Set up registers for signal handler */ |
336 | regs->sp = (unsigned long) frame; | 324 | regs->sp = (unsigned long) frame; |
337 | regs->pc = (unsigned long) ka->sa.sa_handler; | 325 | regs->pc = (unsigned long) ksig->ka.sa.sa_handler; |
338 | regs->d0 = sig; | 326 | regs->d0 = sig; |
339 | regs->d1 = (long) &frame->info; | 327 | regs->d1 = (long) &frame->info; |
340 | 328 | ||
@@ -345,10 +333,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
345 | #endif | 333 | #endif |
346 | 334 | ||
347 | return 0; | 335 | return 0; |
348 | |||
349 | give_sigsegv: | ||
350 | force_sigsegv(sig, current); | ||
351 | return -EFAULT; | ||
352 | } | 336 | } |
353 | 337 | ||
354 | static inline void stepback(struct pt_regs *regs) | 338 | static inline void stepback(struct pt_regs *regs) |
@@ -360,9 +344,7 @@ static inline void stepback(struct pt_regs *regs) | |||
360 | /* | 344 | /* |
361 | * handle the actual delivery of a signal to userspace | 345 | * handle the actual delivery of a signal to userspace |
362 | */ | 346 | */ |
363 | static int handle_signal(int sig, | 347 | static int handle_signal(struct ksignal *ksig, struct pt_regs *regs) |
364 | siginfo_t *info, struct k_sigaction *ka, | ||
365 | struct pt_regs *regs) | ||
366 | { | 348 | { |
367 | sigset_t *oldset = sigmask_to_save(); | 349 | sigset_t *oldset = sigmask_to_save(); |
368 | int ret; | 350 | int ret; |
@@ -377,7 +359,7 @@ static int handle_signal(int sig, | |||
377 | break; | 359 | break; |
378 | 360 | ||
379 | case -ERESTARTSYS: | 361 | case -ERESTARTSYS: |
380 | if (!(ka->sa.sa_flags & SA_RESTART)) { | 362 | if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { |
381 | regs->d0 = -EINTR; | 363 | regs->d0 = -EINTR; |
382 | break; | 364 | break; |
383 | } | 365 | } |
@@ -390,15 +372,12 @@ static int handle_signal(int sig, | |||
390 | } | 372 | } |
391 | 373 | ||
392 | /* Set up the stack frame */ | 374 | /* Set up the stack frame */ |
393 | if (ka->sa.sa_flags & SA_SIGINFO) | 375 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) |
394 | ret = setup_rt_frame(sig, ka, info, oldset, regs); | 376 | ret = setup_rt_frame(ksig, oldset, regs); |
395 | else | 377 | else |
396 | ret = setup_frame(sig, ka, oldset, regs); | 378 | ret = setup_frame(ksig, oldset, regs); |
397 | if (ret) | ||
398 | return ret; | ||
399 | 379 | ||
400 | signal_delivered(sig, info, ka, regs, | 380 | signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP)); |
401 | test_thread_flag(TIF_SINGLESTEP)); | ||
402 | return 0; | 381 | return 0; |
403 | } | 382 | } |
404 | 383 | ||
@@ -407,15 +386,10 @@ static int handle_signal(int sig, | |||
407 | */ | 386 | */ |
408 | static void do_signal(struct pt_regs *regs) | 387 | static void do_signal(struct pt_regs *regs) |
409 | { | 388 | { |
410 | struct k_sigaction ka; | 389 | struct ksignal ksig; |
411 | siginfo_t info; | ||
412 | int signr; | ||
413 | |||
414 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | ||
415 | if (signr > 0) { | ||
416 | if (handle_signal(signr, &info, &ka, regs) == 0) { | ||
417 | } | ||
418 | 390 | ||
391 | if (get_signal(&ksig)) { | ||
392 | handle_signal(&ksig, regs); | ||
419 | return; | 393 | return; |
420 | } | 394 | } |
421 | 395 | ||
diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c index 66775bc07a8e..7d1b8235bf90 100644 --- a/arch/openrisc/kernel/signal.c +++ b/arch/openrisc/kernel/signal.c | |||
@@ -132,30 +132,16 @@ static inline unsigned long align_sigframe(unsigned long sp) | |||
132 | * or the alternate stack. | 132 | * or the alternate stack. |
133 | */ | 133 | */ |
134 | 134 | ||
135 | static inline void __user *get_sigframe(struct k_sigaction *ka, | 135 | static inline void __user *get_sigframe(struct ksignal *ksig, |
136 | struct pt_regs *regs, size_t frame_size) | 136 | struct pt_regs *regs, size_t frame_size) |
137 | { | 137 | { |
138 | unsigned long sp = regs->sp; | 138 | unsigned long sp = regs->sp; |
139 | int onsigstack = on_sig_stack(sp); | ||
140 | 139 | ||
141 | /* redzone */ | 140 | /* redzone */ |
142 | sp -= STACK_FRAME_OVERHEAD; | 141 | sp -= STACK_FRAME_OVERHEAD; |
143 | 142 | sp = sigsp(sp, ksig); | |
144 | /* This is the X/Open sanctioned signal stack switching. */ | ||
145 | if ((ka->sa.sa_flags & SA_ONSTACK) && !onsigstack) { | ||
146 | if (current->sas_ss_size) | ||
147 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
148 | } | ||
149 | |||
150 | sp = align_sigframe(sp - frame_size); | 143 | sp = align_sigframe(sp - frame_size); |
151 | 144 | ||
152 | /* | ||
153 | * If we are on the alternate signal stack and would overflow it, don't. | ||
154 | * Return an always-bogus address instead so we will die with SIGSEGV. | ||
155 | */ | ||
156 | if (onsigstack && !likely(on_sig_stack(sp))) | ||
157 | return (void __user *)-1L; | ||
158 | |||
159 | return (void __user *)sp; | 145 | return (void __user *)sp; |
160 | } | 146 | } |
161 | 147 | ||
@@ -173,7 +159,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, | |||
173 | unsigned long return_ip; | 159 | unsigned long return_ip; |
174 | int err = 0; | 160 | int err = 0; |
175 | 161 | ||
176 | frame = get_sigframe(&ksig->ka, regs, sizeof(*frame)); | 162 | frame = get_sigframe(ksig, regs, sizeof(*frame)); |
177 | 163 | ||
178 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 164 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
179 | return -EFAULT; | 165 | return -EFAULT; |
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 1cba8f29bb49..012d4fa63d97 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c | |||
@@ -227,8 +227,8 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, int in_sysc | |||
227 | } | 227 | } |
228 | 228 | ||
229 | static long | 229 | static long |
230 | setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 230 | setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs, |
231 | sigset_t *set, struct pt_regs *regs, int in_syscall) | 231 | int in_syscall) |
232 | { | 232 | { |
233 | struct rt_sigframe __user *frame; | 233 | struct rt_sigframe __user *frame; |
234 | unsigned long rp, usp; | 234 | unsigned long rp, usp; |
@@ -241,10 +241,10 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
241 | 241 | ||
242 | usp = (regs->gr[30] & ~(0x01UL)); | 242 | usp = (regs->gr[30] & ~(0x01UL)); |
243 | /*FIXME: frame_size parameter is unused, remove it. */ | 243 | /*FIXME: frame_size parameter is unused, remove it. */ |
244 | frame = get_sigframe(ka, usp, sizeof(*frame)); | 244 | frame = get_sigframe(&ksig->ka, usp, sizeof(*frame)); |
245 | 245 | ||
246 | DBG(1,"SETUP_RT_FRAME: START\n"); | 246 | DBG(1,"SETUP_RT_FRAME: START\n"); |
247 | DBG(1,"setup_rt_frame: frame %p info %p\n", frame, info); | 247 | DBG(1,"setup_rt_frame: frame %p info %p\n", frame, ksig->info); |
248 | 248 | ||
249 | 249 | ||
250 | #ifdef CONFIG_64BIT | 250 | #ifdef CONFIG_64BIT |
@@ -253,7 +253,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
253 | 253 | ||
254 | if (is_compat_task()) { | 254 | if (is_compat_task()) { |
255 | DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &compat_frame->info); | 255 | DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &compat_frame->info); |
256 | err |= copy_siginfo_to_user32(&compat_frame->info, info); | 256 | err |= copy_siginfo_to_user32(&compat_frame->info, &ksig->info); |
257 | err |= __compat_save_altstack( &compat_frame->uc.uc_stack, regs->gr[30]); | 257 | err |= __compat_save_altstack( &compat_frame->uc.uc_stack, regs->gr[30]); |
258 | DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &compat_frame->uc); | 258 | DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &compat_frame->uc); |
259 | DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &compat_frame->uc.uc_mcontext); | 259 | DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &compat_frame->uc.uc_mcontext); |
@@ -265,7 +265,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
265 | #endif | 265 | #endif |
266 | { | 266 | { |
267 | DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &frame->info); | 267 | DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &frame->info); |
268 | err |= copy_siginfo_to_user(&frame->info, info); | 268 | err |= copy_siginfo_to_user(&frame->info, &ksig->info); |
269 | err |= __save_altstack(&frame->uc.uc_stack, regs->gr[30]); | 269 | err |= __save_altstack(&frame->uc.uc_stack, regs->gr[30]); |
270 | DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &frame->uc); | 270 | DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &frame->uc); |
271 | DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &frame->uc.uc_mcontext); | 271 | DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &frame->uc.uc_mcontext); |
@@ -275,7 +275,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
275 | } | 275 | } |
276 | 276 | ||
277 | if (err) | 277 | if (err) |
278 | goto give_sigsegv; | 278 | return -EFAULT; |
279 | 279 | ||
280 | /* Set up to return from userspace. If provided, use a stub | 280 | /* Set up to return from userspace. If provided, use a stub |
281 | already in userspace. The first words of tramp are used to | 281 | already in userspace. The first words of tramp are used to |
@@ -312,9 +312,9 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
312 | rp = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP]; | 312 | rp = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP]; |
313 | 313 | ||
314 | if (err) | 314 | if (err) |
315 | goto give_sigsegv; | 315 | return -EFAULT; |
316 | 316 | ||
317 | haddr = A(ka->sa.sa_handler); | 317 | haddr = A(ksig->ka.sa.sa_handler); |
318 | /* The sa_handler may be a pointer to a function descriptor */ | 318 | /* The sa_handler may be a pointer to a function descriptor */ |
319 | #ifdef CONFIG_64BIT | 319 | #ifdef CONFIG_64BIT |
320 | if (is_compat_task()) { | 320 | if (is_compat_task()) { |
@@ -326,7 +326,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
326 | err = __copy_from_user(&fdesc, ufdesc, sizeof(fdesc)); | 326 | err = __copy_from_user(&fdesc, ufdesc, sizeof(fdesc)); |
327 | 327 | ||
328 | if (err) | 328 | if (err) |
329 | goto give_sigsegv; | 329 | return -EFAULT; |
330 | 330 | ||
331 | haddr = fdesc.addr; | 331 | haddr = fdesc.addr; |
332 | regs->gr[19] = fdesc.gp; | 332 | regs->gr[19] = fdesc.gp; |
@@ -339,7 +339,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
339 | err = __copy_from_user(&fdesc, ufdesc, sizeof(fdesc)); | 339 | err = __copy_from_user(&fdesc, ufdesc, sizeof(fdesc)); |
340 | 340 | ||
341 | if (err) | 341 | if (err) |
342 | goto give_sigsegv; | 342 | return -EFAULT; |
343 | 343 | ||
344 | haddr = fdesc.addr; | 344 | haddr = fdesc.addr; |
345 | regs->gr[19] = fdesc.gp; | 345 | regs->gr[19] = fdesc.gp; |
@@ -386,7 +386,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
386 | } | 386 | } |
387 | 387 | ||
388 | regs->gr[2] = rp; /* userland return pointer */ | 388 | regs->gr[2] = rp; /* userland return pointer */ |
389 | regs->gr[26] = sig; /* signal number */ | 389 | regs->gr[26] = ksig->sig; /* signal number */ |
390 | 390 | ||
391 | #ifdef CONFIG_64BIT | 391 | #ifdef CONFIG_64BIT |
392 | if (is_compat_task()) { | 392 | if (is_compat_task()) { |
@@ -410,11 +410,6 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
410 | current->comm, current->pid, frame, regs->gr[30], | 410 | current->comm, current->pid, frame, regs->gr[30], |
411 | regs->iaoq[0], regs->iaoq[1], rp); | 411 | regs->iaoq[0], regs->iaoq[1], rp); |
412 | 412 | ||
413 | return 1; | ||
414 | |||
415 | give_sigsegv: | ||
416 | DBG(1,"setup_rt_frame: sending SIGSEGV\n"); | ||
417 | force_sigsegv(sig, current); | ||
418 | return 0; | 413 | return 0; |
419 | } | 414 | } |
420 | 415 | ||
@@ -423,20 +418,19 @@ give_sigsegv: | |||
423 | */ | 418 | */ |
424 | 419 | ||
425 | static void | 420 | static void |
426 | handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | 421 | handle_signal(struct ksignal *ksig, struct pt_regs *regs, int in_syscall) |
427 | struct pt_regs *regs, int in_syscall) | ||
428 | { | 422 | { |
423 | int ret; | ||
429 | sigset_t *oldset = sigmask_to_save(); | 424 | sigset_t *oldset = sigmask_to_save(); |
425 | |||
430 | DBG(1,"handle_signal: sig=%ld, ka=%p, info=%p, oldset=%p, regs=%p\n", | 426 | DBG(1,"handle_signal: sig=%ld, ka=%p, info=%p, oldset=%p, regs=%p\n", |
431 | sig, ka, info, oldset, regs); | 427 | ksig->sig, ksig->ka, ksig->info, oldset, regs); |
432 | 428 | ||
433 | /* Set up the stack frame */ | 429 | /* Set up the stack frame */ |
434 | if (!setup_rt_frame(sig, ka, info, oldset, regs, in_syscall)) | 430 | ret = setup_rt_frame(ksig, oldset, regs, in_syscall); |
435 | return; | ||
436 | 431 | ||
437 | signal_delivered(sig, info, ka, regs, | 432 | signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP) || |
438 | test_thread_flag(TIF_SINGLESTEP) || | 433 | test_thread_flag(TIF_BLOCKSTEP)); |
439 | test_thread_flag(TIF_BLOCKSTEP)); | ||
440 | 434 | ||
441 | DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n", | 435 | DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n", |
442 | regs->gr[28]); | 436 | regs->gr[28]); |
@@ -544,22 +538,18 @@ insert_restart_trampoline(struct pt_regs *regs) | |||
544 | asmlinkage void | 538 | asmlinkage void |
545 | do_signal(struct pt_regs *regs, long in_syscall) | 539 | do_signal(struct pt_regs *regs, long in_syscall) |
546 | { | 540 | { |
547 | siginfo_t info; | 541 | struct ksignal ksig; |
548 | struct k_sigaction ka; | ||
549 | int signr; | ||
550 | 542 | ||
551 | DBG(1,"\ndo_signal: regs=0x%p, sr7 %#lx, in_syscall=%d\n", | 543 | DBG(1,"\ndo_signal: regs=0x%p, sr7 %#lx, in_syscall=%d\n", |
552 | regs, regs->sr[7], in_syscall); | 544 | regs, regs->sr[7], in_syscall); |
553 | 545 | ||
554 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 546 | if (get_signal(&ksig)) { |
555 | DBG(3,"do_signal: signr = %d, regs->gr[28] = %ld\n", signr, regs->gr[28]); | 547 | DBG(3,"do_signal: signr = %d, regs->gr[28] = %ld\n", signr, regs->gr[28]); |
556 | |||
557 | if (signr > 0) { | ||
558 | /* Restart a system call if necessary. */ | 548 | /* Restart a system call if necessary. */ |
559 | if (in_syscall) | 549 | if (in_syscall) |
560 | syscall_restart(regs, &ka); | 550 | syscall_restart(regs, &ksig.ka); |
561 | 551 | ||
562 | handle_signal(signr, &info, &ka, regs, in_syscall); | 552 | handle_signal(&ksig, regs, in_syscall); |
563 | return; | 553 | return; |
564 | } | 554 | } |
565 | 555 | ||
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index 1c794cef2883..cf8c7e4e0b21 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c | |||
@@ -31,20 +31,14 @@ int show_unhandled_signals = 1; | |||
31 | /* | 31 | /* |
32 | * Allocate space for the signal frame | 32 | * Allocate space for the signal frame |
33 | */ | 33 | */ |
34 | void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp, | 34 | void __user *get_sigframe(struct ksignal *ksig, unsigned long sp, |
35 | size_t frame_size, int is_32) | 35 | size_t frame_size, int is_32) |
36 | { | 36 | { |
37 | unsigned long oldsp, newsp; | 37 | unsigned long oldsp, newsp; |
38 | 38 | ||
39 | /* Default to using normal stack */ | 39 | /* Default to using normal stack */ |
40 | oldsp = get_clean_sp(sp, is_32); | 40 | oldsp = get_clean_sp(sp, is_32); |
41 | 41 | oldsp = sigsp(oldsp, ksig); | |
42 | /* Check for alt stack */ | ||
43 | if ((ka->sa.sa_flags & SA_ONSTACK) && | ||
44 | current->sas_ss_size && !on_sig_stack(oldsp)) | ||
45 | oldsp = (current->sas_ss_sp + current->sas_ss_size); | ||
46 | |||
47 | /* Get aligned frame */ | ||
48 | newsp = (oldsp - frame_size) & ~0xFUL; | 42 | newsp = (oldsp - frame_size) & ~0xFUL; |
49 | 43 | ||
50 | /* Check access */ | 44 | /* Check access */ |
@@ -105,25 +99,23 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka, | |||
105 | } | 99 | } |
106 | } | 100 | } |
107 | 101 | ||
108 | static int do_signal(struct pt_regs *regs) | 102 | static void do_signal(struct pt_regs *regs) |
109 | { | 103 | { |
110 | sigset_t *oldset = sigmask_to_save(); | 104 | sigset_t *oldset = sigmask_to_save(); |
111 | siginfo_t info; | 105 | struct ksignal ksig; |
112 | int signr; | ||
113 | struct k_sigaction ka; | ||
114 | int ret; | 106 | int ret; |
115 | int is32 = is_32bit_task(); | 107 | int is32 = is_32bit_task(); |
116 | 108 | ||
117 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 109 | get_signal(&ksig); |
118 | 110 | ||
119 | /* Is there any syscall restart business here ? */ | 111 | /* Is there any syscall restart business here ? */ |
120 | check_syscall_restart(regs, &ka, signr > 0); | 112 | check_syscall_restart(regs, &ksig.ka, ksig.sig > 0); |
121 | 113 | ||
122 | if (signr <= 0) { | 114 | if (ksig.sig <= 0) { |
123 | /* No signal to deliver -- put the saved sigmask back */ | 115 | /* No signal to deliver -- put the saved sigmask back */ |
124 | restore_saved_sigmask(); | 116 | restore_saved_sigmask(); |
125 | regs->trap = 0; | 117 | regs->trap = 0; |
126 | return 0; /* no signals delivered */ | 118 | return; /* no signals delivered */ |
127 | } | 119 | } |
128 | 120 | ||
129 | #ifndef CONFIG_PPC_ADV_DEBUG_REGS | 121 | #ifndef CONFIG_PPC_ADV_DEBUG_REGS |
@@ -140,23 +132,16 @@ static int do_signal(struct pt_regs *regs) | |||
140 | thread_change_pc(current, regs); | 132 | thread_change_pc(current, regs); |
141 | 133 | ||
142 | if (is32) { | 134 | if (is32) { |
143 | if (ka.sa.sa_flags & SA_SIGINFO) | 135 | if (ksig.ka.sa.sa_flags & SA_SIGINFO) |
144 | ret = handle_rt_signal32(signr, &ka, &info, oldset, | 136 | ret = handle_rt_signal32(&ksig, oldset, regs); |
145 | regs); | ||
146 | else | 137 | else |
147 | ret = handle_signal32(signr, &ka, &info, oldset, | 138 | ret = handle_signal32(&ksig, oldset, regs); |
148 | regs); | ||
149 | } else { | 139 | } else { |
150 | ret = handle_rt_signal64(signr, &ka, &info, oldset, regs); | 140 | ret = handle_rt_signal64(&ksig, oldset, regs); |
151 | } | 141 | } |
152 | 142 | ||
153 | regs->trap = 0; | 143 | regs->trap = 0; |
154 | if (ret) { | 144 | signal_setup_done(ret, &ksig, test_thread_flag(TIF_SINGLESTEP)); |
155 | signal_delivered(signr, &info, &ka, regs, | ||
156 | test_thread_flag(TIF_SINGLESTEP)); | ||
157 | } | ||
158 | |||
159 | return ret; | ||
160 | } | 145 | } |
161 | 146 | ||
162 | void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) | 147 | void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) |
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h index c69b9aeb9f23..51b274199dd9 100644 --- a/arch/powerpc/kernel/signal.h +++ b/arch/powerpc/kernel/signal.h | |||
@@ -12,15 +12,13 @@ | |||
12 | 12 | ||
13 | extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags); | 13 | extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags); |
14 | 14 | ||
15 | extern void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp, | 15 | extern void __user *get_sigframe(struct ksignal *ksig, unsigned long sp, |
16 | size_t frame_size, int is_32); | 16 | size_t frame_size, int is_32); |
17 | 17 | ||
18 | extern int handle_signal32(unsigned long sig, struct k_sigaction *ka, | 18 | extern int handle_signal32(struct ksignal *ksig, sigset_t *oldset, |
19 | siginfo_t *info, sigset_t *oldset, | ||
20 | struct pt_regs *regs); | 19 | struct pt_regs *regs); |
21 | 20 | ||
22 | extern int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, | 21 | extern int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset, |
23 | siginfo_t *info, sigset_t *oldset, | ||
24 | struct pt_regs *regs); | 22 | struct pt_regs *regs); |
25 | 23 | ||
26 | extern unsigned long copy_fpr_to_user(void __user *to, | 24 | extern unsigned long copy_fpr_to_user(void __user *to, |
@@ -44,14 +42,12 @@ extern unsigned long copy_transact_vsx_from_user(struct task_struct *task, | |||
44 | 42 | ||
45 | #ifdef CONFIG_PPC64 | 43 | #ifdef CONFIG_PPC64 |
46 | 44 | ||
47 | extern int handle_rt_signal64(int signr, struct k_sigaction *ka, | 45 | extern int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, |
48 | siginfo_t *info, sigset_t *set, | ||
49 | struct pt_regs *regs); | 46 | struct pt_regs *regs); |
50 | 47 | ||
51 | #else /* CONFIG_PPC64 */ | 48 | #else /* CONFIG_PPC64 */ |
52 | 49 | ||
53 | static inline int handle_rt_signal64(int signr, struct k_sigaction *ka, | 50 | static inline int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, |
54 | siginfo_t *info, sigset_t *set, | ||
55 | struct pt_regs *regs) | 51 | struct pt_regs *regs) |
56 | { | 52 | { |
57 | return -EFAULT; | 53 | return -EFAULT; |
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 1bc5a1755ed4..b171001698ff 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c | |||
@@ -981,9 +981,8 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from) | |||
981 | * Set up a signal frame for a "real-time" signal handler | 981 | * Set up a signal frame for a "real-time" signal handler |
982 | * (one which gets siginfo). | 982 | * (one which gets siginfo). |
983 | */ | 983 | */ |
984 | int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, | 984 | int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset, |
985 | siginfo_t *info, sigset_t *oldset, | 985 | struct pt_regs *regs) |
986 | struct pt_regs *regs) | ||
987 | { | 986 | { |
988 | struct rt_sigframe __user *rt_sf; | 987 | struct rt_sigframe __user *rt_sf; |
989 | struct mcontext __user *frame; | 988 | struct mcontext __user *frame; |
@@ -995,13 +994,13 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, | |||
995 | 994 | ||
996 | /* Set up Signal Frame */ | 995 | /* Set up Signal Frame */ |
997 | /* Put a Real Time Context onto stack */ | 996 | /* Put a Real Time Context onto stack */ |
998 | rt_sf = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*rt_sf), 1); | 997 | rt_sf = get_sigframe(ksig, get_tm_stackpointer(regs), sizeof(*rt_sf), 1); |
999 | addr = rt_sf; | 998 | addr = rt_sf; |
1000 | if (unlikely(rt_sf == NULL)) | 999 | if (unlikely(rt_sf == NULL)) |
1001 | goto badframe; | 1000 | goto badframe; |
1002 | 1001 | ||
1003 | /* Put the siginfo & fill in most of the ucontext */ | 1002 | /* Put the siginfo & fill in most of the ucontext */ |
1004 | if (copy_siginfo_to_user(&rt_sf->info, info) | 1003 | if (copy_siginfo_to_user(&rt_sf->info, &ksig->info) |
1005 | || __put_user(0, &rt_sf->uc.uc_flags) | 1004 | || __put_user(0, &rt_sf->uc.uc_flags) |
1006 | || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1]) | 1005 | || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1]) |
1007 | || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext), | 1006 | || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext), |
@@ -1051,15 +1050,15 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, | |||
1051 | 1050 | ||
1052 | /* Fill registers for signal handler */ | 1051 | /* Fill registers for signal handler */ |
1053 | regs->gpr[1] = newsp; | 1052 | regs->gpr[1] = newsp; |
1054 | regs->gpr[3] = sig; | 1053 | regs->gpr[3] = ksig->sig; |
1055 | regs->gpr[4] = (unsigned long) &rt_sf->info; | 1054 | regs->gpr[4] = (unsigned long) &rt_sf->info; |
1056 | regs->gpr[5] = (unsigned long) &rt_sf->uc; | 1055 | regs->gpr[5] = (unsigned long) &rt_sf->uc; |
1057 | regs->gpr[6] = (unsigned long) rt_sf; | 1056 | regs->gpr[6] = (unsigned long) rt_sf; |
1058 | regs->nip = (unsigned long) ka->sa.sa_handler; | 1057 | regs->nip = (unsigned long) ksig->ka.sa.sa_handler; |
1059 | /* enter the signal handler in native-endian mode */ | 1058 | /* enter the signal handler in native-endian mode */ |
1060 | regs->msr &= ~MSR_LE; | 1059 | regs->msr &= ~MSR_LE; |
1061 | regs->msr |= (MSR_KERNEL & MSR_LE); | 1060 | regs->msr |= (MSR_KERNEL & MSR_LE); |
1062 | return 1; | 1061 | return 0; |
1063 | 1062 | ||
1064 | badframe: | 1063 | badframe: |
1065 | if (show_unhandled_signals) | 1064 | if (show_unhandled_signals) |
@@ -1069,8 +1068,7 @@ badframe: | |||
1069 | current->comm, current->pid, | 1068 | current->comm, current->pid, |
1070 | addr, regs->nip, regs->link); | 1069 | addr, regs->nip, regs->link); |
1071 | 1070 | ||
1072 | force_sigsegv(sig, current); | 1071 | return 1; |
1073 | return 0; | ||
1074 | } | 1072 | } |
1075 | 1073 | ||
1076 | static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig) | 1074 | static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig) |
@@ -1409,8 +1407,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx, | |||
1409 | /* | 1407 | /* |
1410 | * OK, we're invoking a handler | 1408 | * OK, we're invoking a handler |
1411 | */ | 1409 | */ |
1412 | int handle_signal32(unsigned long sig, struct k_sigaction *ka, | 1410 | int handle_signal32(struct ksignal *ksig, sigset_t *oldset, struct pt_regs *regs) |
1413 | siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) | ||
1414 | { | 1411 | { |
1415 | struct sigcontext __user *sc; | 1412 | struct sigcontext __user *sc; |
1416 | struct sigframe __user *frame; | 1413 | struct sigframe __user *frame; |
@@ -1420,7 +1417,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka, | |||
1420 | unsigned long tramp; | 1417 | unsigned long tramp; |
1421 | 1418 | ||
1422 | /* Set up Signal Frame */ | 1419 | /* Set up Signal Frame */ |
1423 | frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 1); | 1420 | frame = get_sigframe(ksig, get_tm_stackpointer(regs), sizeof(*frame), 1); |
1424 | if (unlikely(frame == NULL)) | 1421 | if (unlikely(frame == NULL)) |
1425 | goto badframe; | 1422 | goto badframe; |
1426 | sc = (struct sigcontext __user *) &frame->sctx; | 1423 | sc = (struct sigcontext __user *) &frame->sctx; |
@@ -1428,7 +1425,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka, | |||
1428 | #if _NSIG != 64 | 1425 | #if _NSIG != 64 |
1429 | #error "Please adjust handle_signal()" | 1426 | #error "Please adjust handle_signal()" |
1430 | #endif | 1427 | #endif |
1431 | if (__put_user(to_user_ptr(ka->sa.sa_handler), &sc->handler) | 1428 | if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler) |
1432 | || __put_user(oldset->sig[0], &sc->oldmask) | 1429 | || __put_user(oldset->sig[0], &sc->oldmask) |
1433 | #ifdef CONFIG_PPC64 | 1430 | #ifdef CONFIG_PPC64 |
1434 | || __put_user((oldset->sig[0] >> 32), &sc->_unused[3]) | 1431 | || __put_user((oldset->sig[0] >> 32), &sc->_unused[3]) |
@@ -1436,7 +1433,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka, | |||
1436 | || __put_user(oldset->sig[1], &sc->_unused[3]) | 1433 | || __put_user(oldset->sig[1], &sc->_unused[3]) |
1437 | #endif | 1434 | #endif |
1438 | || __put_user(to_user_ptr(&frame->mctx), &sc->regs) | 1435 | || __put_user(to_user_ptr(&frame->mctx), &sc->regs) |
1439 | || __put_user(sig, &sc->signal)) | 1436 | || __put_user(ksig->sig, &sc->signal)) |
1440 | goto badframe; | 1437 | goto badframe; |
1441 | 1438 | ||
1442 | if (vdso32_sigtramp && current->mm->context.vdso_base) { | 1439 | if (vdso32_sigtramp && current->mm->context.vdso_base) { |
@@ -1471,12 +1468,12 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka, | |||
1471 | goto badframe; | 1468 | goto badframe; |
1472 | 1469 | ||
1473 | regs->gpr[1] = newsp; | 1470 | regs->gpr[1] = newsp; |
1474 | regs->gpr[3] = sig; | 1471 | regs->gpr[3] = ksig->sig; |
1475 | regs->gpr[4] = (unsigned long) sc; | 1472 | regs->gpr[4] = (unsigned long) sc; |
1476 | regs->nip = (unsigned long) ka->sa.sa_handler; | 1473 | regs->nip = (unsigned long) (unsigned long)ksig->ka.sa.sa_handler; |
1477 | /* enter the signal handler in big-endian mode */ | 1474 | /* enter the signal handler in big-endian mode */ |
1478 | regs->msr &= ~MSR_LE; | 1475 | regs->msr &= ~MSR_LE; |
1479 | return 1; | 1476 | return 0; |
1480 | 1477 | ||
1481 | badframe: | 1478 | badframe: |
1482 | if (show_unhandled_signals) | 1479 | if (show_unhandled_signals) |
@@ -1486,8 +1483,7 @@ badframe: | |||
1486 | current->comm, current->pid, | 1483 | current->comm, current->pid, |
1487 | frame, regs->nip, regs->link); | 1484 | frame, regs->nip, regs->link); |
1488 | 1485 | ||
1489 | force_sigsegv(sig, current); | 1486 | return 1; |
1490 | return 0; | ||
1491 | } | 1487 | } |
1492 | 1488 | ||
1493 | /* | 1489 | /* |
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 97c1e4b683fc..2cb0c94cafa5 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c | |||
@@ -708,20 +708,19 @@ badframe: | |||
708 | return 0; | 708 | return 0; |
709 | } | 709 | } |
710 | 710 | ||
711 | int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, | 711 | int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) |
712 | sigset_t *set, struct pt_regs *regs) | ||
713 | { | 712 | { |
714 | struct rt_sigframe __user *frame; | 713 | struct rt_sigframe __user *frame; |
715 | unsigned long newsp = 0; | 714 | unsigned long newsp = 0; |
716 | long err = 0; | 715 | long err = 0; |
717 | 716 | ||
718 | frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 0); | 717 | frame = get_sigframe(ksig, get_tm_stackpointer(regs), sizeof(*frame), 0); |
719 | if (unlikely(frame == NULL)) | 718 | if (unlikely(frame == NULL)) |
720 | goto badframe; | 719 | goto badframe; |
721 | 720 | ||
722 | err |= __put_user(&frame->info, &frame->pinfo); | 721 | err |= __put_user(&frame->info, &frame->pinfo); |
723 | err |= __put_user(&frame->uc, &frame->puc); | 722 | err |= __put_user(&frame->uc, &frame->puc); |
724 | err |= copy_siginfo_to_user(&frame->info, info); | 723 | err |= copy_siginfo_to_user(&frame->info, &ksig->info); |
725 | if (err) | 724 | if (err) |
726 | goto badframe; | 725 | goto badframe; |
727 | 726 | ||
@@ -736,15 +735,15 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, | |||
736 | err |= __put_user(&frame->uc_transact, &frame->uc.uc_link); | 735 | err |= __put_user(&frame->uc_transact, &frame->uc.uc_link); |
737 | err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext, | 736 | err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext, |
738 | &frame->uc_transact.uc_mcontext, | 737 | &frame->uc_transact.uc_mcontext, |
739 | regs, signr, | 738 | regs, ksig->sig, |
740 | NULL, | 739 | NULL, |
741 | (unsigned long)ka->sa.sa_handler); | 740 | (unsigned long)ksig->ka.sa.sa_handler); |
742 | } else | 741 | } else |
743 | #endif | 742 | #endif |
744 | { | 743 | { |
745 | err |= __put_user(0, &frame->uc.uc_link); | 744 | err |= __put_user(0, &frame->uc.uc_link); |
746 | err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, signr, | 745 | err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, ksig->sig, |
747 | NULL, (unsigned long)ka->sa.sa_handler, | 746 | NULL, (unsigned long)ksig->ka.sa.sa_handler, |
748 | 1); | 747 | 1); |
749 | } | 748 | } |
750 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | 749 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); |
@@ -770,7 +769,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, | |||
770 | 769 | ||
771 | /* Set up "regs" so we "return" to the signal handler. */ | 770 | /* Set up "regs" so we "return" to the signal handler. */ |
772 | if (is_elf2_task()) { | 771 | if (is_elf2_task()) { |
773 | regs->nip = (unsigned long) ka->sa.sa_handler; | 772 | regs->nip = (unsigned long) ksig->ka.sa.sa_handler; |
774 | regs->gpr[12] = regs->nip; | 773 | regs->gpr[12] = regs->nip; |
775 | } else { | 774 | } else { |
776 | /* Handler is *really* a pointer to the function descriptor for | 775 | /* Handler is *really* a pointer to the function descriptor for |
@@ -779,7 +778,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, | |||
779 | * entry is the TOC value we need to use. | 778 | * entry is the TOC value we need to use. |
780 | */ | 779 | */ |
781 | func_descr_t __user *funct_desc_ptr = | 780 | func_descr_t __user *funct_desc_ptr = |
782 | (func_descr_t __user *) ka->sa.sa_handler; | 781 | (func_descr_t __user *) ksig->ka.sa.sa_handler; |
783 | 782 | ||
784 | err |= get_user(regs->nip, &funct_desc_ptr->entry); | 783 | err |= get_user(regs->nip, &funct_desc_ptr->entry); |
785 | err |= get_user(regs->gpr[2], &funct_desc_ptr->toc); | 784 | err |= get_user(regs->gpr[2], &funct_desc_ptr->toc); |
@@ -789,9 +788,9 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, | |||
789 | regs->msr &= ~MSR_LE; | 788 | regs->msr &= ~MSR_LE; |
790 | regs->msr |= (MSR_KERNEL & MSR_LE); | 789 | regs->msr |= (MSR_KERNEL & MSR_LE); |
791 | regs->gpr[1] = newsp; | 790 | regs->gpr[1] = newsp; |
792 | regs->gpr[3] = signr; | 791 | regs->gpr[3] = ksig->sig; |
793 | regs->result = 0; | 792 | regs->result = 0; |
794 | if (ka->sa.sa_flags & SA_SIGINFO) { | 793 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) { |
795 | err |= get_user(regs->gpr[4], (unsigned long __user *)&frame->pinfo); | 794 | err |= get_user(regs->gpr[4], (unsigned long __user *)&frame->pinfo); |
796 | err |= get_user(regs->gpr[5], (unsigned long __user *)&frame->puc); | 795 | err |= get_user(regs->gpr[5], (unsigned long __user *)&frame->puc); |
797 | regs->gpr[6] = (unsigned long) frame; | 796 | regs->gpr[6] = (unsigned long) frame; |
@@ -801,7 +800,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, | |||
801 | if (err) | 800 | if (err) |
802 | goto badframe; | 801 | goto badframe; |
803 | 802 | ||
804 | return 1; | 803 | return 0; |
805 | 804 | ||
806 | badframe: | 805 | badframe: |
807 | if (show_unhandled_signals) | 806 | if (show_unhandled_signals) |
@@ -809,6 +808,5 @@ badframe: | |||
809 | current->comm, current->pid, "setup_rt_frame", | 808 | current->comm, current->pid, "setup_rt_frame", |
810 | (long)frame, regs->nip, regs->link); | 809 | (long)frame, regs->nip, regs->link); |
811 | 810 | ||
812 | force_sigsegv(signr, current); | 811 | return 1; |
813 | return 0; | ||
814 | } | 812 | } |
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index f204d6920368..598b0b42668b 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c | |||
@@ -320,38 +320,39 @@ static inline int map_signal(int sig) | |||
320 | return sig; | 320 | return sig; |
321 | } | 321 | } |
322 | 322 | ||
323 | static int setup_frame32(int sig, struct k_sigaction *ka, | 323 | static int setup_frame32(struct ksignal *ksig, sigset_t *set, |
324 | sigset_t *set, struct pt_regs * regs) | 324 | struct pt_regs *regs) |
325 | { | 325 | { |
326 | sigframe32 __user *frame = get_sigframe(ka, regs, sizeof(sigframe32)); | 326 | int sig = ksig->sig; |
327 | sigframe32 __user *frame = get_sigframe(&ksig->ka, regs, sizeof(sigframe32)); | ||
327 | 328 | ||
328 | if (frame == (void __user *) -1UL) | 329 | if (frame == (void __user *) -1UL) |
329 | goto give_sigsegv; | 330 | return -EFAULT; |
330 | 331 | ||
331 | if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32)) | 332 | if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32)) |
332 | goto give_sigsegv; | 333 | return -EFAULT; |
333 | 334 | ||
334 | if (save_sigregs32(regs, &frame->sregs)) | 335 | if (save_sigregs32(regs, &frame->sregs)) |
335 | goto give_sigsegv; | 336 | return -EFAULT; |
336 | if (save_sigregs_gprs_high(regs, frame->gprs_high)) | 337 | if (save_sigregs_gprs_high(regs, frame->gprs_high)) |
337 | goto give_sigsegv; | 338 | return -EFAULT; |
338 | if (__put_user((unsigned long) &frame->sregs, &frame->sc.sregs)) | 339 | if (__put_user((unsigned long) &frame->sregs, &frame->sc.sregs)) |
339 | goto give_sigsegv; | 340 | return -EFAULT; |
340 | 341 | ||
341 | /* Set up to return from userspace. If provided, use a stub | 342 | /* Set up to return from userspace. If provided, use a stub |
342 | already in userspace. */ | 343 | already in userspace. */ |
343 | if (ka->sa.sa_flags & SA_RESTORER) { | 344 | if (ksig->ka.sa.sa_flags & SA_RESTORER) { |
344 | regs->gprs[14] = (__u64 __force) ka->sa.sa_restorer | PSW32_ADDR_AMODE; | 345 | regs->gprs[14] = (__u64 __force) ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE; |
345 | } else { | 346 | } else { |
346 | regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE; | 347 | regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE; |
347 | if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, | 348 | if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, |
348 | (u16 __force __user *)(frame->retcode))) | 349 | (u16 __force __user *)(frame->retcode))) |
349 | goto give_sigsegv; | 350 | return -EFAULT; |
350 | } | 351 | } |
351 | 352 | ||
352 | /* Set up backchain. */ | 353 | /* Set up backchain. */ |
353 | if (__put_user(regs->gprs[15], (unsigned int __user *) frame)) | 354 | if (__put_user(regs->gprs[15], (unsigned int __user *) frame)) |
354 | goto give_sigsegv; | 355 | return -EFAULT; |
355 | 356 | ||
356 | /* Set up registers for signal handler */ | 357 | /* Set up registers for signal handler */ |
357 | regs->gprs[15] = (__force __u64) frame; | 358 | regs->gprs[15] = (__force __u64) frame; |
@@ -359,7 +360,7 @@ static int setup_frame32(int sig, struct k_sigaction *ka, | |||
359 | regs->psw.mask = PSW_MASK_BA | | 360 | regs->psw.mask = PSW_MASK_BA | |
360 | (PSW_USER_BITS & PSW_MASK_ASC) | | 361 | (PSW_USER_BITS & PSW_MASK_ASC) | |
361 | (regs->psw.mask & ~PSW_MASK_ASC); | 362 | (regs->psw.mask & ~PSW_MASK_ASC); |
362 | regs->psw.addr = (__force __u64) ka->sa.sa_handler; | 363 | regs->psw.addr = (__force __u64) ksig->ka.sa.sa_handler; |
363 | 364 | ||
364 | regs->gprs[2] = map_signal(sig); | 365 | regs->gprs[2] = map_signal(sig); |
365 | regs->gprs[3] = (__force __u64) &frame->sc; | 366 | regs->gprs[3] = (__force __u64) &frame->sc; |
@@ -376,25 +377,21 @@ static int setup_frame32(int sig, struct k_sigaction *ka, | |||
376 | 377 | ||
377 | /* Place signal number on stack to allow backtrace from handler. */ | 378 | /* Place signal number on stack to allow backtrace from handler. */ |
378 | if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo)) | 379 | if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo)) |
379 | goto give_sigsegv; | 380 | return -EFAULT; |
380 | return 0; | 381 | return 0; |
381 | |||
382 | give_sigsegv: | ||
383 | force_sigsegv(sig, current); | ||
384 | return -EFAULT; | ||
385 | } | 382 | } |
386 | 383 | ||
387 | static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, | 384 | static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set, |
388 | sigset_t *set, struct pt_regs * regs) | 385 | struct pt_regs *regs) |
389 | { | 386 | { |
390 | int err = 0; | 387 | int err = 0; |
391 | rt_sigframe32 __user *frame = get_sigframe(ka, regs, sizeof(rt_sigframe32)); | 388 | rt_sigframe32 __user *frame = get_sigframe(&ksig->ka, regs, sizeof(rt_sigframe32)); |
392 | 389 | ||
393 | if (frame == (void __user *) -1UL) | 390 | if (frame == (void __user *) -1UL) |
394 | goto give_sigsegv; | 391 | return -EFAULT; |
395 | 392 | ||
396 | if (copy_siginfo_to_user32(&frame->info, info)) | 393 | if (copy_siginfo_to_user32(&frame->info, &ksig->info)) |
397 | goto give_sigsegv; | 394 | return -EFAULT; |
398 | 395 | ||
399 | /* Create the ucontext. */ | 396 | /* Create the ucontext. */ |
400 | err |= __put_user(UC_EXTENDED, &frame->uc.uc_flags); | 397 | err |= __put_user(UC_EXTENDED, &frame->uc.uc_flags); |
@@ -404,22 +401,22 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
404 | err |= save_sigregs_gprs_high(regs, frame->gprs_high); | 401 | err |= save_sigregs_gprs_high(regs, frame->gprs_high); |
405 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | 402 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); |
406 | if (err) | 403 | if (err) |
407 | goto give_sigsegv; | 404 | return -EFAULT; |
408 | 405 | ||
409 | /* Set up to return from userspace. If provided, use a stub | 406 | /* Set up to return from userspace. If provided, use a stub |
410 | already in userspace. */ | 407 | already in userspace. */ |
411 | if (ka->sa.sa_flags & SA_RESTORER) { | 408 | if (ksig->ka.sa.sa_flags & SA_RESTORER) { |
412 | regs->gprs[14] = (__u64 __force) ka->sa.sa_restorer | PSW32_ADDR_AMODE; | 409 | regs->gprs[14] = (__u64 __force) ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE; |
413 | } else { | 410 | } else { |
414 | regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE; | 411 | regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE; |
415 | if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, | 412 | if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, |
416 | (u16 __force __user *)(frame->retcode))) | 413 | (u16 __force __user *)(frame->retcode))) |
417 | goto give_sigsegv; | 414 | return -EFAULT; |
418 | } | 415 | } |
419 | 416 | ||
420 | /* Set up backchain. */ | 417 | /* Set up backchain. */ |
421 | if (__put_user(regs->gprs[15], (unsigned int __force __user *) frame)) | 418 | if (__put_user(regs->gprs[15], (unsigned int __force __user *) frame)) |
422 | goto give_sigsegv; | 419 | return -EFAULT; |
423 | 420 | ||
424 | /* Set up registers for signal handler */ | 421 | /* Set up registers for signal handler */ |
425 | regs->gprs[15] = (__force __u64) frame; | 422 | regs->gprs[15] = (__force __u64) frame; |
@@ -427,36 +424,30 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
427 | regs->psw.mask = PSW_MASK_BA | | 424 | regs->psw.mask = PSW_MASK_BA | |
428 | (PSW_USER_BITS & PSW_MASK_ASC) | | 425 | (PSW_USER_BITS & PSW_MASK_ASC) | |
429 | (regs->psw.mask & ~PSW_MASK_ASC); | 426 | (regs->psw.mask & ~PSW_MASK_ASC); |
430 | regs->psw.addr = (__u64 __force) ka->sa.sa_handler; | 427 | regs->psw.addr = (__u64 __force) ksig->ka.sa.sa_handler; |
431 | 428 | ||
432 | regs->gprs[2] = map_signal(sig); | 429 | regs->gprs[2] = map_signal(ksig->sig); |
433 | regs->gprs[3] = (__force __u64) &frame->info; | 430 | regs->gprs[3] = (__force __u64) &frame->info; |
434 | regs->gprs[4] = (__force __u64) &frame->uc; | 431 | regs->gprs[4] = (__force __u64) &frame->uc; |
435 | regs->gprs[5] = task_thread_info(current)->last_break; | 432 | regs->gprs[5] = task_thread_info(current)->last_break; |
436 | return 0; | 433 | return 0; |
437 | |||
438 | give_sigsegv: | ||
439 | force_sigsegv(sig, current); | ||
440 | return -EFAULT; | ||
441 | } | 434 | } |
442 | 435 | ||
443 | /* | 436 | /* |
444 | * OK, we're invoking a handler | 437 | * OK, we're invoking a handler |
445 | */ | 438 | */ |
446 | 439 | ||
447 | void handle_signal32(unsigned long sig, struct k_sigaction *ka, | 440 | void handle_signal32(struct ksignal *ksig, sigset_t *oldset, |
448 | siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) | 441 | struct pt_regs *regs) |
449 | { | 442 | { |
450 | int ret; | 443 | int ret; |
451 | 444 | ||
452 | /* Set up the stack frame */ | 445 | /* Set up the stack frame */ |
453 | if (ka->sa.sa_flags & SA_SIGINFO) | 446 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) |
454 | ret = setup_rt_frame32(sig, ka, info, oldset, regs); | 447 | ret = setup_rt_frame32(ksig, oldset, regs); |
455 | else | 448 | else |
456 | ret = setup_frame32(sig, ka, oldset, regs); | 449 | ret = setup_frame32(ksig, oldset, regs); |
457 | if (ret) | 450 | |
458 | return; | 451 | signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLE_STEP)); |
459 | signal_delivered(sig, info, ka, regs, | ||
460 | test_thread_flag(TIF_SINGLE_STEP)); | ||
461 | } | 452 | } |
462 | 453 | ||
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index 6ac78192455f..1aad48398d06 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h | |||
@@ -48,8 +48,8 @@ void do_per_trap(struct pt_regs *regs); | |||
48 | void syscall_trace(struct pt_regs *regs, int entryexit); | 48 | void syscall_trace(struct pt_regs *regs, int entryexit); |
49 | void kernel_stack_overflow(struct pt_regs * regs); | 49 | void kernel_stack_overflow(struct pt_regs * regs); |
50 | void do_signal(struct pt_regs *regs); | 50 | void do_signal(struct pt_regs *regs); |
51 | void handle_signal32(unsigned long sig, struct k_sigaction *ka, | 51 | void handle_signal32(struct ksignal *ksig, sigset_t *oldset, |
52 | siginfo_t *info, sigset_t *oldset, struct pt_regs *regs); | 52 | struct pt_regs *regs); |
53 | void do_notify_resume(struct pt_regs *regs); | 53 | void do_notify_resume(struct pt_regs *regs); |
54 | 54 | ||
55 | void __init init_IRQ(void); | 55 | void __init init_IRQ(void); |
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 42b49f9e19bf..469c4c6d9182 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c | |||
@@ -200,15 +200,15 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
200 | frame = get_sigframe(ka, regs, sizeof(sigframe)); | 200 | frame = get_sigframe(ka, regs, sizeof(sigframe)); |
201 | 201 | ||
202 | if (frame == (void __user *) -1UL) | 202 | if (frame == (void __user *) -1UL) |
203 | goto give_sigsegv; | 203 | return -EFAULT; |
204 | 204 | ||
205 | if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE)) | 205 | if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE)) |
206 | goto give_sigsegv; | 206 | return -EFAULT; |
207 | 207 | ||
208 | if (save_sigregs(regs, &frame->sregs)) | 208 | if (save_sigregs(regs, &frame->sregs)) |
209 | goto give_sigsegv; | 209 | return -EFAULT; |
210 | if (__put_user(&frame->sregs, &frame->sc.sregs)) | 210 | if (__put_user(&frame->sregs, &frame->sc.sregs)) |
211 | goto give_sigsegv; | 211 | return -EFAULT; |
212 | 212 | ||
213 | /* Set up to return from userspace. If provided, use a stub | 213 | /* Set up to return from userspace. If provided, use a stub |
214 | already in userspace. */ | 214 | already in userspace. */ |
@@ -220,12 +220,12 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
220 | frame->retcode | PSW_ADDR_AMODE; | 220 | frame->retcode | PSW_ADDR_AMODE; |
221 | if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, | 221 | if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, |
222 | (u16 __user *)(frame->retcode))) | 222 | (u16 __user *)(frame->retcode))) |
223 | goto give_sigsegv; | 223 | return -EFAULT; |
224 | } | 224 | } |
225 | 225 | ||
226 | /* Set up backchain. */ | 226 | /* Set up backchain. */ |
227 | if (__put_user(regs->gprs[15], (addr_t __user *) frame)) | 227 | if (__put_user(regs->gprs[15], (addr_t __user *) frame)) |
228 | goto give_sigsegv; | 228 | return -EFAULT; |
229 | 229 | ||
230 | /* Set up registers for signal handler */ | 230 | /* Set up registers for signal handler */ |
231 | regs->gprs[15] = (unsigned long) frame; | 231 | regs->gprs[15] = (unsigned long) frame; |
@@ -250,27 +250,23 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
250 | 250 | ||
251 | /* Place signal number on stack to allow backtrace from handler. */ | 251 | /* Place signal number on stack to allow backtrace from handler. */ |
252 | if (__put_user(regs->gprs[2], (int __user *) &frame->signo)) | 252 | if (__put_user(regs->gprs[2], (int __user *) &frame->signo)) |
253 | goto give_sigsegv; | 253 | return -EFAULT; |
254 | return 0; | 254 | return 0; |
255 | |||
256 | give_sigsegv: | ||
257 | force_sigsegv(sig, current); | ||
258 | return -EFAULT; | ||
259 | } | 255 | } |
260 | 256 | ||
261 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 257 | static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, |
262 | sigset_t *set, struct pt_regs * regs) | 258 | struct pt_regs *regs) |
263 | { | 259 | { |
264 | int err = 0; | 260 | int err = 0; |
265 | rt_sigframe __user *frame; | 261 | rt_sigframe __user *frame; |
266 | 262 | ||
267 | frame = get_sigframe(ka, regs, sizeof(rt_sigframe)); | 263 | frame = get_sigframe(&ksig->ka, regs, sizeof(rt_sigframe)); |
268 | 264 | ||
269 | if (frame == (void __user *) -1UL) | 265 | if (frame == (void __user *) -1UL) |
270 | goto give_sigsegv; | 266 | return -EFAULT; |
271 | 267 | ||
272 | if (copy_siginfo_to_user(&frame->info, info)) | 268 | if (copy_siginfo_to_user(&frame->info, &ksig->info)) |
273 | goto give_sigsegv; | 269 | return -EFAULT; |
274 | 270 | ||
275 | /* Create the ucontext. */ | 271 | /* Create the ucontext. */ |
276 | err |= __put_user(0, &frame->uc.uc_flags); | 272 | err |= __put_user(0, &frame->uc.uc_flags); |
@@ -279,24 +275,24 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
279 | err |= save_sigregs(regs, &frame->uc.uc_mcontext); | 275 | err |= save_sigregs(regs, &frame->uc.uc_mcontext); |
280 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | 276 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); |
281 | if (err) | 277 | if (err) |
282 | goto give_sigsegv; | 278 | return -EFAULT; |
283 | 279 | ||
284 | /* Set up to return from userspace. If provided, use a stub | 280 | /* Set up to return from userspace. If provided, use a stub |
285 | already in userspace. */ | 281 | already in userspace. */ |
286 | if (ka->sa.sa_flags & SA_RESTORER) { | 282 | if (ksig->ka.sa.sa_flags & SA_RESTORER) { |
287 | regs->gprs[14] = (unsigned long) | 283 | regs->gprs[14] = (unsigned long) |
288 | ka->sa.sa_restorer | PSW_ADDR_AMODE; | 284 | ksig->ka.sa.sa_restorer | PSW_ADDR_AMODE; |
289 | } else { | 285 | } else { |
290 | regs->gprs[14] = (unsigned long) | 286 | regs->gprs[14] = (unsigned long) |
291 | frame->retcode | PSW_ADDR_AMODE; | 287 | frame->retcode | PSW_ADDR_AMODE; |
292 | if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, | 288 | if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, |
293 | (u16 __user *)(frame->retcode))) | 289 | (u16 __user *)(frame->retcode))) |
294 | goto give_sigsegv; | 290 | return -EFAULT; |
295 | } | 291 | } |
296 | 292 | ||
297 | /* Set up backchain. */ | 293 | /* Set up backchain. */ |
298 | if (__put_user(regs->gprs[15], (addr_t __user *) frame)) | 294 | if (__put_user(regs->gprs[15], (addr_t __user *) frame)) |
299 | goto give_sigsegv; | 295 | return -EFAULT; |
300 | 296 | ||
301 | /* Set up registers for signal handler */ | 297 | /* Set up registers for signal handler */ |
302 | regs->gprs[15] = (unsigned long) frame; | 298 | regs->gprs[15] = (unsigned long) frame; |
@@ -304,34 +300,27 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
304 | regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | | 300 | regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | |
305 | (PSW_USER_BITS & PSW_MASK_ASC) | | 301 | (PSW_USER_BITS & PSW_MASK_ASC) | |
306 | (regs->psw.mask & ~PSW_MASK_ASC); | 302 | (regs->psw.mask & ~PSW_MASK_ASC); |
307 | regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; | 303 | regs->psw.addr = (unsigned long) ksig->ka.sa.sa_handler | PSW_ADDR_AMODE; |
308 | 304 | ||
309 | regs->gprs[2] = map_signal(sig); | 305 | regs->gprs[2] = map_signal(ksig->sig); |
310 | regs->gprs[3] = (unsigned long) &frame->info; | 306 | regs->gprs[3] = (unsigned long) &frame->info; |
311 | regs->gprs[4] = (unsigned long) &frame->uc; | 307 | regs->gprs[4] = (unsigned long) &frame->uc; |
312 | regs->gprs[5] = task_thread_info(current)->last_break; | 308 | regs->gprs[5] = task_thread_info(current)->last_break; |
313 | return 0; | 309 | return 0; |
314 | |||
315 | give_sigsegv: | ||
316 | force_sigsegv(sig, current); | ||
317 | return -EFAULT; | ||
318 | } | 310 | } |
319 | 311 | ||
320 | static void handle_signal(unsigned long sig, struct k_sigaction *ka, | 312 | static void handle_signal(struct ksignal *ksig, sigset_t *oldset, |
321 | siginfo_t *info, sigset_t *oldset, | 313 | struct pt_regs *regs) |
322 | struct pt_regs *regs) | ||
323 | { | 314 | { |
324 | int ret; | 315 | int ret; |
325 | 316 | ||
326 | /* Set up the stack frame */ | 317 | /* Set up the stack frame */ |
327 | if (ka->sa.sa_flags & SA_SIGINFO) | 318 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) |
328 | ret = setup_rt_frame(sig, ka, info, oldset, regs); | 319 | ret = setup_rt_frame(ksig, oldset, regs); |
329 | else | 320 | else |
330 | ret = setup_frame(sig, ka, oldset, regs); | 321 | ret = setup_frame(ksig->sig, &ksig->ka, oldset, regs); |
331 | if (ret) | 322 | |
332 | return; | 323 | signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLE_STEP)); |
333 | signal_delivered(sig, info, ka, regs, | ||
334 | test_thread_flag(TIF_SINGLE_STEP)); | ||
335 | } | 324 | } |
336 | 325 | ||
337 | /* | 326 | /* |
@@ -345,9 +334,7 @@ static void handle_signal(unsigned long sig, struct k_sigaction *ka, | |||
345 | */ | 334 | */ |
346 | void do_signal(struct pt_regs *regs) | 335 | void do_signal(struct pt_regs *regs) |
347 | { | 336 | { |
348 | siginfo_t info; | 337 | struct ksignal ksig; |
349 | int signr; | ||
350 | struct k_sigaction ka; | ||
351 | sigset_t *oldset = sigmask_to_save(); | 338 | sigset_t *oldset = sigmask_to_save(); |
352 | 339 | ||
353 | /* | 340 | /* |
@@ -357,9 +344,8 @@ void do_signal(struct pt_regs *regs) | |||
357 | */ | 344 | */ |
358 | current_thread_info()->system_call = | 345 | current_thread_info()->system_call = |
359 | test_pt_regs_flag(regs, PIF_SYSCALL) ? regs->int_code : 0; | 346 | test_pt_regs_flag(regs, PIF_SYSCALL) ? regs->int_code : 0; |
360 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | ||
361 | 347 | ||
362 | if (signr > 0) { | 348 | if (get_signal(&ksig)) { |
363 | /* Whee! Actually deliver the signal. */ | 349 | /* Whee! Actually deliver the signal. */ |
364 | if (current_thread_info()->system_call) { | 350 | if (current_thread_info()->system_call) { |
365 | regs->int_code = current_thread_info()->system_call; | 351 | regs->int_code = current_thread_info()->system_call; |
@@ -370,7 +356,7 @@ void do_signal(struct pt_regs *regs) | |||
370 | regs->gprs[2] = -EINTR; | 356 | regs->gprs[2] = -EINTR; |
371 | break; | 357 | break; |
372 | case -ERESTARTSYS: | 358 | case -ERESTARTSYS: |
373 | if (!(ka.sa.sa_flags & SA_RESTART)) { | 359 | if (!(ksig.ka.sa.sa_flags & SA_RESTART)) { |
374 | regs->gprs[2] = -EINTR; | 360 | regs->gprs[2] = -EINTR; |
375 | break; | 361 | break; |
376 | } | 362 | } |
@@ -387,9 +373,9 @@ void do_signal(struct pt_regs *regs) | |||
387 | clear_pt_regs_flag(regs, PIF_SYSCALL); | 373 | clear_pt_regs_flag(regs, PIF_SYSCALL); |
388 | 374 | ||
389 | if (is_compat_task()) | 375 | if (is_compat_task()) |
390 | handle_signal32(signr, &ka, &info, oldset, regs); | 376 | handle_signal32(&ksig, oldset, regs); |
391 | else | 377 | else |
392 | handle_signal(signr, &ka, &info, oldset, regs); | 378 | handle_signal(&ksig, oldset, regs); |
393 | return; | 379 | return; |
394 | } | 380 | } |
395 | 381 | ||
diff --git a/arch/score/kernel/signal.c b/arch/score/kernel/signal.c index a00fba32b0eb..1651807774ad 100644 --- a/arch/score/kernel/signal.c +++ b/arch/score/kernel/signal.c | |||
@@ -173,15 +173,15 @@ badframe: | |||
173 | return 0; | 173 | return 0; |
174 | } | 174 | } |
175 | 175 | ||
176 | static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, | 176 | static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs, |
177 | int signr, sigset_t *set, siginfo_t *info) | 177 | sigset_t *set) |
178 | { | 178 | { |
179 | struct rt_sigframe __user *frame; | 179 | struct rt_sigframe __user *frame; |
180 | int err = 0; | 180 | int err = 0; |
181 | 181 | ||
182 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 182 | frame = get_sigframe(&ksig->ka, regs, sizeof(*frame)); |
183 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 183 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
184 | goto give_sigsegv; | 184 | return -EFAULT; |
185 | 185 | ||
186 | /* | 186 | /* |
187 | * Set up the return code ... | 187 | * Set up the return code ... |
@@ -194,7 +194,7 @@ static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, | |||
194 | err |= __put_user(0x80008002, frame->rs_code + 1); | 194 | err |= __put_user(0x80008002, frame->rs_code + 1); |
195 | flush_cache_sigtramp((unsigned long) frame->rs_code); | 195 | flush_cache_sigtramp((unsigned long) frame->rs_code); |
196 | 196 | ||
197 | err |= copy_siginfo_to_user(&frame->rs_info, info); | 197 | err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info); |
198 | err |= __put_user(0, &frame->rs_uc.uc_flags); | 198 | err |= __put_user(0, &frame->rs_uc.uc_flags); |
199 | err |= __put_user(NULL, &frame->rs_uc.uc_link); | 199 | err |= __put_user(NULL, &frame->rs_uc.uc_link); |
200 | err |= __save_altstack(&frame->rs_uc.uc_stack, regs->regs[0]); | 200 | err |= __save_altstack(&frame->rs_uc.uc_stack, regs->regs[0]); |
@@ -202,26 +202,23 @@ static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, | |||
202 | err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set)); | 202 | err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set)); |
203 | 203 | ||
204 | if (err) | 204 | if (err) |
205 | goto give_sigsegv; | 205 | return -EFAULT; |
206 | 206 | ||
207 | regs->regs[0] = (unsigned long) frame; | 207 | regs->regs[0] = (unsigned long) frame; |
208 | regs->regs[3] = (unsigned long) frame->rs_code; | 208 | regs->regs[3] = (unsigned long) frame->rs_code; |
209 | regs->regs[4] = signr; | 209 | regs->regs[4] = ksig->sig; |
210 | regs->regs[5] = (unsigned long) &frame->rs_info; | 210 | regs->regs[5] = (unsigned long) &frame->rs_info; |
211 | regs->regs[6] = (unsigned long) &frame->rs_uc; | 211 | regs->regs[6] = (unsigned long) &frame->rs_uc; |
212 | regs->regs[29] = (unsigned long) ka->sa.sa_handler; | 212 | regs->regs[29] = (unsigned long) ksig->ka.sa.sa_handler; |
213 | regs->cp0_epc = (unsigned long) ka->sa.sa_handler; | 213 | regs->cp0_epc = (unsigned long) ksig->ka.sa.sa_handler; |
214 | 214 | ||
215 | return 0; | 215 | return 0; |
216 | |||
217 | give_sigsegv: | ||
218 | force_sigsegv(signr, current); | ||
219 | return -EFAULT; | ||
220 | } | 216 | } |
221 | 217 | ||
222 | static void handle_signal(unsigned long sig, siginfo_t *info, | 218 | static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) |
223 | struct k_sigaction *ka, struct pt_regs *regs) | ||
224 | { | 219 | { |
220 | int ret; | ||
221 | |||
225 | if (regs->is_syscall) { | 222 | if (regs->is_syscall) { |
226 | switch (regs->regs[4]) { | 223 | switch (regs->regs[4]) { |
227 | case ERESTART_RESTARTBLOCK: | 224 | case ERESTART_RESTARTBLOCK: |
@@ -229,7 +226,7 @@ static void handle_signal(unsigned long sig, siginfo_t *info, | |||
229 | regs->regs[4] = EINTR; | 226 | regs->regs[4] = EINTR; |
230 | break; | 227 | break; |
231 | case ERESTARTSYS: | 228 | case ERESTARTSYS: |
232 | if (!(ka->sa.sa_flags & SA_RESTART)) { | 229 | if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { |
233 | regs->regs[4] = EINTR; | 230 | regs->regs[4] = EINTR; |
234 | break; | 231 | break; |
235 | } | 232 | } |
@@ -245,17 +242,14 @@ static void handle_signal(unsigned long sig, siginfo_t *info, | |||
245 | /* | 242 | /* |
246 | * Set up the stack frame | 243 | * Set up the stack frame |
247 | */ | 244 | */ |
248 | if (setup_rt_frame(ka, regs, sig, sigmask_to_save(), info) < 0) | 245 | ret = setup_rt_frame(ksig, regs, sigmask_to_save()); |
249 | return; | ||
250 | 246 | ||
251 | signal_delivered(sig, info, ka, regs, 0); | 247 | signal_setup_done(ret, ksig, 0); |
252 | } | 248 | } |
253 | 249 | ||
254 | static void do_signal(struct pt_regs *regs) | 250 | static void do_signal(struct pt_regs *regs) |
255 | { | 251 | { |
256 | struct k_sigaction ka; | 252 | struct ksignal ksig; |
257 | siginfo_t info; | ||
258 | int signr; | ||
259 | 253 | ||
260 | /* | 254 | /* |
261 | * We want the common case to go fast, which is why we may in certain | 255 | * We want the common case to go fast, which is why we may in certain |
@@ -265,10 +259,9 @@ static void do_signal(struct pt_regs *regs) | |||
265 | if (!user_mode(regs)) | 259 | if (!user_mode(regs)) |
266 | return; | 260 | return; |
267 | 261 | ||
268 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 262 | if (get_signal(&ksig)) { |
269 | if (signr > 0) { | ||
270 | /* Actually deliver the signal. */ | 263 | /* Actually deliver the signal. */ |
271 | handle_signal(signr, &info, &ka, regs); | 264 | handle_signal(&ksig, regs); |
272 | return; | 265 | return; |
273 | } | 266 | } |
274 | 267 | ||
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index 594cd371aa28..2f002b24fb92 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c | |||
@@ -262,17 +262,17 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) | |||
262 | extern void __kernel_sigreturn(void); | 262 | extern void __kernel_sigreturn(void); |
263 | extern void __kernel_rt_sigreturn(void); | 263 | extern void __kernel_rt_sigreturn(void); |
264 | 264 | ||
265 | static int setup_frame(int sig, struct k_sigaction *ka, | 265 | static int setup_frame(struct ksignal *ksig, sigset_t *set, |
266 | sigset_t *set, struct pt_regs *regs) | 266 | struct pt_regs *regs) |
267 | { | 267 | { |
268 | struct sigframe __user *frame; | 268 | struct sigframe __user *frame; |
269 | int err = 0; | 269 | int err = 0, sig = ksig->sig; |
270 | int signal; | 270 | int signal; |
271 | 271 | ||
272 | frame = get_sigframe(ka, regs->regs[15], sizeof(*frame)); | 272 | frame = get_sigframe(&ksig->ka, regs->regs[15], sizeof(*frame)); |
273 | 273 | ||
274 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 274 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
275 | goto give_sigsegv; | 275 | return -EFAULT; |
276 | 276 | ||
277 | signal = current_thread_info()->exec_domain | 277 | signal = current_thread_info()->exec_domain |
278 | && current_thread_info()->exec_domain->signal_invmap | 278 | && current_thread_info()->exec_domain->signal_invmap |
@@ -288,8 +288,8 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
288 | 288 | ||
289 | /* Set up to return from userspace. If provided, use a stub | 289 | /* Set up to return from userspace. If provided, use a stub |
290 | already in userspace. */ | 290 | already in userspace. */ |
291 | if (ka->sa.sa_flags & SA_RESTORER) { | 291 | if (ksig->ka.sa.sa_flags & SA_RESTORER) { |
292 | regs->pr = (unsigned long) ka->sa.sa_restorer; | 292 | regs->pr = (unsigned long) ksig->ka.sa.sa_restorer; |
293 | #ifdef CONFIG_VSYSCALL | 293 | #ifdef CONFIG_VSYSCALL |
294 | } else if (likely(current->mm->context.vdso)) { | 294 | } else if (likely(current->mm->context.vdso)) { |
295 | regs->pr = VDSO_SYM(&__kernel_sigreturn); | 295 | regs->pr = VDSO_SYM(&__kernel_sigreturn); |
@@ -309,7 +309,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
309 | } | 309 | } |
310 | 310 | ||
311 | if (err) | 311 | if (err) |
312 | goto give_sigsegv; | 312 | return -EFAULT; |
313 | 313 | ||
314 | /* Set up registers for signal handler */ | 314 | /* Set up registers for signal handler */ |
315 | regs->regs[15] = (unsigned long) frame; | 315 | regs->regs[15] = (unsigned long) frame; |
@@ -319,15 +319,15 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
319 | 319 | ||
320 | if (current->personality & FDPIC_FUNCPTRS) { | 320 | if (current->personality & FDPIC_FUNCPTRS) { |
321 | struct fdpic_func_descriptor __user *funcptr = | 321 | struct fdpic_func_descriptor __user *funcptr = |
322 | (struct fdpic_func_descriptor __user *)ka->sa.sa_handler; | 322 | (struct fdpic_func_descriptor __user *)ksig->ka.sa.sa_handler; |
323 | 323 | ||
324 | err |= __get_user(regs->pc, &funcptr->text); | 324 | err |= __get_user(regs->pc, &funcptr->text); |
325 | err |= __get_user(regs->regs[12], &funcptr->GOT); | 325 | err |= __get_user(regs->regs[12], &funcptr->GOT); |
326 | } else | 326 | } else |
327 | regs->pc = (unsigned long)ka->sa.sa_handler; | 327 | regs->pc = (unsigned long)ksig->ka.sa.sa_handler; |
328 | 328 | ||
329 | if (err) | 329 | if (err) |
330 | goto give_sigsegv; | 330 | return -EFAULT; |
331 | 331 | ||
332 | set_fs(USER_DS); | 332 | set_fs(USER_DS); |
333 | 333 | ||
@@ -335,23 +335,19 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
335 | current->comm, task_pid_nr(current), frame, regs->pc, regs->pr); | 335 | current->comm, task_pid_nr(current), frame, regs->pc, regs->pr); |
336 | 336 | ||
337 | return 0; | 337 | return 0; |
338 | |||
339 | give_sigsegv: | ||
340 | force_sigsegv(sig, current); | ||
341 | return -EFAULT; | ||
342 | } | 338 | } |
343 | 339 | ||
344 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 340 | static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, |
345 | sigset_t *set, struct pt_regs *regs) | 341 | struct pt_regs *regs) |
346 | { | 342 | { |
347 | struct rt_sigframe __user *frame; | 343 | struct rt_sigframe __user *frame; |
348 | int err = 0; | 344 | int err = 0, sig = ksig->sig; |
349 | int signal; | 345 | int signal; |
350 | 346 | ||
351 | frame = get_sigframe(ka, regs->regs[15], sizeof(*frame)); | 347 | frame = get_sigframe(&ksig->ka, regs->regs[15], sizeof(*frame)); |
352 | 348 | ||
353 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 349 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
354 | goto give_sigsegv; | 350 | return -EFAULT; |
355 | 351 | ||
356 | signal = current_thread_info()->exec_domain | 352 | signal = current_thread_info()->exec_domain |
357 | && current_thread_info()->exec_domain->signal_invmap | 353 | && current_thread_info()->exec_domain->signal_invmap |
@@ -359,7 +355,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
359 | ? current_thread_info()->exec_domain->signal_invmap[sig] | 355 | ? current_thread_info()->exec_domain->signal_invmap[sig] |
360 | : sig; | 356 | : sig; |
361 | 357 | ||
362 | err |= copy_siginfo_to_user(&frame->info, info); | 358 | err |= copy_siginfo_to_user(&frame->info, &ksig->info); |
363 | 359 | ||
364 | /* Create the ucontext. */ | 360 | /* Create the ucontext. */ |
365 | err |= __put_user(0, &frame->uc.uc_flags); | 361 | err |= __put_user(0, &frame->uc.uc_flags); |
@@ -371,8 +367,8 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
371 | 367 | ||
372 | /* Set up to return from userspace. If provided, use a stub | 368 | /* Set up to return from userspace. If provided, use a stub |
373 | already in userspace. */ | 369 | already in userspace. */ |
374 | if (ka->sa.sa_flags & SA_RESTORER) { | 370 | if (ksig->ka.sa.sa_flags & SA_RESTORER) { |
375 | regs->pr = (unsigned long) ka->sa.sa_restorer; | 371 | regs->pr = (unsigned long) ksig->ka.sa.sa_restorer; |
376 | #ifdef CONFIG_VSYSCALL | 372 | #ifdef CONFIG_VSYSCALL |
377 | } else if (likely(current->mm->context.vdso)) { | 373 | } else if (likely(current->mm->context.vdso)) { |
378 | regs->pr = VDSO_SYM(&__kernel_rt_sigreturn); | 374 | regs->pr = VDSO_SYM(&__kernel_rt_sigreturn); |
@@ -392,7 +388,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
392 | } | 388 | } |
393 | 389 | ||
394 | if (err) | 390 | if (err) |
395 | goto give_sigsegv; | 391 | return -EFAULT; |
396 | 392 | ||
397 | /* Set up registers for signal handler */ | 393 | /* Set up registers for signal handler */ |
398 | regs->regs[15] = (unsigned long) frame; | 394 | regs->regs[15] = (unsigned long) frame; |
@@ -402,15 +398,15 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
402 | 398 | ||
403 | if (current->personality & FDPIC_FUNCPTRS) { | 399 | if (current->personality & FDPIC_FUNCPTRS) { |
404 | struct fdpic_func_descriptor __user *funcptr = | 400 | struct fdpic_func_descriptor __user *funcptr = |
405 | (struct fdpic_func_descriptor __user *)ka->sa.sa_handler; | 401 | (struct fdpic_func_descriptor __user *)ksig->ka.sa.sa_handler; |
406 | 402 | ||
407 | err |= __get_user(regs->pc, &funcptr->text); | 403 | err |= __get_user(regs->pc, &funcptr->text); |
408 | err |= __get_user(regs->regs[12], &funcptr->GOT); | 404 | err |= __get_user(regs->regs[12], &funcptr->GOT); |
409 | } else | 405 | } else |
410 | regs->pc = (unsigned long)ka->sa.sa_handler; | 406 | regs->pc = (unsigned long)ksig->ka.sa.sa_handler; |
411 | 407 | ||
412 | if (err) | 408 | if (err) |
413 | goto give_sigsegv; | 409 | return -EFAULT; |
414 | 410 | ||
415 | set_fs(USER_DS); | 411 | set_fs(USER_DS); |
416 | 412 | ||
@@ -418,10 +414,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
418 | current->comm, task_pid_nr(current), frame, regs->pc, regs->pr); | 414 | current->comm, task_pid_nr(current), frame, regs->pc, regs->pr); |
419 | 415 | ||
420 | return 0; | 416 | return 0; |
421 | |||
422 | give_sigsegv: | ||
423 | force_sigsegv(sig, current); | ||
424 | return -EFAULT; | ||
425 | } | 417 | } |
426 | 418 | ||
427 | static inline void | 419 | static inline void |
@@ -455,22 +447,18 @@ handle_syscall_restart(unsigned long save_r0, struct pt_regs *regs, | |||
455 | * OK, we're invoking a handler | 447 | * OK, we're invoking a handler |
456 | */ | 448 | */ |
457 | static void | 449 | static void |
458 | handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | 450 | handle_signal(struct ksignal *ksig, struct pt_regs *regs, unsigned int save_r0) |
459 | struct pt_regs *regs, unsigned int save_r0) | ||
460 | { | 451 | { |
461 | sigset_t *oldset = sigmask_to_save(); | 452 | sigset_t *oldset = sigmask_to_save(); |
462 | int ret; | 453 | int ret; |
463 | 454 | ||
464 | /* Set up the stack frame */ | 455 | /* Set up the stack frame */ |
465 | if (ka->sa.sa_flags & SA_SIGINFO) | 456 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) |
466 | ret = setup_rt_frame(sig, ka, info, oldset, regs); | 457 | ret = setup_rt_frame(ksig, oldset, regs); |
467 | else | 458 | else |
468 | ret = setup_frame(sig, ka, oldset, regs); | 459 | ret = setup_frame(ksig, oldset, regs); |
469 | 460 | ||
470 | if (ret) | 461 | signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP)); |
471 | return; | ||
472 | signal_delivered(sig, info, ka, regs, | ||
473 | test_thread_flag(TIF_SINGLESTEP)); | ||
474 | } | 462 | } |
475 | 463 | ||
476 | /* | 464 | /* |
@@ -484,9 +472,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | |||
484 | */ | 472 | */ |
485 | static void do_signal(struct pt_regs *regs, unsigned int save_r0) | 473 | static void do_signal(struct pt_regs *regs, unsigned int save_r0) |
486 | { | 474 | { |
487 | siginfo_t info; | 475 | struct ksignal ksig; |
488 | int signr; | ||
489 | struct k_sigaction ka; | ||
490 | 476 | ||
491 | /* | 477 | /* |
492 | * We want the common case to go fast, which | 478 | * We want the common case to go fast, which |
@@ -497,12 +483,11 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0) | |||
497 | if (!user_mode(regs)) | 483 | if (!user_mode(regs)) |
498 | return; | 484 | return; |
499 | 485 | ||
500 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 486 | if (get_signal(&ksig)) { |
501 | if (signr > 0) { | 487 | handle_syscall_restart(save_r0, regs, &ksig.ka.sa); |
502 | handle_syscall_restart(save_r0, regs, &ka.sa); | ||
503 | 488 | ||
504 | /* Whee! Actually deliver the signal. */ | 489 | /* Whee! Actually deliver the signal. */ |
505 | handle_signal(signr, &ka, &info, regs, save_r0); | 490 | handle_signal(&ksig, regs, save_r0); |
506 | return; | 491 | return; |
507 | } | 492 | } |
508 | 493 | ||
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c index 23d4c71c91af..897abe7b871e 100644 --- a/arch/sh/kernel/signal_64.c +++ b/arch/sh/kernel/signal_64.c | |||
@@ -41,8 +41,7 @@ | |||
41 | #define DEBUG_SIG 0 | 41 | #define DEBUG_SIG 0 |
42 | 42 | ||
43 | static void | 43 | static void |
44 | handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | 44 | handle_signal(struct ksignal *ksig, struct pt_regs *regs); |
45 | struct pt_regs * regs); | ||
46 | 45 | ||
47 | static inline void | 46 | static inline void |
48 | handle_syscall_restart(struct pt_regs *regs, struct sigaction *sa) | 47 | handle_syscall_restart(struct pt_regs *regs, struct sigaction *sa) |
@@ -82,9 +81,7 @@ handle_syscall_restart(struct pt_regs *regs, struct sigaction *sa) | |||
82 | */ | 81 | */ |
83 | static void do_signal(struct pt_regs *regs) | 82 | static void do_signal(struct pt_regs *regs) |
84 | { | 83 | { |
85 | siginfo_t info; | 84 | struct ksignal ksig; |
86 | int signr; | ||
87 | struct k_sigaction ka; | ||
88 | 85 | ||
89 | /* | 86 | /* |
90 | * We want the common case to go fast, which | 87 | * We want the common case to go fast, which |
@@ -95,12 +92,11 @@ static void do_signal(struct pt_regs *regs) | |||
95 | if (!user_mode(regs)) | 92 | if (!user_mode(regs)) |
96 | return; | 93 | return; |
97 | 94 | ||
98 | signr = get_signal_to_deliver(&info, &ka, regs, 0); | 95 | if (get_signal(&ksig)) { |
99 | if (signr > 0) { | 96 | handle_syscall_restart(regs, &ksig.ka.sa); |
100 | handle_syscall_restart(regs, &ka.sa); | ||
101 | 97 | ||
102 | /* Whee! Actually deliver the signal. */ | 98 | /* Whee! Actually deliver the signal. */ |
103 | handle_signal(signr, &info, &ka, regs); | 99 | handle_signal(&ksig, regs); |
104 | return; | 100 | return; |
105 | } | 101 | } |
106 | 102 | ||
@@ -378,17 +374,16 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) | |||
378 | void sa_default_restorer(void); /* See comments below */ | 374 | void sa_default_restorer(void); /* See comments below */ |
379 | void sa_default_rt_restorer(void); /* See comments below */ | 375 | void sa_default_rt_restorer(void); /* See comments below */ |
380 | 376 | ||
381 | static int setup_frame(int sig, struct k_sigaction *ka, | 377 | static int setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) |
382 | sigset_t *set, struct pt_regs *regs) | ||
383 | { | 378 | { |
384 | struct sigframe __user *frame; | 379 | struct sigframe __user *frame; |
385 | int err = 0; | 380 | int err = 0, sig = ksig->sig; |
386 | int signal; | 381 | int signal; |
387 | 382 | ||
388 | frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame)); | 383 | frame = get_sigframe(&ksig->ka, regs->regs[REG_SP], sizeof(*frame)); |
389 | 384 | ||
390 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 385 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
391 | goto give_sigsegv; | 386 | return -EFAULT; |
392 | 387 | ||
393 | signal = current_thread_info()->exec_domain | 388 | signal = current_thread_info()->exec_domain |
394 | && current_thread_info()->exec_domain->signal_invmap | 389 | && current_thread_info()->exec_domain->signal_invmap |
@@ -400,7 +395,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
400 | 395 | ||
401 | /* Give up earlier as i386, in case */ | 396 | /* Give up earlier as i386, in case */ |
402 | if (err) | 397 | if (err) |
403 | goto give_sigsegv; | 398 | return -EFAULT; |
404 | 399 | ||
405 | if (_NSIG_WORDS > 1) { | 400 | if (_NSIG_WORDS > 1) { |
406 | err |= __copy_to_user(frame->extramask, &set->sig[1], | 401 | err |= __copy_to_user(frame->extramask, &set->sig[1], |
@@ -408,16 +403,16 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
408 | 403 | ||
409 | /* Give up earlier as i386, in case */ | 404 | /* Give up earlier as i386, in case */ |
410 | if (err) | 405 | if (err) |
411 | goto give_sigsegv; | 406 | return -EFAULT; |
412 | 407 | ||
413 | /* Set up to return from userspace. If provided, use a stub | 408 | /* Set up to return from userspace. If provided, use a stub |
414 | already in userspace. */ | 409 | already in userspace. */ |
415 | if (ka->sa.sa_flags & SA_RESTORER) { | 410 | if (ksig->ka.sa.sa_flags & SA_RESTORER) { |
416 | /* | 411 | /* |
417 | * On SH5 all edited pointers are subject to NEFF | 412 | * On SH5 all edited pointers are subject to NEFF |
418 | */ | 413 | */ |
419 | DEREF_REG_PR = neff_sign_extend((unsigned long) | 414 | DEREF_REG_PR = neff_sign_extend((unsigned long) |
420 | ka->sa.sa_restorer | 0x1); | 415 | ksig->ka.sa.sa_restorer | 0x1); |
421 | } else { | 416 | } else { |
422 | /* | 417 | /* |
423 | * Different approach on SH5. | 418 | * Different approach on SH5. |
@@ -435,7 +430,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
435 | 430 | ||
436 | if (__copy_to_user(frame->retcode, | 431 | if (__copy_to_user(frame->retcode, |
437 | (void *)((unsigned long)sa_default_restorer & (~1)), 16) != 0) | 432 | (void *)((unsigned long)sa_default_restorer & (~1)), 16) != 0) |
438 | goto give_sigsegv; | 433 | return -EFAULT; |
439 | 434 | ||
440 | /* Cohere the trampoline with the I-cache. */ | 435 | /* Cohere the trampoline with the I-cache. */ |
441 | flush_cache_sigtramp(DEREF_REG_PR-1); | 436 | flush_cache_sigtramp(DEREF_REG_PR-1); |
@@ -460,7 +455,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
460 | regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc; | 455 | regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc; |
461 | regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc; | 456 | regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc; |
462 | 457 | ||
463 | regs->pc = neff_sign_extend((unsigned long)ka->sa.sa_handler); | 458 | regs->pc = neff_sign_extend((unsigned long)ksig->ka.sa.sa_handler); |
464 | 459 | ||
465 | set_fs(USER_DS); | 460 | set_fs(USER_DS); |
466 | 461 | ||
@@ -471,23 +466,19 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
471 | DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff); | 466 | DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff); |
472 | 467 | ||
473 | return 0; | 468 | return 0; |
474 | |||
475 | give_sigsegv: | ||
476 | force_sigsegv(sig, current); | ||
477 | return -EFAULT; | ||
478 | } | 469 | } |
479 | 470 | ||
480 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 471 | static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, |
481 | sigset_t *set, struct pt_regs *regs) | 472 | struct pt_regs *regs) |
482 | { | 473 | { |
483 | struct rt_sigframe __user *frame; | 474 | struct rt_sigframe __user *frame; |
484 | int err = 0; | 475 | int err = 0, sig = ksig->sig; |
485 | int signal; | 476 | int signal; |
486 | 477 | ||
487 | frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame)); | 478 | frame = get_sigframe(&ksig->ka, regs->regs[REG_SP], sizeof(*frame)); |
488 | 479 | ||
489 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 480 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
490 | goto give_sigsegv; | 481 | return -EFAULT; |
491 | 482 | ||
492 | signal = current_thread_info()->exec_domain | 483 | signal = current_thread_info()->exec_domain |
493 | && current_thread_info()->exec_domain->signal_invmap | 484 | && current_thread_info()->exec_domain->signal_invmap |
@@ -497,11 +488,11 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
497 | 488 | ||
498 | err |= __put_user(&frame->info, &frame->pinfo); | 489 | err |= __put_user(&frame->info, &frame->pinfo); |
499 | err |= __put_user(&frame->uc, &frame->puc); | 490 | err |= __put_user(&frame->uc, &frame->puc); |
500 | err |= copy_siginfo_to_user(&frame->info, info); | 491 | err |= copy_siginfo_to_user(&frame->info, &ksig->info); |
501 | 492 | ||
502 | /* Give up earlier as i386, in case */ | 493 | /* Give up earlier as i386, in case */ |
503 | if (err) | 494 | if (err) |
504 | goto give_sigsegv; | 495 | return -EFAULT; |
505 | 496 | ||
506 | /* Create the ucontext. */ | 497 | /* Create the ucontext. */ |
507 | err |= __put_user(0, &frame->uc.uc_flags); | 498 | err |= __put_user(0, &frame->uc.uc_flags); |
@@ -513,16 +504,16 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
513 | 504 | ||
514 | /* Give up earlier as i386, in case */ | 505 | /* Give up earlier as i386, in case */ |
515 | if (err) | 506 | if (err) |
516 | goto give_sigsegv; | 507 | return -EFAULT; |
517 | 508 | ||
518 | /* Set up to return from userspace. If provided, use a stub | 509 | /* Set up to return from userspace. If provided, use a stub |
519 | already in userspace. */ | 510 | already in userspace. */ |
520 | if (ka->sa.sa_flags & SA_RESTORER) { | 511 | if (ksig->ka.sa.sa_flags & SA_RESTORER) { |
521 | /* | 512 | /* |
522 | * On SH5 all edited pointers are subject to NEFF | 513 | * On SH5 all edited pointers are subject to NEFF |
523 | */ | 514 | */ |
524 | DEREF_REG_PR = neff_sign_extend((unsigned long) | 515 | DEREF_REG_PR = neff_sign_extend((unsigned long) |
525 | ka->sa.sa_restorer | 0x1); | 516 | ksig->ka.sa.sa_restorer | 0x1); |
526 | } else { | 517 | } else { |
527 | /* | 518 | /* |
528 | * Different approach on SH5. | 519 | * Different approach on SH5. |
@@ -540,7 +531,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
540 | 531 | ||
541 | if (__copy_to_user(frame->retcode, | 532 | if (__copy_to_user(frame->retcode, |
542 | (void *)((unsigned long)sa_default_rt_restorer & (~1)), 16) != 0) | 533 | (void *)((unsigned long)sa_default_rt_restorer & (~1)), 16) != 0) |
543 | goto give_sigsegv; | 534 | return -EFAULT; |
544 | 535 | ||
545 | /* Cohere the trampoline with the I-cache. */ | 536 | /* Cohere the trampoline with the I-cache. */ |
546 | flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15); | 537 | flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15); |
@@ -554,7 +545,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
554 | regs->regs[REG_ARG1] = signal; /* Arg for signal handler */ | 545 | regs->regs[REG_ARG1] = signal; /* Arg for signal handler */ |
555 | regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info; | 546 | regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info; |
556 | regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext; | 547 | regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext; |
557 | regs->pc = neff_sign_extend((unsigned long)ka->sa.sa_handler); | 548 | regs->pc = neff_sign_extend((unsigned long)ksig->ka.sa.sa_handler); |
558 | 549 | ||
559 | set_fs(USER_DS); | 550 | set_fs(USER_DS); |
560 | 551 | ||
@@ -564,33 +555,24 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
564 | DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff); | 555 | DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff); |
565 | 556 | ||
566 | return 0; | 557 | return 0; |
567 | |||
568 | give_sigsegv: | ||
569 | force_sigsegv(sig, current); | ||
570 | return -EFAULT; | ||
571 | } | 558 | } |
572 | 559 | ||
573 | /* | 560 | /* |
574 | * OK, we're invoking a handler | 561 | * OK, we're invoking a handler |
575 | */ | 562 | */ |
576 | static void | 563 | static void |
577 | handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | 564 | handle_signal(struct ksignal *ksig, struct pt_regs *regs) |
578 | struct pt_regs * regs) | ||
579 | { | 565 | { |
580 | sigset_t *oldset = sigmask_to_save(); | 566 | sigset_t *oldset = sigmask_to_save(); |
581 | int ret; | 567 | int ret; |
582 | 568 | ||
583 | /* Set up the stack frame */ | 569 | /* Set up the stack frame */ |
584 | if (ka->sa.sa_flags & SA_SIGINFO) | 570 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) |
585 | ret = setup_rt_frame(sig, ka, info, oldset, regs); | 571 | ret = setup_rt_frame(ksig, oldset, regs); |
586 | else | 572 | else |
587 | ret = setup_frame(sig, ka, oldset, regs); | 573 | ret = setup_frame(ksig, oldset, regs); |
588 | |||
589 | if (ret) | ||
590 | return; | ||
591 | 574 | ||
592 | signal_delivered(sig, info, ka, regs, | 575 | signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP)); |
593 | test_thread_flag(TIF_SINGLESTEP)); | ||
594 | } | 576 | } |
595 | 577 | ||
596 | asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) | 578 | asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) |
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h index ffd4493efc78..c14e36f008c8 100644 --- a/arch/tile/include/asm/compat.h +++ b/arch/tile/include/asm/compat.h | |||
@@ -267,8 +267,7 @@ static inline int is_compat_task(void) | |||
267 | return current_thread_info()->status & TS_COMPAT; | 267 | return current_thread_info()->status & TS_COMPAT; |
268 | } | 268 | } |
269 | 269 | ||
270 | extern int compat_setup_rt_frame(int sig, struct k_sigaction *ka, | 270 | extern int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set, |
271 | siginfo_t *info, sigset_t *set, | ||
272 | struct pt_regs *regs); | 271 | struct pt_regs *regs); |
273 | 272 | ||
274 | /* Compat syscalls. */ | 273 | /* Compat syscalls. */ |
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c index 19c04b5ce408..8c5abf2e4794 100644 --- a/arch/tile/kernel/compat_signal.c +++ b/arch/tile/kernel/compat_signal.c | |||
@@ -190,18 +190,18 @@ static inline void __user *compat_get_sigframe(struct k_sigaction *ka, | |||
190 | return (void __user *) sp; | 190 | return (void __user *) sp; |
191 | } | 191 | } |
192 | 192 | ||
193 | int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 193 | int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set, |
194 | sigset_t *set, struct pt_regs *regs) | 194 | struct pt_regs *regs) |
195 | { | 195 | { |
196 | unsigned long restorer; | 196 | unsigned long restorer; |
197 | struct compat_rt_sigframe __user *frame; | 197 | struct compat_rt_sigframe __user *frame; |
198 | int err = 0; | 198 | int err = 0, sig = ksig->sig; |
199 | int usig; | 199 | int usig; |
200 | 200 | ||
201 | frame = compat_get_sigframe(ka, regs, sizeof(*frame)); | 201 | frame = compat_get_sigframe(&ksig->ka, regs, sizeof(*frame)); |
202 | 202 | ||
203 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 203 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
204 | goto give_sigsegv; | 204 | goto err; |
205 | 205 | ||
206 | usig = current_thread_info()->exec_domain | 206 | usig = current_thread_info()->exec_domain |
207 | && current_thread_info()->exec_domain->signal_invmap | 207 | && current_thread_info()->exec_domain->signal_invmap |
@@ -210,12 +210,12 @@ int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
210 | : sig; | 210 | : sig; |
211 | 211 | ||
212 | /* Always write at least the signal number for the stack backtracer. */ | 212 | /* Always write at least the signal number for the stack backtracer. */ |
213 | if (ka->sa.sa_flags & SA_SIGINFO) { | 213 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) { |
214 | /* At sigreturn time, restore the callee-save registers too. */ | 214 | /* At sigreturn time, restore the callee-save registers too. */ |
215 | err |= copy_siginfo_to_user32(&frame->info, info); | 215 | err |= copy_siginfo_to_user32(&frame->info, &ksig->info); |
216 | regs->flags |= PT_FLAGS_RESTORE_REGS; | 216 | regs->flags |= PT_FLAGS_RESTORE_REGS; |
217 | } else { | 217 | } else { |
218 | err |= __put_user(info->si_signo, &frame->info.si_signo); | 218 | err |= __put_user(ksig->info.si_signo, &frame->info.si_signo); |
219 | } | 219 | } |
220 | 220 | ||
221 | /* Create the ucontext. */ | 221 | /* Create the ucontext. */ |
@@ -226,11 +226,11 @@ int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
226 | err |= setup_sigcontext(&frame->uc.uc_mcontext, regs); | 226 | err |= setup_sigcontext(&frame->uc.uc_mcontext, regs); |
227 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | 227 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); |
228 | if (err) | 228 | if (err) |
229 | goto give_sigsegv; | 229 | goto err; |
230 | 230 | ||
231 | restorer = VDSO_SYM(&__vdso_rt_sigreturn); | 231 | restorer = VDSO_SYM(&__vdso_rt_sigreturn); |
232 | if (ka->sa.sa_flags & SA_RESTORER) | 232 | if (ksig->ka.sa.sa_flags & SA_RESTORER) |
233 | restorer = ptr_to_compat_reg(ka->sa.sa_restorer); | 233 | restorer = ptr_to_compat_reg(ksig->ka.sa.sa_restorer); |
234 | 234 | ||
235 | /* | 235 | /* |
236 | * Set up registers for signal handler. | 236 | * Set up registers for signal handler. |
@@ -239,7 +239,7 @@ int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
239 | * We always pass siginfo and mcontext, regardless of SA_SIGINFO, | 239 | * We always pass siginfo and mcontext, regardless of SA_SIGINFO, |
240 | * since some things rely on this (e.g. glibc's debug/segfault.c). | 240 | * since some things rely on this (e.g. glibc's debug/segfault.c). |
241 | */ | 241 | */ |
242 | regs->pc = ptr_to_compat_reg(ka->sa.sa_handler); | 242 | regs->pc = ptr_to_compat_reg(ksig->ka.sa.sa_handler); |
243 | regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */ | 243 | regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */ |
244 | regs->sp = ptr_to_compat_reg(frame); | 244 | regs->sp = ptr_to_compat_reg(frame); |
245 | regs->lr = restorer; | 245 | regs->lr = restorer; |
@@ -249,7 +249,8 @@ int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
249 | regs->flags |= PT_FLAGS_CALLER_SAVES; | 249 | regs->flags |= PT_FLAGS_CALLER_SAVES; |
250 | return 0; | 250 | return 0; |
251 | 251 | ||
252 | give_sigsegv: | 252 | err: |
253 | signal_fault("bad setup frame", regs, frame, sig); | 253 | trace_unhandled_signal("bad sigreturn frame", regs, |
254 | (unsigned long)frame, SIGSEGV); | ||
254 | return -EFAULT; | 255 | return -EFAULT; |
255 | } | 256 | } |
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c index d1d026f01267..7c2fecc52177 100644 --- a/arch/tile/kernel/signal.c +++ b/arch/tile/kernel/signal.c | |||
@@ -153,18 +153,18 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, | |||
153 | return (void __user *) sp; | 153 | return (void __user *) sp; |
154 | } | 154 | } |
155 | 155 | ||
156 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 156 | static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, |
157 | sigset_t *set, struct pt_regs *regs) | 157 | struct pt_regs *regs) |
158 | { | 158 | { |
159 | unsigned long restorer; | 159 | unsigned long restorer; |
160 | struct rt_sigframe __user *frame; | 160 | struct rt_sigframe __user *frame; |
161 | int err = 0; | 161 | int err = 0, sig = ksig->sig; |
162 | int usig; | 162 | int usig; |
163 | 163 | ||
164 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 164 | frame = get_sigframe(&ksig->ka, regs, sizeof(*frame)); |
165 | 165 | ||
166 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 166 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
167 | goto give_sigsegv; | 167 | goto err; |
168 | 168 | ||
169 | usig = current_thread_info()->exec_domain | 169 | usig = current_thread_info()->exec_domain |
170 | && current_thread_info()->exec_domain->signal_invmap | 170 | && current_thread_info()->exec_domain->signal_invmap |
@@ -173,12 +173,12 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
173 | : sig; | 173 | : sig; |
174 | 174 | ||
175 | /* Always write at least the signal number for the stack backtracer. */ | 175 | /* Always write at least the signal number for the stack backtracer. */ |
176 | if (ka->sa.sa_flags & SA_SIGINFO) { | 176 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) { |
177 | /* At sigreturn time, restore the callee-save registers too. */ | 177 | /* At sigreturn time, restore the callee-save registers too. */ |
178 | err |= copy_siginfo_to_user(&frame->info, info); | 178 | err |= copy_siginfo_to_user(&frame->info, &ksig->info); |
179 | regs->flags |= PT_FLAGS_RESTORE_REGS; | 179 | regs->flags |= PT_FLAGS_RESTORE_REGS; |
180 | } else { | 180 | } else { |
181 | err |= __put_user(info->si_signo, &frame->info.si_signo); | 181 | err |= __put_user(ksig->info.si_signo, &frame->info.si_signo); |
182 | } | 182 | } |
183 | 183 | ||
184 | /* Create the ucontext. */ | 184 | /* Create the ucontext. */ |
@@ -189,11 +189,11 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
189 | err |= setup_sigcontext(&frame->uc.uc_mcontext, regs); | 189 | err |= setup_sigcontext(&frame->uc.uc_mcontext, regs); |
190 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | 190 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); |
191 | if (err) | 191 | if (err) |
192 | goto give_sigsegv; | 192 | goto err; |
193 | 193 | ||
194 | restorer = VDSO_SYM(&__vdso_rt_sigreturn); | 194 | restorer = VDSO_SYM(&__vdso_rt_sigreturn); |
195 | if (ka->sa.sa_flags & SA_RESTORER) | 195 | if (ksig->ka.sa.sa_flags & SA_RESTORER) |
196 | restorer = (unsigned long) ka->sa.sa_restorer; | 196 | restorer = (unsigned long) ksig->ka.sa.sa_restorer; |
197 | 197 | ||
198 | /* | 198 | /* |
199 | * Set up registers for signal handler. | 199 | * Set up registers for signal handler. |
@@ -202,7 +202,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
202 | * We always pass siginfo and mcontext, regardless of SA_SIGINFO, | 202 | * We always pass siginfo and mcontext, regardless of SA_SIGINFO, |
203 | * since some things rely on this (e.g. glibc's debug/segfault.c). | 203 | * since some things rely on this (e.g. glibc's debug/segfault.c). |
204 | */ | 204 | */ |
205 | regs->pc = (unsigned long) ka->sa.sa_handler; | 205 | regs->pc = (unsigned long) ksig->ka.sa.sa_handler; |
206 | regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */ | 206 | regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */ |
207 | regs->sp = (unsigned long) frame; | 207 | regs->sp = (unsigned long) frame; |
208 | regs->lr = restorer; | 208 | regs->lr = restorer; |
@@ -212,8 +212,9 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
212 | regs->flags |= PT_FLAGS_CALLER_SAVES; | 212 | regs->flags |= PT_FLAGS_CALLER_SAVES; |
213 | return 0; | 213 | return 0; |
214 | 214 | ||
215 | give_sigsegv: | 215 | err: |
216 | signal_fault("bad setup frame", regs, frame, sig); | 216 | trace_unhandled_signal("bad sigreturn frame", regs, |
217 | (unsigned long)frame, SIGSEGV); | ||
217 | return -EFAULT; | 218 | return -EFAULT; |
218 | } | 219 | } |
219 | 220 | ||
@@ -221,9 +222,7 @@ give_sigsegv: | |||
221 | * OK, we're invoking a handler | 222 | * OK, we're invoking a handler |
222 | */ | 223 | */ |
223 | 224 | ||
224 | static void handle_signal(unsigned long sig, siginfo_t *info, | 225 | static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) |
225 | struct k_sigaction *ka, | ||
226 | struct pt_regs *regs) | ||
227 | { | 226 | { |
228 | sigset_t *oldset = sigmask_to_save(); | 227 | sigset_t *oldset = sigmask_to_save(); |
229 | int ret; | 228 | int ret; |
@@ -238,7 +237,7 @@ static void handle_signal(unsigned long sig, siginfo_t *info, | |||
238 | break; | 237 | break; |
239 | 238 | ||
240 | case -ERESTARTSYS: | 239 | case -ERESTARTSYS: |
241 | if (!(ka->sa.sa_flags & SA_RESTART)) { | 240 | if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { |
242 | regs->regs[0] = -EINTR; | 241 | regs->regs[0] = -EINTR; |
243 | break; | 242 | break; |
244 | } | 243 | } |
@@ -254,14 +253,12 @@ static void handle_signal(unsigned long sig, siginfo_t *info, | |||
254 | /* Set up the stack frame */ | 253 | /* Set up the stack frame */ |
255 | #ifdef CONFIG_COMPAT | 254 | #ifdef CONFIG_COMPAT |
256 | if (is_compat_task()) | 255 | if (is_compat_task()) |
257 | ret = compat_setup_rt_frame(sig, ka, info, oldset, regs); | 256 | ret = compat_setup_rt_frame(ksig, oldset, regs); |
258 | else | 257 | else |
259 | #endif | 258 | #endif |
260 | ret = setup_rt_frame(sig, ka, info, oldset, regs); | 259 | ret = setup_rt_frame(ksig, oldset, regs); |
261 | if (ret) | 260 | |
262 | return; | 261 | signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP)); |
263 | signal_delivered(sig, info, ka, regs, | ||
264 | test_thread_flag(TIF_SINGLESTEP)); | ||
265 | } | 262 | } |
266 | 263 | ||
267 | /* | 264 | /* |
@@ -271,9 +268,7 @@ static void handle_signal(unsigned long sig, siginfo_t *info, | |||
271 | */ | 268 | */ |
272 | void do_signal(struct pt_regs *regs) | 269 | void do_signal(struct pt_regs *regs) |
273 | { | 270 | { |
274 | siginfo_t info; | 271 | struct ksignal ksig; |
275 | int signr; | ||
276 | struct k_sigaction ka; | ||
277 | 272 | ||
278 | /* | 273 | /* |
279 | * i386 will check if we're coming from kernel mode and bail out | 274 | * i386 will check if we're coming from kernel mode and bail out |
@@ -282,10 +277,9 @@ void do_signal(struct pt_regs *regs) | |||
282 | * helpful, we can reinstate the check on "!user_mode(regs)". | 277 | * helpful, we can reinstate the check on "!user_mode(regs)". |
283 | */ | 278 | */ |
284 | 279 | ||
285 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 280 | if (get_signal(&ksig)) { |
286 | if (signr > 0) { | ||
287 | /* Whee! Actually deliver the signal. */ | 281 | /* Whee! Actually deliver the signal. */ |
288 | handle_signal(signr, &info, &ka, regs); | 282 | handle_signal(&ksig, regs); |
289 | goto done; | 283 | goto done; |
290 | } | 284 | } |
291 | 285 | ||
diff --git a/arch/um/include/shared/frame_kern.h b/arch/um/include/shared/frame_kern.h index f2ca5702a4e2..a5cde5c433b4 100644 --- a/arch/um/include/shared/frame_kern.h +++ b/arch/um/include/shared/frame_kern.h | |||
@@ -6,14 +6,10 @@ | |||
6 | #ifndef __FRAME_KERN_H_ | 6 | #ifndef __FRAME_KERN_H_ |
7 | #define __FRAME_KERN_H_ | 7 | #define __FRAME_KERN_H_ |
8 | 8 | ||
9 | extern int setup_signal_stack_sc(unsigned long stack_top, int sig, | 9 | extern int setup_signal_stack_sc(unsigned long stack_top, struct ksignal *ksig, |
10 | struct k_sigaction *ka, | 10 | struct pt_regs *regs, sigset_t *mask); |
11 | struct pt_regs *regs, | 11 | extern int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig, |
12 | sigset_t *mask); | 12 | struct pt_regs *regs, sigset_t *mask); |
13 | extern int setup_signal_stack_si(unsigned long stack_top, int sig, | ||
14 | struct k_sigaction *ka, | ||
15 | struct pt_regs *regs, struct siginfo *info, | ||
16 | sigset_t *mask); | ||
17 | 13 | ||
18 | #endif | 14 | #endif |
19 | 15 | ||
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c index f57e02e7910f..4f60e4aad790 100644 --- a/arch/um/kernel/signal.c +++ b/arch/um/kernel/signal.c | |||
@@ -18,8 +18,7 @@ EXPORT_SYMBOL(unblock_signals); | |||
18 | /* | 18 | /* |
19 | * OK, we're invoking a handler | 19 | * OK, we're invoking a handler |
20 | */ | 20 | */ |
21 | static void handle_signal(struct pt_regs *regs, unsigned long signr, | 21 | static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) |
22 | struct k_sigaction *ka, struct siginfo *info) | ||
23 | { | 22 | { |
24 | sigset_t *oldset = sigmask_to_save(); | 23 | sigset_t *oldset = sigmask_to_save(); |
25 | int singlestep = 0; | 24 | int singlestep = 0; |
@@ -39,7 +38,7 @@ static void handle_signal(struct pt_regs *regs, unsigned long signr, | |||
39 | break; | 38 | break; |
40 | 39 | ||
41 | case -ERESTARTSYS: | 40 | case -ERESTARTSYS: |
42 | if (!(ka->sa.sa_flags & SA_RESTART)) { | 41 | if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { |
43 | PT_REGS_SYSCALL_RET(regs) = -EINTR; | 42 | PT_REGS_SYSCALL_RET(regs) = -EINTR; |
44 | break; | 43 | break; |
45 | } | 44 | } |
@@ -52,32 +51,28 @@ static void handle_signal(struct pt_regs *regs, unsigned long signr, | |||
52 | } | 51 | } |
53 | 52 | ||
54 | sp = PT_REGS_SP(regs); | 53 | sp = PT_REGS_SP(regs); |
55 | if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags(sp) == 0)) | 54 | if ((ksig->ka.sa.sa_flags & SA_ONSTACK) && (sas_ss_flags(sp) == 0)) |
56 | sp = current->sas_ss_sp + current->sas_ss_size; | 55 | sp = current->sas_ss_sp + current->sas_ss_size; |
57 | 56 | ||
58 | #ifdef CONFIG_ARCH_HAS_SC_SIGNALS | 57 | #ifdef CONFIG_ARCH_HAS_SC_SIGNALS |
59 | if (!(ka->sa.sa_flags & SA_SIGINFO)) | 58 | if (!(ksig->ka.sa.sa_flags & SA_SIGINFO)) |
60 | err = setup_signal_stack_sc(sp, signr, ka, regs, oldset); | 59 | err = setup_signal_stack_sc(sp, ksig, regs, oldset); |
61 | else | 60 | else |
62 | #endif | 61 | #endif |
63 | err = setup_signal_stack_si(sp, signr, ka, regs, info, oldset); | 62 | err = setup_signal_stack_si(sp, ksig, regs, oldset); |
64 | 63 | ||
65 | if (err) | 64 | signal_setup_done(err, ksig, singlestep); |
66 | force_sigsegv(signr, current); | ||
67 | else | ||
68 | signal_delivered(signr, info, ka, regs, singlestep); | ||
69 | } | 65 | } |
70 | 66 | ||
71 | static int kern_do_signal(struct pt_regs *regs) | 67 | static int kern_do_signal(struct pt_regs *regs) |
72 | { | 68 | { |
73 | struct k_sigaction ka_copy; | 69 | struct ksignal ksig; |
74 | struct siginfo info; | 70 | int handled_sig = 0; |
75 | int sig, handled_sig = 0; | ||
76 | 71 | ||
77 | while ((sig = get_signal_to_deliver(&info, &ka_copy, regs, NULL)) > 0) { | 72 | while (get_signal(&ksig)) { |
78 | handled_sig = 1; | 73 | handled_sig = 1; |
79 | /* Whee! Actually deliver the signal. */ | 74 | /* Whee! Actually deliver the signal. */ |
80 | handle_signal(regs, sig, &ka_copy, &info); | 75 | handle_signal(&ksig, regs); |
81 | } | 76 | } |
82 | 77 | ||
83 | /* Did we come from a system call? */ | 78 | /* Did we come from a system call? */ |
diff --git a/arch/unicore32/kernel/signal.c b/arch/unicore32/kernel/signal.c index 6905f0ebdc77..780d77388dec 100644 --- a/arch/unicore32/kernel/signal.c +++ b/arch/unicore32/kernel/signal.c | |||
@@ -238,10 +238,10 @@ static int setup_return(struct pt_regs *regs, struct k_sigaction *ka, | |||
238 | return 0; | 238 | return 0; |
239 | } | 239 | } |
240 | 240 | ||
241 | static int setup_frame(int usig, struct k_sigaction *ka, | 241 | static int setup_frame(struct ksignal *ksig, sigset_t *set, |
242 | sigset_t *set, struct pt_regs *regs) | 242 | struct pt_regs *regs) |
243 | { | 243 | { |
244 | struct sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame)); | 244 | struct sigframe __user *frame = get_sigframe(&ksig->ka, regs, sizeof(*frame)); |
245 | int err = 0; | 245 | int err = 0; |
246 | 246 | ||
247 | if (!frame) | 247 | if (!frame) |
@@ -254,29 +254,29 @@ static int setup_frame(int usig, struct k_sigaction *ka, | |||
254 | 254 | ||
255 | err |= setup_sigframe(frame, regs, set); | 255 | err |= setup_sigframe(frame, regs, set); |
256 | if (err == 0) | 256 | if (err == 0) |
257 | err |= setup_return(regs, ka, frame->retcode, frame, usig); | 257 | err |= setup_return(regs, &ksig->ka, frame->retcode, frame, usig); |
258 | 258 | ||
259 | return err; | 259 | return err; |
260 | } | 260 | } |
261 | 261 | ||
262 | static int setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, | 262 | static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, |
263 | sigset_t *set, struct pt_regs *regs) | 263 | struct pt_regs *regs) |
264 | { | 264 | { |
265 | struct rt_sigframe __user *frame = | 265 | struct rt_sigframe __user *frame = |
266 | get_sigframe(ka, regs, sizeof(*frame)); | 266 | get_sigframe(&ksig->ka, regs, sizeof(*frame)); |
267 | int err = 0; | 267 | int err = 0; |
268 | 268 | ||
269 | if (!frame) | 269 | if (!frame) |
270 | return 1; | 270 | return 1; |
271 | 271 | ||
272 | err |= copy_siginfo_to_user(&frame->info, info); | 272 | err |= copy_siginfo_to_user(&frame->info, &ksig->info); |
273 | 273 | ||
274 | err |= __put_user(0, &frame->sig.uc.uc_flags); | 274 | err |= __put_user(0, &frame->sig.uc.uc_flags); |
275 | err |= __put_user(NULL, &frame->sig.uc.uc_link); | 275 | err |= __put_user(NULL, &frame->sig.uc.uc_link); |
276 | err |= __save_altstack(&frame->sig.uc.uc_stack, regs->UCreg_sp); | 276 | err |= __save_altstack(&frame->sig.uc.uc_stack, regs->UCreg_sp); |
277 | err |= setup_sigframe(&frame->sig, regs, set); | 277 | err |= setup_sigframe(&frame->sig, regs, set); |
278 | if (err == 0) | 278 | if (err == 0) |
279 | err |= setup_return(regs, ka, frame->sig.retcode, frame, usig); | 279 | err |= setup_return(regs, &ksig->ka, frame->sig.retcode, frame, usig); |
280 | 280 | ||
281 | if (err == 0) { | 281 | if (err == 0) { |
282 | /* | 282 | /* |
@@ -299,13 +299,13 @@ static inline void setup_syscall_restart(struct pt_regs *regs) | |||
299 | /* | 299 | /* |
300 | * OK, we're invoking a handler | 300 | * OK, we're invoking a handler |
301 | */ | 301 | */ |
302 | static void handle_signal(unsigned long sig, struct k_sigaction *ka, | 302 | static void handle_signal(struct ksignal *ksig, struct pt_regs *regs, |
303 | siginfo_t *info, struct pt_regs *regs, int syscall) | 303 | int syscall) |
304 | { | 304 | { |
305 | struct thread_info *thread = current_thread_info(); | 305 | struct thread_info *thread = current_thread_info(); |
306 | struct task_struct *tsk = current; | 306 | struct task_struct *tsk = current; |
307 | sigset_t *oldset = sigmask_to_save(); | 307 | sigset_t *oldset = sigmask_to_save(); |
308 | int usig = sig; | 308 | int usig = ksig->sig; |
309 | int ret; | 309 | int ret; |
310 | 310 | ||
311 | /* | 311 | /* |
@@ -318,7 +318,7 @@ static void handle_signal(unsigned long sig, struct k_sigaction *ka, | |||
318 | regs->UCreg_00 = -EINTR; | 318 | regs->UCreg_00 = -EINTR; |
319 | break; | 319 | break; |
320 | case -ERESTARTSYS: | 320 | case -ERESTARTSYS: |
321 | if (!(ka->sa.sa_flags & SA_RESTART)) { | 321 | if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { |
322 | regs->UCreg_00 = -EINTR; | 322 | regs->UCreg_00 = -EINTR; |
323 | break; | 323 | break; |
324 | } | 324 | } |
@@ -338,22 +338,17 @@ static void handle_signal(unsigned long sig, struct k_sigaction *ka, | |||
338 | /* | 338 | /* |
339 | * Set up the stack frame | 339 | * Set up the stack frame |
340 | */ | 340 | */ |
341 | if (ka->sa.sa_flags & SA_SIGINFO) | 341 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) |
342 | ret = setup_rt_frame(usig, ka, info, oldset, regs); | 342 | ret = setup_rt_frame(ksig, oldset, regs); |
343 | else | 343 | else |
344 | ret = setup_frame(usig, ka, oldset, regs); | 344 | ret = setup_frame(ksig, oldset, regs); |
345 | 345 | ||
346 | /* | 346 | /* |
347 | * Check that the resulting registers are actually sane. | 347 | * Check that the resulting registers are actually sane. |
348 | */ | 348 | */ |
349 | ret |= !valid_user_regs(regs); | 349 | ret |= !valid_user_regs(regs); |
350 | 350 | ||
351 | if (ret != 0) { | 351 | signal_setup_done(ret, ksig, 0); |
352 | force_sigsegv(sig, tsk); | ||
353 | return; | ||
354 | } | ||
355 | |||
356 | signal_delivered(sig, info, ka, regs, 0); | ||
357 | } | 352 | } |
358 | 353 | ||
359 | /* | 354 | /* |
@@ -367,9 +362,7 @@ static void handle_signal(unsigned long sig, struct k_sigaction *ka, | |||
367 | */ | 362 | */ |
368 | static void do_signal(struct pt_regs *regs, int syscall) | 363 | static void do_signal(struct pt_regs *regs, int syscall) |
369 | { | 364 | { |
370 | struct k_sigaction ka; | 365 | struct ksignal ksig; |
371 | siginfo_t info; | ||
372 | int signr; | ||
373 | 366 | ||
374 | /* | 367 | /* |
375 | * We want the common case to go fast, which | 368 | * We want the common case to go fast, which |
@@ -380,9 +373,8 @@ static void do_signal(struct pt_regs *regs, int syscall) | |||
380 | if (!user_mode(regs)) | 373 | if (!user_mode(regs)) |
381 | return; | 374 | return; |
382 | 375 | ||
383 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 376 | if (get_signal(&ksig)) { |
384 | if (signr > 0) { | 377 | handle_signal(&ksig, regs, syscall); |
385 | handle_signal(signr, &ka, &info, regs, syscall); | ||
386 | return; | 378 | return; |
387 | } | 379 | } |
388 | 380 | ||
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c index 5e04a1c899fa..79d824551c1a 100644 --- a/arch/x86/um/signal.c +++ b/arch/x86/um/signal.c | |||
@@ -370,13 +370,12 @@ struct rt_sigframe | |||
370 | char retcode[8]; | 370 | char retcode[8]; |
371 | }; | 371 | }; |
372 | 372 | ||
373 | int setup_signal_stack_sc(unsigned long stack_top, int sig, | 373 | int setup_signal_stack_sc(unsigned long stack_top, struct ksignal *ksig, |
374 | struct k_sigaction *ka, struct pt_regs *regs, | 374 | struct pt_regs *regs, sigset_t *mask) |
375 | sigset_t *mask) | ||
376 | { | 375 | { |
377 | struct sigframe __user *frame; | 376 | struct sigframe __user *frame; |
378 | void __user *restorer; | 377 | void __user *restorer; |
379 | int err = 0; | 378 | int err = 0, sig = ksig->sig; |
380 | 379 | ||
381 | /* This is the same calculation as i386 - ((sp + 4) & 15) == 0 */ | 380 | /* This is the same calculation as i386 - ((sp + 4) & 15) == 0 */ |
382 | stack_top = ((stack_top + 4) & -16UL) - 4; | 381 | stack_top = ((stack_top + 4) & -16UL) - 4; |
@@ -385,8 +384,8 @@ int setup_signal_stack_sc(unsigned long stack_top, int sig, | |||
385 | return 1; | 384 | return 1; |
386 | 385 | ||
387 | restorer = frame->retcode; | 386 | restorer = frame->retcode; |
388 | if (ka->sa.sa_flags & SA_RESTORER) | 387 | if (ksig->ka.sa.sa_flags & SA_RESTORER) |
389 | restorer = ka->sa.sa_restorer; | 388 | restorer = ksig->ka.sa.sa_restorer; |
390 | 389 | ||
391 | err |= __put_user(restorer, &frame->pretcode); | 390 | err |= __put_user(restorer, &frame->pretcode); |
392 | err |= __put_user(sig, &frame->sig); | 391 | err |= __put_user(sig, &frame->sig); |
@@ -410,20 +409,19 @@ int setup_signal_stack_sc(unsigned long stack_top, int sig, | |||
410 | return err; | 409 | return err; |
411 | 410 | ||
412 | PT_REGS_SP(regs) = (unsigned long) frame; | 411 | PT_REGS_SP(regs) = (unsigned long) frame; |
413 | PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler; | 412 | PT_REGS_IP(regs) = (unsigned long) ksig->ka.sa.sa_handler; |
414 | PT_REGS_AX(regs) = (unsigned long) sig; | 413 | PT_REGS_AX(regs) = (unsigned long) sig; |
415 | PT_REGS_DX(regs) = (unsigned long) 0; | 414 | PT_REGS_DX(regs) = (unsigned long) 0; |
416 | PT_REGS_CX(regs) = (unsigned long) 0; | 415 | PT_REGS_CX(regs) = (unsigned long) 0; |
417 | return 0; | 416 | return 0; |
418 | } | 417 | } |
419 | 418 | ||
420 | int setup_signal_stack_si(unsigned long stack_top, int sig, | 419 | int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig, |
421 | struct k_sigaction *ka, struct pt_regs *regs, | 420 | struct pt_regs *regs, sigset_t *mask) |
422 | siginfo_t *info, sigset_t *mask) | ||
423 | { | 421 | { |
424 | struct rt_sigframe __user *frame; | 422 | struct rt_sigframe __user *frame; |
425 | void __user *restorer; | 423 | void __user *restorer; |
426 | int err = 0; | 424 | int err = 0, sig = ksig->sig; |
427 | 425 | ||
428 | stack_top &= -8UL; | 426 | stack_top &= -8UL; |
429 | frame = (struct rt_sigframe __user *) stack_top - 1; | 427 | frame = (struct rt_sigframe __user *) stack_top - 1; |
@@ -431,14 +429,14 @@ int setup_signal_stack_si(unsigned long stack_top, int sig, | |||
431 | return 1; | 429 | return 1; |
432 | 430 | ||
433 | restorer = frame->retcode; | 431 | restorer = frame->retcode; |
434 | if (ka->sa.sa_flags & SA_RESTORER) | 432 | if (ksig->ka.sa.sa_flags & SA_RESTORER) |
435 | restorer = ka->sa.sa_restorer; | 433 | restorer = ksig->ka.sa.sa_restorer; |
436 | 434 | ||
437 | err |= __put_user(restorer, &frame->pretcode); | 435 | err |= __put_user(restorer, &frame->pretcode); |
438 | err |= __put_user(sig, &frame->sig); | 436 | err |= __put_user(sig, &frame->sig); |
439 | err |= __put_user(&frame->info, &frame->pinfo); | 437 | err |= __put_user(&frame->info, &frame->pinfo); |
440 | err |= __put_user(&frame->uc, &frame->puc); | 438 | err |= __put_user(&frame->uc, &frame->puc); |
441 | err |= copy_siginfo_to_user(&frame->info, info); | 439 | err |= copy_siginfo_to_user(&frame->info, &ksig->info); |
442 | err |= copy_ucontext_to_user(&frame->uc, &frame->fpstate, mask, | 440 | err |= copy_ucontext_to_user(&frame->uc, &frame->fpstate, mask, |
443 | PT_REGS_SP(regs)); | 441 | PT_REGS_SP(regs)); |
444 | 442 | ||
@@ -457,7 +455,7 @@ int setup_signal_stack_si(unsigned long stack_top, int sig, | |||
457 | return err; | 455 | return err; |
458 | 456 | ||
459 | PT_REGS_SP(regs) = (unsigned long) frame; | 457 | PT_REGS_SP(regs) = (unsigned long) frame; |
460 | PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler; | 458 | PT_REGS_IP(regs) = (unsigned long) ksig->ka.sa.sa_handler; |
461 | PT_REGS_AX(regs) = (unsigned long) sig; | 459 | PT_REGS_AX(regs) = (unsigned long) sig; |
462 | PT_REGS_DX(regs) = (unsigned long) &frame->info; | 460 | PT_REGS_DX(regs) = (unsigned long) &frame->info; |
463 | PT_REGS_CX(regs) = (unsigned long) &frame->uc; | 461 | PT_REGS_CX(regs) = (unsigned long) &frame->uc; |
@@ -502,12 +500,11 @@ struct rt_sigframe | |||
502 | struct _fpstate fpstate; | 500 | struct _fpstate fpstate; |
503 | }; | 501 | }; |
504 | 502 | ||
505 | int setup_signal_stack_si(unsigned long stack_top, int sig, | 503 | int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig, |
506 | struct k_sigaction *ka, struct pt_regs * regs, | 504 | struct pt_regs *regs, sigset_t *set) |
507 | siginfo_t *info, sigset_t *set) | ||
508 | { | 505 | { |
509 | struct rt_sigframe __user *frame; | 506 | struct rt_sigframe __user *frame; |
510 | int err = 0; | 507 | int err = 0, sig = ksig->sig; |
511 | 508 | ||
512 | frame = (struct rt_sigframe __user *) | 509 | frame = (struct rt_sigframe __user *) |
513 | round_down(stack_top - sizeof(struct rt_sigframe), 16); | 510 | round_down(stack_top - sizeof(struct rt_sigframe), 16); |
@@ -517,8 +514,8 @@ int setup_signal_stack_si(unsigned long stack_top, int sig, | |||
517 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 514 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
518 | goto out; | 515 | goto out; |
519 | 516 | ||
520 | if (ka->sa.sa_flags & SA_SIGINFO) { | 517 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) { |
521 | err |= copy_siginfo_to_user(&frame->info, info); | 518 | err |= copy_siginfo_to_user(&frame->info, &ksig->info); |
522 | if (err) | 519 | if (err) |
523 | goto out; | 520 | goto out; |
524 | } | 521 | } |
@@ -543,8 +540,8 @@ int setup_signal_stack_si(unsigned long stack_top, int sig, | |||
543 | * already in userspace. | 540 | * already in userspace. |
544 | */ | 541 | */ |
545 | /* x86-64 should always use SA_RESTORER. */ | 542 | /* x86-64 should always use SA_RESTORER. */ |
546 | if (ka->sa.sa_flags & SA_RESTORER) | 543 | if (ksig->ka.sa.sa_flags & SA_RESTORER) |
547 | err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); | 544 | err |= __put_user(ksig->ka.sa.sa_restorer, &frame->pretcode); |
548 | else | 545 | else |
549 | /* could use a vstub here */ | 546 | /* could use a vstub here */ |
550 | return err; | 547 | return err; |
@@ -570,7 +567,7 @@ int setup_signal_stack_si(unsigned long stack_top, int sig, | |||
570 | */ | 567 | */ |
571 | PT_REGS_SI(regs) = (unsigned long) &frame->info; | 568 | PT_REGS_SI(regs) = (unsigned long) &frame->info; |
572 | PT_REGS_DX(regs) = (unsigned long) &frame->uc; | 569 | PT_REGS_DX(regs) = (unsigned long) &frame->uc; |
573 | PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler; | 570 | PT_REGS_IP(regs) = (unsigned long) ksig->ka.sa.sa_handler; |
574 | out: | 571 | out: |
575 | return err; | 572 | return err; |
576 | } | 573 | } |
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index 98b67d5f1514..4612321c73cc 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c | |||
@@ -331,17 +331,17 @@ gen_return_code(unsigned char *codemem) | |||
331 | } | 331 | } |
332 | 332 | ||
333 | 333 | ||
334 | static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 334 | static int setup_frame(struct ksignal *ksig, sigset_t *set, |
335 | sigset_t *set, struct pt_regs *regs) | 335 | struct pt_regs *regs) |
336 | { | 336 | { |
337 | struct rt_sigframe *frame; | 337 | struct rt_sigframe *frame; |
338 | int err = 0; | 338 | int err = 0, sig = ksig->sig; |
339 | int signal; | 339 | int signal; |
340 | unsigned long sp, ra, tp; | 340 | unsigned long sp, ra, tp; |
341 | 341 | ||
342 | sp = regs->areg[1]; | 342 | sp = regs->areg[1]; |
343 | 343 | ||
344 | if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) { | 344 | if ((ksig->ka.sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) { |
345 | sp = current->sas_ss_sp + current->sas_ss_size; | 345 | sp = current->sas_ss_sp + current->sas_ss_size; |
346 | } | 346 | } |
347 | 347 | ||
@@ -351,7 +351,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
351 | panic ("Double exception sys_sigreturn\n"); | 351 | panic ("Double exception sys_sigreturn\n"); |
352 | 352 | ||
353 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) { | 353 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) { |
354 | goto give_sigsegv; | 354 | return -EFAULT; |
355 | } | 355 | } |
356 | 356 | ||
357 | signal = current_thread_info()->exec_domain | 357 | signal = current_thread_info()->exec_domain |
@@ -360,8 +360,8 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
360 | ? current_thread_info()->exec_domain->signal_invmap[sig] | 360 | ? current_thread_info()->exec_domain->signal_invmap[sig] |
361 | : sig; | 361 | : sig; |
362 | 362 | ||
363 | if (ka->sa.sa_flags & SA_SIGINFO) { | 363 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) { |
364 | err |= copy_siginfo_to_user(&frame->info, info); | 364 | err |= copy_siginfo_to_user(&frame->info, &ksig->info); |
365 | } | 365 | } |
366 | 366 | ||
367 | /* Create the user context. */ | 367 | /* Create the user context. */ |
@@ -372,8 +372,8 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
372 | err |= setup_sigcontext(frame, regs); | 372 | err |= setup_sigcontext(frame, regs); |
373 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | 373 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); |
374 | 374 | ||
375 | if (ka->sa.sa_flags & SA_RESTORER) { | 375 | if (ksig->ka.sa.sa_flags & SA_RESTORER) { |
376 | ra = (unsigned long)ka->sa.sa_restorer; | 376 | ra = (unsigned long)ksig->ka.sa.sa_restorer; |
377 | } else { | 377 | } else { |
378 | 378 | ||
379 | /* Create sys_rt_sigreturn syscall in stack frame */ | 379 | /* Create sys_rt_sigreturn syscall in stack frame */ |
@@ -381,7 +381,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
381 | err |= gen_return_code(frame->retcode); | 381 | err |= gen_return_code(frame->retcode); |
382 | 382 | ||
383 | if (err) { | 383 | if (err) { |
384 | goto give_sigsegv; | 384 | return -EFAULT; |
385 | } | 385 | } |
386 | ra = (unsigned long) frame->retcode; | 386 | ra = (unsigned long) frame->retcode; |
387 | } | 387 | } |
@@ -393,7 +393,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
393 | 393 | ||
394 | /* Set up registers for signal handler; preserve the threadptr */ | 394 | /* Set up registers for signal handler; preserve the threadptr */ |
395 | tp = regs->threadptr; | 395 | tp = regs->threadptr; |
396 | start_thread(regs, (unsigned long) ka->sa.sa_handler, | 396 | start_thread(regs, (unsigned long) ksig->ka.sa.sa_handler, |
397 | (unsigned long) frame); | 397 | (unsigned long) frame); |
398 | 398 | ||
399 | /* Set up a stack frame for a call4 | 399 | /* Set up a stack frame for a call4 |
@@ -416,10 +416,6 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
416 | #endif | 416 | #endif |
417 | 417 | ||
418 | return 0; | 418 | return 0; |
419 | |||
420 | give_sigsegv: | ||
421 | force_sigsegv(sig, current); | ||
422 | return -EFAULT; | ||
423 | } | 419 | } |
424 | 420 | ||
425 | /* | 421 | /* |
@@ -433,15 +429,11 @@ give_sigsegv: | |||
433 | */ | 429 | */ |
434 | static void do_signal(struct pt_regs *regs) | 430 | static void do_signal(struct pt_regs *regs) |
435 | { | 431 | { |
436 | siginfo_t info; | 432 | struct ksignal ksig; |
437 | int signr; | ||
438 | struct k_sigaction ka; | ||
439 | 433 | ||
440 | task_pt_regs(current)->icountlevel = 0; | 434 | task_pt_regs(current)->icountlevel = 0; |
441 | 435 | ||
442 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 436 | if (get_signal(&ksig)) { |
443 | |||
444 | if (signr > 0) { | ||
445 | int ret; | 437 | int ret; |
446 | 438 | ||
447 | /* Are we from a system call? */ | 439 | /* Are we from a system call? */ |
@@ -457,7 +449,7 @@ static void do_signal(struct pt_regs *regs) | |||
457 | break; | 449 | break; |
458 | 450 | ||
459 | case -ERESTARTSYS: | 451 | case -ERESTARTSYS: |
460 | if (!(ka.sa.sa_flags & SA_RESTART)) { | 452 | if (!(ksig.ka.sa.sa_flags & SA_RESTART)) { |
461 | regs->areg[2] = -EINTR; | 453 | regs->areg[2] = -EINTR; |
462 | break; | 454 | break; |
463 | } | 455 | } |
@@ -476,11 +468,8 @@ static void do_signal(struct pt_regs *regs) | |||
476 | 468 | ||
477 | /* Whee! Actually deliver the signal. */ | 469 | /* Whee! Actually deliver the signal. */ |
478 | /* Set up the stack frame */ | 470 | /* Set up the stack frame */ |
479 | ret = setup_frame(signr, &ka, &info, sigmask_to_save(), regs); | 471 | ret = setup_frame(&ksig, sigmask_to_save(), regs); |
480 | if (ret) | 472 | signal_setup_done(ret, &ksig, 0); |
481 | return; | ||
482 | |||
483 | signal_delivered(signr, &info, &ka, regs, 0); | ||
484 | if (current->ptrace & PT_SINGLESTEP) | 473 | if (current->ptrace & PT_SINGLESTEP) |
485 | task_pt_regs(current)->icountlevel = 1; | 474 | task_pt_regs(current)->icountlevel = 1; |
486 | 475 | ||
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index 637c29a33127..40afc69a3778 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig | |||
@@ -1,5 +1,5 @@ | |||
1 | config DRM_NOUVEAU | 1 | config DRM_NOUVEAU |
2 | tristate "Nouveau (nVidia) cards" | 2 | tristate "Nouveau (NVIDIA) cards" |
3 | depends on DRM && PCI | 3 | depends on DRM && PCI |
4 | select FW_LOADER | 4 | select FW_LOADER |
5 | select DRM_KMS_HELPER | 5 | select DRM_KMS_HELPER |
@@ -23,7 +23,15 @@ config DRM_NOUVEAU | |||
23 | select THERMAL if ACPI && X86 | 23 | select THERMAL if ACPI && X86 |
24 | select ACPI_VIDEO if ACPI && X86 | 24 | select ACPI_VIDEO if ACPI && X86 |
25 | help | 25 | help |
26 | Choose this option for open-source nVidia support. | 26 | Choose this option for open-source NVIDIA support. |
27 | |||
28 | config NOUVEAU_PLATFORM_DRIVER | ||
29 | tristate "Nouveau (NVIDIA) SoC GPUs" | ||
30 | depends on DRM_NOUVEAU && ARCH_TEGRA | ||
31 | default y | ||
32 | help | ||
33 | Support for Nouveau platform driver, used for SoC GPUs as found | ||
34 | on NVIDIA Tegra K1. | ||
27 | 35 | ||
28 | config NOUVEAU_DEBUG | 36 | config NOUVEAU_DEBUG |
29 | int "Maximum debug level" | 37 | int "Maximum debug level" |
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index 8b307e143632..f5d7f7ce4bc6 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile | |||
@@ -14,8 +14,10 @@ nouveau-y += core/core/enum.o | |||
14 | nouveau-y += core/core/event.o | 14 | nouveau-y += core/core/event.o |
15 | nouveau-y += core/core/gpuobj.o | 15 | nouveau-y += core/core/gpuobj.o |
16 | nouveau-y += core/core/handle.o | 16 | nouveau-y += core/core/handle.o |
17 | nouveau-y += core/core/ioctl.o | ||
17 | nouveau-y += core/core/mm.o | 18 | nouveau-y += core/core/mm.o |
18 | nouveau-y += core/core/namedb.o | 19 | nouveau-y += core/core/namedb.o |
20 | nouveau-y += core/core/notify.o | ||
19 | nouveau-y += core/core/object.o | 21 | nouveau-y += core/core/object.o |
20 | nouveau-y += core/core/option.o | 22 | nouveau-y += core/core/option.o |
21 | nouveau-y += core/core/parent.o | 23 | nouveau-y += core/core/parent.o |
@@ -26,6 +28,7 @@ nouveau-y += core/core/subdev.o | |||
26 | nouveau-y += core/subdev/bar/base.o | 28 | nouveau-y += core/subdev/bar/base.o |
27 | nouveau-y += core/subdev/bar/nv50.o | 29 | nouveau-y += core/subdev/bar/nv50.o |
28 | nouveau-y += core/subdev/bar/nvc0.o | 30 | nouveau-y += core/subdev/bar/nvc0.o |
31 | nouveau-y += core/subdev/bar/gk20a.o | ||
29 | nouveau-y += core/subdev/bios/base.o | 32 | nouveau-y += core/subdev/bios/base.o |
30 | nouveau-y += core/subdev/bios/bit.o | 33 | nouveau-y += core/subdev/bios/bit.o |
31 | nouveau-y += core/subdev/bios/boost.o | 34 | nouveau-y += core/subdev/bios/boost.o |
@@ -64,6 +67,7 @@ nouveau-y += core/subdev/clock/nva3.o | |||
64 | nouveau-y += core/subdev/clock/nvaa.o | 67 | nouveau-y += core/subdev/clock/nvaa.o |
65 | nouveau-y += core/subdev/clock/nvc0.o | 68 | nouveau-y += core/subdev/clock/nvc0.o |
66 | nouveau-y += core/subdev/clock/nve0.o | 69 | nouveau-y += core/subdev/clock/nve0.o |
70 | nouveau-y += core/subdev/clock/gk20a.o | ||
67 | nouveau-y += core/subdev/clock/pllnv04.o | 71 | nouveau-y += core/subdev/clock/pllnv04.o |
68 | nouveau-y += core/subdev/clock/pllnva3.o | 72 | nouveau-y += core/subdev/clock/pllnva3.o |
69 | nouveau-y += core/subdev/devinit/base.o | 73 | nouveau-y += core/subdev/devinit/base.o |
@@ -149,8 +153,10 @@ nouveau-y += core/subdev/instmem/base.o | |||
149 | nouveau-y += core/subdev/instmem/nv04.o | 153 | nouveau-y += core/subdev/instmem/nv04.o |
150 | nouveau-y += core/subdev/instmem/nv40.o | 154 | nouveau-y += core/subdev/instmem/nv40.o |
151 | nouveau-y += core/subdev/instmem/nv50.o | 155 | nouveau-y += core/subdev/instmem/nv50.o |
152 | nouveau-y += core/subdev/ltcg/gf100.o | 156 | nouveau-y += core/subdev/ltc/base.o |
153 | nouveau-y += core/subdev/ltcg/gm107.o | 157 | nouveau-y += core/subdev/ltc/gf100.o |
158 | nouveau-y += core/subdev/ltc/gk104.o | ||
159 | nouveau-y += core/subdev/ltc/gm107.o | ||
154 | nouveau-y += core/subdev/mc/base.o | 160 | nouveau-y += core/subdev/mc/base.o |
155 | nouveau-y += core/subdev/mc/nv04.o | 161 | nouveau-y += core/subdev/mc/nv04.o |
156 | nouveau-y += core/subdev/mc/nv40.o | 162 | nouveau-y += core/subdev/mc/nv40.o |
@@ -161,6 +167,7 @@ nouveau-y += core/subdev/mc/nv94.o | |||
161 | nouveau-y += core/subdev/mc/nv98.o | 167 | nouveau-y += core/subdev/mc/nv98.o |
162 | nouveau-y += core/subdev/mc/nvc0.o | 168 | nouveau-y += core/subdev/mc/nvc0.o |
163 | nouveau-y += core/subdev/mc/nvc3.o | 169 | nouveau-y += core/subdev/mc/nvc3.o |
170 | nouveau-y += core/subdev/mc/gk20a.o | ||
164 | nouveau-y += core/subdev/mxm/base.o | 171 | nouveau-y += core/subdev/mxm/base.o |
165 | nouveau-y += core/subdev/mxm/mxms.o | 172 | nouveau-y += core/subdev/mxm/mxms.o |
166 | nouveau-y += core/subdev/mxm/nv50.o | 173 | nouveau-y += core/subdev/mxm/nv50.o |
@@ -169,6 +176,7 @@ nouveau-y += core/subdev/pwr/memx.o | |||
169 | nouveau-y += core/subdev/pwr/nva3.o | 176 | nouveau-y += core/subdev/pwr/nva3.o |
170 | nouveau-y += core/subdev/pwr/nvc0.o | 177 | nouveau-y += core/subdev/pwr/nvc0.o |
171 | nouveau-y += core/subdev/pwr/nvd0.o | 178 | nouveau-y += core/subdev/pwr/nvd0.o |
179 | nouveau-y += core/subdev/pwr/gk104.o | ||
172 | nouveau-y += core/subdev/pwr/nv108.o | 180 | nouveau-y += core/subdev/pwr/nv108.o |
173 | nouveau-y += core/subdev/therm/base.o | 181 | nouveau-y += core/subdev/therm/base.o |
174 | nouveau-y += core/subdev/therm/fan.o | 182 | nouveau-y += core/subdev/therm/fan.o |
@@ -211,6 +219,7 @@ nouveau-y += core/engine/copy/nvc0.o | |||
211 | nouveau-y += core/engine/copy/nve0.o | 219 | nouveau-y += core/engine/copy/nve0.o |
212 | nouveau-y += core/engine/crypt/nv84.o | 220 | nouveau-y += core/engine/crypt/nv84.o |
213 | nouveau-y += core/engine/crypt/nv98.o | 221 | nouveau-y += core/engine/crypt/nv98.o |
222 | nouveau-y += core/engine/device/acpi.o | ||
214 | nouveau-y += core/engine/device/base.o | 223 | nouveau-y += core/engine/device/base.o |
215 | nouveau-y += core/engine/device/ctrl.o | 224 | nouveau-y += core/engine/device/ctrl.o |
216 | nouveau-y += core/engine/device/nv04.o | 225 | nouveau-y += core/engine/device/nv04.o |
@@ -270,6 +279,7 @@ nouveau-y += core/engine/graph/ctxnvd9.o | |||
270 | nouveau-y += core/engine/graph/ctxnve4.o | 279 | nouveau-y += core/engine/graph/ctxnve4.o |
271 | nouveau-y += core/engine/graph/ctxgk20a.o | 280 | nouveau-y += core/engine/graph/ctxgk20a.o |
272 | nouveau-y += core/engine/graph/ctxnvf0.o | 281 | nouveau-y += core/engine/graph/ctxnvf0.o |
282 | nouveau-y += core/engine/graph/ctxgk110b.o | ||
273 | nouveau-y += core/engine/graph/ctxnv108.o | 283 | nouveau-y += core/engine/graph/ctxnv108.o |
274 | nouveau-y += core/engine/graph/ctxgm107.o | 284 | nouveau-y += core/engine/graph/ctxgm107.o |
275 | nouveau-y += core/engine/graph/nv04.o | 285 | nouveau-y += core/engine/graph/nv04.o |
@@ -291,6 +301,7 @@ nouveau-y += core/engine/graph/nvd9.o | |||
291 | nouveau-y += core/engine/graph/nve4.o | 301 | nouveau-y += core/engine/graph/nve4.o |
292 | nouveau-y += core/engine/graph/gk20a.o | 302 | nouveau-y += core/engine/graph/gk20a.o |
293 | nouveau-y += core/engine/graph/nvf0.o | 303 | nouveau-y += core/engine/graph/nvf0.o |
304 | nouveau-y += core/engine/graph/gk110b.o | ||
294 | nouveau-y += core/engine/graph/nv108.o | 305 | nouveau-y += core/engine/graph/nv108.o |
295 | nouveau-y += core/engine/graph/gm107.o | 306 | nouveau-y += core/engine/graph/gm107.o |
296 | nouveau-y += core/engine/mpeg/nv31.o | 307 | nouveau-y += core/engine/mpeg/nv31.o |
@@ -318,11 +329,18 @@ nouveau-y += core/engine/vp/nv98.o | |||
318 | nouveau-y += core/engine/vp/nvc0.o | 329 | nouveau-y += core/engine/vp/nvc0.o |
319 | nouveau-y += core/engine/vp/nve0.o | 330 | nouveau-y += core/engine/vp/nve0.o |
320 | 331 | ||
332 | # nvif | ||
333 | nouveau-y += nvif/object.o | ||
334 | nouveau-y += nvif/client.o | ||
335 | nouveau-y += nvif/device.o | ||
336 | nouveau-y += nvif/notify.o | ||
337 | |||
321 | # drm/core | 338 | # drm/core |
322 | nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o | 339 | nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o |
323 | nouveau-y += nouveau_vga.o nouveau_agp.o | 340 | nouveau-y += nouveau_vga.o nouveau_agp.o |
324 | nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o | 341 | nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o |
325 | nouveau-y += nouveau_prime.o nouveau_abi16.o | 342 | nouveau-y += nouveau_prime.o nouveau_abi16.o |
343 | nouveau-y += nouveau_nvif.o nouveau_usif.o | ||
326 | nouveau-y += nv04_fence.o nv10_fence.o nv17_fence.o | 344 | nouveau-y += nv04_fence.o nv10_fence.o nv17_fence.o |
327 | nouveau-y += nv50_fence.o nv84_fence.o nvc0_fence.o | 345 | nouveau-y += nv50_fence.o nv84_fence.o nvc0_fence.o |
328 | 346 | ||
@@ -349,3 +367,6 @@ nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o | |||
349 | nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o | 367 | nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o |
350 | 368 | ||
351 | obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o | 369 | obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o |
370 | |||
371 | # platform driver | ||
372 | obj-$(CONFIG_NOUVEAU_PLATFORM_DRIVER) += nouveau_platform.o | ||
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c index 9079c0ac58e6..10598dede9e9 100644 --- a/drivers/gpu/drm/nouveau/core/core/client.c +++ b/drivers/gpu/drm/nouveau/core/core/client.c | |||
@@ -26,13 +26,167 @@ | |||
26 | #include <core/client.h> | 26 | #include <core/client.h> |
27 | #include <core/handle.h> | 27 | #include <core/handle.h> |
28 | #include <core/option.h> | 28 | #include <core/option.h> |
29 | #include <nvif/unpack.h> | ||
30 | #include <nvif/class.h> | ||
31 | |||
32 | #include <nvif/unpack.h> | ||
33 | #include <nvif/event.h> | ||
29 | 34 | ||
30 | #include <engine/device.h> | 35 | #include <engine/device.h> |
31 | 36 | ||
37 | struct nvkm_client_notify { | ||
38 | struct nouveau_client *client; | ||
39 | struct nvkm_notify n; | ||
40 | u8 version; | ||
41 | u8 size; | ||
42 | union { | ||
43 | struct nvif_notify_rep_v0 v0; | ||
44 | } rep; | ||
45 | }; | ||
46 | |||
47 | static int | ||
48 | nvkm_client_notify(struct nvkm_notify *n) | ||
49 | { | ||
50 | struct nvkm_client_notify *notify = container_of(n, typeof(*notify), n); | ||
51 | struct nouveau_client *client = notify->client; | ||
52 | return client->ntfy(¬ify->rep, notify->size, n->data, n->size); | ||
53 | } | ||
54 | |||
55 | int | ||
56 | nvkm_client_notify_put(struct nouveau_client *client, int index) | ||
57 | { | ||
58 | if (index < ARRAY_SIZE(client->notify)) { | ||
59 | if (client->notify[index]) { | ||
60 | nvkm_notify_put(&client->notify[index]->n); | ||
61 | return 0; | ||
62 | } | ||
63 | } | ||
64 | return -ENOENT; | ||
65 | } | ||
66 | |||
67 | int | ||
68 | nvkm_client_notify_get(struct nouveau_client *client, int index) | ||
69 | { | ||
70 | if (index < ARRAY_SIZE(client->notify)) { | ||
71 | if (client->notify[index]) { | ||
72 | nvkm_notify_get(&client->notify[index]->n); | ||
73 | return 0; | ||
74 | } | ||
75 | } | ||
76 | return -ENOENT; | ||
77 | } | ||
78 | |||
79 | int | ||
80 | nvkm_client_notify_del(struct nouveau_client *client, int index) | ||
81 | { | ||
82 | if (index < ARRAY_SIZE(client->notify)) { | ||
83 | if (client->notify[index]) { | ||
84 | nvkm_notify_fini(&client->notify[index]->n); | ||
85 | kfree(client->notify[index]); | ||
86 | client->notify[index] = NULL; | ||
87 | return 0; | ||
88 | } | ||
89 | } | ||
90 | return -ENOENT; | ||
91 | } | ||
92 | |||
93 | int | ||
94 | nvkm_client_notify_new(struct nouveau_client *client, | ||
95 | struct nvkm_event *event, void *data, u32 size) | ||
96 | { | ||
97 | struct nvkm_client_notify *notify; | ||
98 | union { | ||
99 | struct nvif_notify_req_v0 v0; | ||
100 | } *req = data; | ||
101 | u8 index, reply; | ||
102 | int ret; | ||
103 | |||
104 | for (index = 0; index < ARRAY_SIZE(client->notify); index++) { | ||
105 | if (!client->notify[index]) | ||
106 | break; | ||
107 | } | ||
108 | |||
109 | if (index == ARRAY_SIZE(client->notify)) | ||
110 | return -ENOSPC; | ||
111 | |||
112 | notify = kzalloc(sizeof(*notify), GFP_KERNEL); | ||
113 | if (!notify) | ||
114 | return -ENOMEM; | ||
115 | |||
116 | nv_ioctl(client, "notify new size %d\n", size); | ||
117 | if (nvif_unpack(req->v0, 0, 0, true)) { | ||
118 | nv_ioctl(client, "notify new vers %d reply %d route %02x " | ||
119 | "token %llx\n", req->v0.version, | ||
120 | req->v0.reply, req->v0.route, req->v0.token); | ||
121 | notify->version = req->v0.version; | ||
122 | notify->size = sizeof(notify->rep.v0); | ||
123 | notify->rep.v0.version = req->v0.version; | ||
124 | notify->rep.v0.route = req->v0.route; | ||
125 | notify->rep.v0.token = req->v0.token; | ||
126 | reply = req->v0.reply; | ||
127 | } | ||
128 | |||
129 | if (ret == 0) { | ||
130 | ret = nvkm_notify_init(event, nvkm_client_notify, false, | ||
131 | data, size, reply, ¬ify->n); | ||
132 | if (ret == 0) { | ||
133 | client->notify[index] = notify; | ||
134 | notify->client = client; | ||
135 | return 0; | ||
136 | } | ||
137 | } | ||
138 | |||
139 | kfree(notify); | ||
140 | return 0; | ||
141 | } | ||
142 | |||
143 | static int | ||
144 | nouveau_client_devlist(struct nouveau_object *object, void *data, u32 size) | ||
145 | { | ||
146 | union { | ||
147 | struct nv_client_devlist_v0 v0; | ||
148 | } *args = data; | ||
149 | int ret; | ||
150 | |||
151 | nv_ioctl(object, "client devlist size %d\n", size); | ||
152 | if (nvif_unpack(args->v0, 0, 0, true)) { | ||
153 | nv_ioctl(object, "client devlist vers %d count %d\n", | ||
154 | args->v0.version, args->v0.count); | ||
155 | if (size == sizeof(args->v0.device[0]) * args->v0.count) { | ||
156 | ret = nouveau_device_list(args->v0.device, | ||
157 | args->v0.count); | ||
158 | if (ret >= 0) { | ||
159 | args->v0.count = ret; | ||
160 | ret = 0; | ||
161 | } | ||
162 | } else { | ||
163 | ret = -EINVAL; | ||
164 | } | ||
165 | } | ||
166 | |||
167 | return ret; | ||
168 | } | ||
169 | |||
170 | static int | ||
171 | nouveau_client_mthd(struct nouveau_object *object, u32 mthd, | ||
172 | void *data, u32 size) | ||
173 | { | ||
174 | switch (mthd) { | ||
175 | case NV_CLIENT_DEVLIST: | ||
176 | return nouveau_client_devlist(object, data, size); | ||
177 | default: | ||
178 | break; | ||
179 | } | ||
180 | return -EINVAL; | ||
181 | } | ||
182 | |||
32 | static void | 183 | static void |
33 | nouveau_client_dtor(struct nouveau_object *object) | 184 | nouveau_client_dtor(struct nouveau_object *object) |
34 | { | 185 | { |
35 | struct nouveau_client *client = (void *)object; | 186 | struct nouveau_client *client = (void *)object; |
187 | int i; | ||
188 | for (i = 0; i < ARRAY_SIZE(client->notify); i++) | ||
189 | nvkm_client_notify_del(client, i); | ||
36 | nouveau_object_ref(NULL, &client->device); | 190 | nouveau_object_ref(NULL, &client->device); |
37 | nouveau_handle_destroy(client->root); | 191 | nouveau_handle_destroy(client->root); |
38 | nouveau_namedb_destroy(&client->base); | 192 | nouveau_namedb_destroy(&client->base); |
@@ -42,6 +196,7 @@ static struct nouveau_oclass | |||
42 | nouveau_client_oclass = { | 196 | nouveau_client_oclass = { |
43 | .ofuncs = &(struct nouveau_ofuncs) { | 197 | .ofuncs = &(struct nouveau_ofuncs) { |
44 | .dtor = nouveau_client_dtor, | 198 | .dtor = nouveau_client_dtor, |
199 | .mthd = nouveau_client_mthd, | ||
45 | }, | 200 | }, |
46 | }; | 201 | }; |
47 | 202 | ||
@@ -93,9 +248,12 @@ int | |||
93 | nouveau_client_fini(struct nouveau_client *client, bool suspend) | 248 | nouveau_client_fini(struct nouveau_client *client, bool suspend) |
94 | { | 249 | { |
95 | const char *name[2] = { "fini", "suspend" }; | 250 | const char *name[2] = { "fini", "suspend" }; |
96 | int ret; | 251 | int ret, i; |
97 | |||
98 | nv_debug(client, "%s running\n", name[suspend]); | 252 | nv_debug(client, "%s running\n", name[suspend]); |
253 | nv_debug(client, "%s notify\n", name[suspend]); | ||
254 | for (i = 0; i < ARRAY_SIZE(client->notify); i++) | ||
255 | nvkm_client_notify_put(client, i); | ||
256 | nv_debug(client, "%s object\n", name[suspend]); | ||
99 | ret = nouveau_handle_fini(client->root, suspend); | 257 | ret = nouveau_handle_fini(client->root, suspend); |
100 | nv_debug(client, "%s completed with %d\n", name[suspend], ret); | 258 | nv_debug(client, "%s completed with %d\n", name[suspend], ret); |
101 | return ret; | 259 | return ret; |
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c index ae81d3b5d8b7..0540a48c5678 100644 --- a/drivers/gpu/drm/nouveau/core/core/event.c +++ b/drivers/gpu/drm/nouveau/core/core/event.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright 2013 Red Hat Inc. | 2 | * Copyright 2013-2014 Red Hat Inc. |
3 | * | 3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), | 5 | * copy of this software and associated documentation files (the "Software"), |
@@ -24,173 +24,77 @@ | |||
24 | #include <core/event.h> | 24 | #include <core/event.h> |
25 | 25 | ||
26 | void | 26 | void |
27 | nouveau_event_put(struct nouveau_eventh *handler) | 27 | nvkm_event_put(struct nvkm_event *event, u32 types, int index) |
28 | { | 28 | { |
29 | struct nouveau_event *event = handler->event; | 29 | BUG_ON(!spin_is_locked(&event->refs_lock)); |
30 | unsigned long flags; | 30 | while (types) { |
31 | u32 m, t; | 31 | int type = __ffs(types); types &= ~(1 << type); |
32 | 32 | if (--event->refs[index * event->types_nr + type] == 0) { | |
33 | if (!__test_and_clear_bit(NVKM_EVENT_ENABLE, &handler->flags)) | 33 | if (event->func->fini) |
34 | return; | 34 | event->func->fini(event, 1 << type, index); |
35 | |||
36 | spin_lock_irqsave(&event->refs_lock, flags); | ||
37 | for (m = handler->types; t = __ffs(m), m; m &= ~(1 << t)) { | ||
38 | if (!--event->refs[handler->index * event->types_nr + t]) { | ||
39 | if (event->disable) | ||
40 | event->disable(event, 1 << t, handler->index); | ||
41 | } | 35 | } |
42 | |||
43 | } | 36 | } |
44 | spin_unlock_irqrestore(&event->refs_lock, flags); | ||
45 | } | 37 | } |
46 | 38 | ||
47 | void | 39 | void |
48 | nouveau_event_get(struct nouveau_eventh *handler) | 40 | nvkm_event_get(struct nvkm_event *event, u32 types, int index) |
49 | { | 41 | { |
50 | struct nouveau_event *event = handler->event; | 42 | BUG_ON(!spin_is_locked(&event->refs_lock)); |
51 | unsigned long flags; | 43 | while (types) { |
52 | u32 m, t; | 44 | int type = __ffs(types); types &= ~(1 << type); |
53 | 45 | if (++event->refs[index * event->types_nr + type] == 1) { | |
54 | if (__test_and_set_bit(NVKM_EVENT_ENABLE, &handler->flags)) | 46 | if (event->func->init) |
55 | return; | 47 | event->func->init(event, 1 << type, index); |
56 | |||
57 | spin_lock_irqsave(&event->refs_lock, flags); | ||
58 | for (m = handler->types; t = __ffs(m), m; m &= ~(1 << t)) { | ||
59 | if (!event->refs[handler->index * event->types_nr + t]++) { | ||
60 | if (event->enable) | ||
61 | event->enable(event, 1 << t, handler->index); | ||
62 | } | 48 | } |
63 | |||
64 | } | 49 | } |
65 | spin_unlock_irqrestore(&event->refs_lock, flags); | ||
66 | } | ||
67 | |||
68 | static void | ||
69 | nouveau_event_fini(struct nouveau_eventh *handler) | ||
70 | { | ||
71 | struct nouveau_event *event = handler->event; | ||
72 | unsigned long flags; | ||
73 | nouveau_event_put(handler); | ||
74 | spin_lock_irqsave(&event->list_lock, flags); | ||
75 | list_del(&handler->head); | ||
76 | spin_unlock_irqrestore(&event->list_lock, flags); | ||
77 | } | ||
78 | |||
79 | static int | ||
80 | nouveau_event_init(struct nouveau_event *event, u32 types, int index, | ||
81 | int (*func)(void *, u32, int), void *priv, | ||
82 | struct nouveau_eventh *handler) | ||
83 | { | ||
84 | unsigned long flags; | ||
85 | |||
86 | if (types & ~((1 << event->types_nr) - 1)) | ||
87 | return -EINVAL; | ||
88 | if (index >= event->index_nr) | ||
89 | return -EINVAL; | ||
90 | |||
91 | handler->event = event; | ||
92 | handler->flags = 0; | ||
93 | handler->types = types; | ||
94 | handler->index = index; | ||
95 | handler->func = func; | ||
96 | handler->priv = priv; | ||
97 | |||
98 | spin_lock_irqsave(&event->list_lock, flags); | ||
99 | list_add_tail(&handler->head, &event->list[index]); | ||
100 | spin_unlock_irqrestore(&event->list_lock, flags); | ||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | int | ||
105 | nouveau_event_new(struct nouveau_event *event, u32 types, int index, | ||
106 | int (*func)(void *, u32, int), void *priv, | ||
107 | struct nouveau_eventh **phandler) | ||
108 | { | ||
109 | struct nouveau_eventh *handler; | ||
110 | int ret = -ENOMEM; | ||
111 | |||
112 | if (event->check) { | ||
113 | ret = event->check(event, types, index); | ||
114 | if (ret) | ||
115 | return ret; | ||
116 | } | ||
117 | |||
118 | handler = *phandler = kmalloc(sizeof(*handler), GFP_KERNEL); | ||
119 | if (handler) { | ||
120 | ret = nouveau_event_init(event, types, index, func, priv, handler); | ||
121 | if (ret) | ||
122 | kfree(handler); | ||
123 | } | ||
124 | |||
125 | return ret; | ||
126 | } | ||
127 | |||
128 | void | ||
129 | nouveau_event_ref(struct nouveau_eventh *handler, struct nouveau_eventh **ref) | ||
130 | { | ||
131 | BUG_ON(handler != NULL); | ||
132 | if (*ref) { | ||
133 | nouveau_event_fini(*ref); | ||
134 | kfree(*ref); | ||
135 | } | ||
136 | *ref = handler; | ||
137 | } | 50 | } |
138 | 51 | ||
139 | void | 52 | void |
140 | nouveau_event_trigger(struct nouveau_event *event, u32 types, int index) | 53 | nvkm_event_send(struct nvkm_event *event, u32 types, int index, |
54 | void *data, u32 size) | ||
141 | { | 55 | { |
142 | struct nouveau_eventh *handler; | 56 | struct nvkm_notify *notify; |
143 | unsigned long flags; | 57 | unsigned long flags; |
144 | 58 | ||
145 | if (WARN_ON(index >= event->index_nr)) | 59 | if (!event->refs || WARN_ON(index >= event->index_nr)) |
146 | return; | 60 | return; |
147 | 61 | ||
148 | spin_lock_irqsave(&event->list_lock, flags); | 62 | spin_lock_irqsave(&event->list_lock, flags); |
149 | list_for_each_entry(handler, &event->list[index], head) { | 63 | list_for_each_entry(notify, &event->list, head) { |
150 | if (!test_bit(NVKM_EVENT_ENABLE, &handler->flags)) | 64 | if (notify->index == index && (notify->types & types)) { |
151 | continue; | 65 | if (event->func->send) { |
152 | if (!(handler->types & types)) | 66 | event->func->send(data, size, notify); |
153 | continue; | 67 | continue; |
154 | if (handler->func(handler->priv, handler->types & types, index) | 68 | } |
155 | != NVKM_EVENT_DROP) | 69 | nvkm_notify_send(notify, data, size); |
156 | continue; | 70 | } |
157 | nouveau_event_put(handler); | ||
158 | } | 71 | } |
159 | spin_unlock_irqrestore(&event->list_lock, flags); | 72 | spin_unlock_irqrestore(&event->list_lock, flags); |
160 | } | 73 | } |
161 | 74 | ||
162 | void | 75 | void |
163 | nouveau_event_destroy(struct nouveau_event **pevent) | 76 | nvkm_event_fini(struct nvkm_event *event) |
164 | { | 77 | { |
165 | struct nouveau_event *event = *pevent; | 78 | if (event->refs) { |
166 | if (event) { | 79 | kfree(event->refs); |
167 | kfree(event); | 80 | event->refs = NULL; |
168 | *pevent = NULL; | ||
169 | } | 81 | } |
170 | } | 82 | } |
171 | 83 | ||
172 | int | 84 | int |
173 | nouveau_event_create(int types_nr, int index_nr, struct nouveau_event **pevent) | 85 | nvkm_event_init(const struct nvkm_event_func *func, int types_nr, int index_nr, |
86 | struct nvkm_event *event) | ||
174 | { | 87 | { |
175 | struct nouveau_event *event; | 88 | event->refs = kzalloc(sizeof(*event->refs) * index_nr * types_nr, |
176 | int i; | 89 | GFP_KERNEL); |
177 | 90 | if (!event->refs) | |
178 | event = *pevent = kzalloc(sizeof(*event) + (index_nr * types_nr) * | ||
179 | sizeof(event->refs[0]), GFP_KERNEL); | ||
180 | if (!event) | ||
181 | return -ENOMEM; | ||
182 | |||
183 | event->list = kmalloc(sizeof(*event->list) * index_nr, GFP_KERNEL); | ||
184 | if (!event->list) { | ||
185 | kfree(event); | ||
186 | return -ENOMEM; | 91 | return -ENOMEM; |
187 | } | ||
188 | 92 | ||
189 | spin_lock_init(&event->list_lock); | 93 | event->func = func; |
190 | spin_lock_init(&event->refs_lock); | ||
191 | for (i = 0; i < index_nr; i++) | ||
192 | INIT_LIST_HEAD(&event->list[i]); | ||
193 | event->types_nr = types_nr; | 94 | event->types_nr = types_nr; |
194 | event->index_nr = index_nr; | 95 | event->index_nr = index_nr; |
96 | spin_lock_init(&event->refs_lock); | ||
97 | spin_lock_init(&event->list_lock); | ||
98 | INIT_LIST_HEAD(&event->list); | ||
195 | return 0; | 99 | return 0; |
196 | } | 100 | } |
diff --git a/drivers/gpu/drm/nouveau/core/core/handle.c b/drivers/gpu/drm/nouveau/core/core/handle.c index 264c2b338ac3..a490b805d7e3 100644 --- a/drivers/gpu/drm/nouveau/core/core/handle.c +++ b/drivers/gpu/drm/nouveau/core/core/handle.c | |||
@@ -146,9 +146,7 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle, | |||
146 | } | 146 | } |
147 | 147 | ||
148 | hprintk(handle, TRACE, "created\n"); | 148 | hprintk(handle, TRACE, "created\n"); |
149 | |||
150 | *phandle = handle; | 149 | *phandle = handle; |
151 | |||
152 | return 0; | 150 | return 0; |
153 | } | 151 | } |
154 | 152 | ||
@@ -224,3 +222,116 @@ nouveau_handle_put(struct nouveau_handle *handle) | |||
224 | if (handle) | 222 | if (handle) |
225 | nouveau_namedb_put(handle); | 223 | nouveau_namedb_put(handle); |
226 | } | 224 | } |
225 | |||
226 | int | ||
227 | nouveau_handle_new(struct nouveau_object *client, u32 _parent, u32 _handle, | ||
228 | u16 _oclass, void *data, u32 size, | ||
229 | struct nouveau_object **pobject) | ||
230 | { | ||
231 | struct nouveau_object *parent = NULL; | ||
232 | struct nouveau_object *engctx = NULL; | ||
233 | struct nouveau_object *object = NULL; | ||
234 | struct nouveau_object *engine; | ||
235 | struct nouveau_oclass *oclass; | ||
236 | struct nouveau_handle *handle; | ||
237 | int ret; | ||
238 | |||
239 | /* lookup parent object and ensure it *is* a parent */ | ||
240 | parent = nouveau_handle_ref(client, _parent); | ||
241 | if (!parent) { | ||
242 | nv_error(client, "parent 0x%08x not found\n", _parent); | ||
243 | return -ENOENT; | ||
244 | } | ||
245 | |||
246 | if (!nv_iclass(parent, NV_PARENT_CLASS)) { | ||
247 | nv_error(parent, "cannot have children\n"); | ||
248 | ret = -EINVAL; | ||
249 | goto fail_class; | ||
250 | } | ||
251 | |||
252 | /* check that parent supports the requested subclass */ | ||
253 | ret = nouveau_parent_sclass(parent, _oclass, &engine, &oclass); | ||
254 | if (ret) { | ||
255 | nv_debug(parent, "illegal class 0x%04x\n", _oclass); | ||
256 | goto fail_class; | ||
257 | } | ||
258 | |||
259 | /* make sure engine init has been completed *before* any objects | ||
260 | * it controls are created - the constructors may depend on | ||
261 | * state calculated at init (ie. default context construction) | ||
262 | */ | ||
263 | if (engine) { | ||
264 | ret = nouveau_object_inc(engine); | ||
265 | if (ret) | ||
266 | goto fail_class; | ||
267 | } | ||
268 | |||
269 | /* if engine requires it, create a context object to insert | ||
270 | * between the parent and its children (eg. PGRAPH context) | ||
271 | */ | ||
272 | if (engine && nv_engine(engine)->cclass) { | ||
273 | ret = nouveau_object_ctor(parent, engine, | ||
274 | nv_engine(engine)->cclass, | ||
275 | data, size, &engctx); | ||
276 | if (ret) | ||
277 | goto fail_engctx; | ||
278 | } else { | ||
279 | nouveau_object_ref(parent, &engctx); | ||
280 | } | ||
281 | |||
282 | /* finally, create new object and bind it to its handle */ | ||
283 | ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object); | ||
284 | *pobject = object; | ||
285 | if (ret) | ||
286 | goto fail_ctor; | ||
287 | |||
288 | ret = nouveau_object_inc(object); | ||
289 | if (ret) | ||
290 | goto fail_init; | ||
291 | |||
292 | ret = nouveau_handle_create(parent, _parent, _handle, object, &handle); | ||
293 | if (ret) | ||
294 | goto fail_handle; | ||
295 | |||
296 | ret = nouveau_handle_init(handle); | ||
297 | if (ret) | ||
298 | nouveau_handle_destroy(handle); | ||
299 | |||
300 | fail_handle: | ||
301 | nouveau_object_dec(object, false); | ||
302 | fail_init: | ||
303 | nouveau_object_ref(NULL, &object); | ||
304 | fail_ctor: | ||
305 | nouveau_object_ref(NULL, &engctx); | ||
306 | fail_engctx: | ||
307 | if (engine) | ||
308 | nouveau_object_dec(engine, false); | ||
309 | fail_class: | ||
310 | nouveau_object_ref(NULL, &parent); | ||
311 | return ret; | ||
312 | } | ||
313 | |||
314 | int | ||
315 | nouveau_handle_del(struct nouveau_object *client, u32 _parent, u32 _handle) | ||
316 | { | ||
317 | struct nouveau_object *parent = NULL; | ||
318 | struct nouveau_object *namedb = NULL; | ||
319 | struct nouveau_handle *handle = NULL; | ||
320 | |||
321 | parent = nouveau_handle_ref(client, _parent); | ||
322 | if (!parent) | ||
323 | return -ENOENT; | ||
324 | |||
325 | namedb = nv_pclass(parent, NV_NAMEDB_CLASS); | ||
326 | if (namedb) { | ||
327 | handle = nouveau_namedb_get(nv_namedb(namedb), _handle); | ||
328 | if (handle) { | ||
329 | nouveau_namedb_put(handle); | ||
330 | nouveau_handle_fini(handle, false); | ||
331 | nouveau_handle_destroy(handle); | ||
332 | } | ||
333 | } | ||
334 | |||
335 | nouveau_object_ref(NULL, &parent); | ||
336 | return handle ? 0 : -EINVAL; | ||
337 | } | ||
diff --git a/drivers/gpu/drm/nouveau/core/core/ioctl.c b/drivers/gpu/drm/nouveau/core/core/ioctl.c new file mode 100644 index 000000000000..f7e19bfb489c --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/core/ioctl.c | |||
@@ -0,0 +1,531 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | ||
23 | */ | ||
24 | |||
25 | #include <core/object.h> | ||
26 | #include <core/parent.h> | ||
27 | #include <core/handle.h> | ||
28 | #include <core/namedb.h> | ||
29 | #include <core/client.h> | ||
30 | #include <core/device.h> | ||
31 | #include <core/ioctl.h> | ||
32 | #include <core/event.h> | ||
33 | |||
34 | #include <nvif/unpack.h> | ||
35 | #include <nvif/ioctl.h> | ||
36 | |||
/* NOP ioctl: validates the (versionless) argument struct and logs,
 * with no other effect.
 */
static int
nvkm_ioctl_nop(struct nouveau_handle *handle, void *data, u32 size)
{
	struct nouveau_object *object = handle->object;
	union {
		struct nvif_ioctl_nop none;
	} *args = data;
	int ret;

	nv_ioctl(object, "nop size %d\n", size);
	/* NOTE: 'ret' is assigned inside the nvif_unvers() macro (error
	 * on a size mismatch, 0 otherwise), so it is not used
	 * uninitialised despite appearances -- see nvif/unpack.h
	 */
	if (nvif_unvers(args->none)) {
		nv_ioctl(object, "nop\n");
	}

	return ret;
}
53 | |||
/* SCLASS ioctl: list the object classes that can be created as children
 * of this object.
 *
 * The caller provides args->v0.count slots in args->v0.oclass[]; on
 * success count is rewritten with the number of classes available,
 * which may exceed the number of slots supplied (see
 * nouveau_parent_lclass()).
 */
static int
nvkm_ioctl_sclass(struct nouveau_handle *handle, void *data, u32 size)
{
	struct nouveau_object *object = handle->object;
	union {
		struct nvif_ioctl_sclass_v0 v0;
	} *args = data;
	int ret;

	/* only parent-class objects can have children, hence sclasses */
	if (!nv_iclass(object, NV_PARENT_CLASS)) {
		nv_debug(object, "cannot have children (sclass)\n");
		return -ENODEV;
	}

	nv_ioctl(object, "sclass size %d\n", size);
	/* nvif_unpack() validates the header and assigns 'ret' itself */
	if (nvif_unpack(args->v0, 0, 0, true)) {
		nv_ioctl(object, "sclass vers %d count %d\n",
			 args->v0.version, args->v0.count);
		/* trailing payload must be exactly the oclass array */
		if (size == args->v0.count * sizeof(args->v0.oclass[0])) {
			ret = nouveau_parent_lclass(object, args->v0.oclass,
						    args->v0.count);
			if (ret >= 0) {
				args->v0.count = ret;
				ret = 0;
			}
		} else {
			ret = -EINVAL;
		}
	}

	return ret;
}
86 | |||
/* NEW ioctl: construct an object as a child of 'parent' and publish it
 * under the client-chosen handle args->v0.handle.
 *
 * Note the deliberate fall-through of the success path into the fail_*
 * labels: nouveau_handle_create() takes its own references on the
 * object, so the local ctor reference and use-count are dropped
 * unconditionally on the way out.
 */
static int
nvkm_ioctl_new(struct nouveau_handle *parent, void *data, u32 size)
{
	union {
		struct nvif_ioctl_new_v0 v0;
	} *args = data;
	struct nouveau_client *client = nouveau_client(parent->object);
	struct nouveau_object *engctx = NULL;
	struct nouveau_object *object = NULL;
	struct nouveau_object *engine;
	struct nouveau_oclass *oclass;
	struct nouveau_handle *handle;
	u32 _handle, _oclass;
	int ret;

	nv_ioctl(client, "new size %d\n", size);
	/* 'ret' is assigned inside the nvif_unpack() macro */
	if (nvif_unpack(args->v0, 0, 0, true)) {
		_handle = args->v0.handle;
		_oclass = args->v0.oclass;
	} else
		return ret;

	nv_ioctl(client, "new vers %d handle %08x class %08x "
		 "route %02x token %llx\n",
		 args->v0.version, _handle, _oclass,
		 args->v0.route, args->v0.token);

	if (!nv_iclass(parent->object, NV_PARENT_CLASS)) {
		nv_debug(parent->object, "cannot have children (ctor)\n");
		ret = -ENODEV;
		goto fail_class;
	}

	/* check that parent supports the requested subclass */
	ret = nouveau_parent_sclass(parent->object, _oclass, &engine, &oclass);
	if (ret) {
		nv_debug(parent->object, "illegal class 0x%04x\n", _oclass);
		goto fail_class;
	}

	/* make sure engine init has been completed *before* any objects
	 * it controls are created - the constructors may depend on
	 * state calculated at init (ie. default context construction)
	 */
	if (engine) {
		ret = nouveau_object_inc(engine);
		if (ret)
			goto fail_class;
	}

	/* if engine requires it, create a context object to insert
	 * between the parent and its children (eg. PGRAPH context)
	 */
	if (engine && nv_engine(engine)->cclass) {
		ret = nouveau_object_ctor(parent->object, engine,
					  nv_engine(engine)->cclass,
					  data, size, &engctx);
		if (ret)
			goto fail_engctx;
	} else {
		nouveau_object_ref(parent->object, &engctx);
	}

	/* finally, create new object and bind it to its handle */
	ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object);
	/* stash the object for nvkm_ioctl()'s 'hack' out-parameter.
	 * NOTE(review): stored before the error check -- presumably
	 * harmless because 'object' stays NULL on ctor failure; confirm.
	 */
	client->data = object;
	if (ret)
		goto fail_ctor;

	ret = nouveau_object_inc(object);
	if (ret)
		goto fail_init;

	ret = nouveau_handle_create(parent->object, parent->name,
				    _handle, object, &handle);
	if (ret)
		goto fail_handle;

	ret = nouveau_handle_init(handle);
	/* route/token are written before the init result is checked;
	 * the handle is still valid here even when init failed
	 */
	handle->route = args->v0.route;
	handle->token = args->v0.token;
	if (ret)
		nouveau_handle_destroy(handle);

fail_handle:
	nouveau_object_dec(object, false);
fail_init:
	nouveau_object_ref(NULL, &object);
fail_ctor:
	nouveau_object_ref(NULL, &engctx);
fail_engctx:
	if (engine)
		nouveau_object_dec(engine, false);
fail_class:
	return ret;
}
183 | |||
/* DEL ioctl: tear down the object behind 'handle'.
 *
 * Validates the (versionless) argument struct, then finalises and
 * destroys the handle, which releases the references it held on the
 * object.
 */
static int
nvkm_ioctl_del(struct nouveau_handle *handle, void *data, u32 size)
{
	struct nouveau_object *object = handle->object;
	union {
		struct nvif_ioctl_del none;
	} *args = data;
	int ret;

	nv_ioctl(object, "delete size %d\n", size);
	/* 'ret' is assigned inside the nvif_unvers() macro */
	if (nvif_unvers(args->none)) {
		nv_ioctl(object, "delete\n");
		nouveau_handle_fini(handle, false);
		nouveau_handle_destroy(handle);
	}

	return ret;
}
202 | |||
/* MTHD ioctl: forward a class-specific method call to the object's
 * ofuncs->mthd implementation, passing the full ioctl payload through.
 * Returns -ENODEV if the object implements no methods.
 */
static int
nvkm_ioctl_mthd(struct nouveau_handle *handle, void *data, u32 size)
{
	struct nouveau_object *object = handle->object;
	struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
	union {
		struct nvif_ioctl_mthd_v0 v0;
	} *args = data;
	int ret;

	nv_ioctl(object, "mthd size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, true)) {
		nv_ioctl(object, "mthd vers %d mthd %02x\n",
			 args->v0.version, args->v0.method);
		/* comma operator: preload the failure code, which is
		 * overwritten when the hook exists
		 */
		if (ret = -ENODEV, ofuncs->mthd)
			ret = ofuncs->mthd(object, args->v0.method, data, size);
	}

	return ret;
}
223 | |||
224 | |||
/* RD ioctl: read an 8-, 16- or 32-bit value from the object at
 * args->v0.addr using the object's rd08/rd16/rd32 accessor.
 *
 * Returns -ENODEV when the object lacks an accessor of the requested
 * width, -EINVAL for unsupported widths.
 */
static int
nvkm_ioctl_rd(struct nouveau_handle *handle, void *data, u32 size)
{
	struct nouveau_object *object = handle->object;
	struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
	union {
		struct nvif_ioctl_rd_v0 v0;
	} *args = data;
	int ret;

	nv_ioctl(object, "rd size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(object, "rd vers %d size %d addr %016llx\n",
			 args->v0.version, args->v0.size, args->v0.addr);
		/* comma-operator idiom: preload -ENODEV, clear on success */
		switch (args->v0.size) {
		case 1:
			if (ret = -ENODEV, ofuncs->rd08) {
				args->v0.data = nv_ro08(object, args->v0.addr);
				ret = 0;
			}
			break;
		case 2:
			if (ret = -ENODEV, ofuncs->rd16) {
				args->v0.data = nv_ro16(object, args->v0.addr);
				ret = 0;
			}
			break;
		case 4:
			if (ret = -ENODEV, ofuncs->rd32) {
				args->v0.data = nv_ro32(object, args->v0.addr);
				ret = 0;
			}
			break;
		default:
			ret = -EINVAL;
			break;
		}
	}

	return ret;
}
266 | |||
/* WR ioctl: write an 8-, 16- or 32-bit value to the object at
 * args->v0.addr using the object's wr08/wr16/wr32 accessor.
 * Mirror image of nvkm_ioctl_rd(); same error conventions.
 */
static int
nvkm_ioctl_wr(struct nouveau_handle *handle, void *data, u32 size)
{
	struct nouveau_object *object = handle->object;
	struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
	union {
		struct nvif_ioctl_wr_v0 v0;
	} *args = data;
	int ret;

	nv_ioctl(object, "wr size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(object, "wr vers %d size %d addr %016llx data %08x\n",
			 args->v0.version, args->v0.size, args->v0.addr,
			 args->v0.data);
		/* comma-operator idiom: preload -ENODEV, clear on success */
		switch (args->v0.size) {
		case 1:
			if (ret = -ENODEV, ofuncs->wr08) {
				nv_wo08(object, args->v0.addr, args->v0.data);
				ret = 0;
			}
			break;
		case 2:
			if (ret = -ENODEV, ofuncs->wr16) {
				nv_wo16(object, args->v0.addr, args->v0.data);
				ret = 0;
			}
			break;
		case 4:
			if (ret = -ENODEV, ofuncs->wr32) {
				nv_wo32(object, args->v0.addr, args->v0.data);
				ret = 0;
			}
			break;
		default:
			ret = -EINVAL;
			break;
		}
	}

	return ret;
}
309 | |||
/* MAP ioctl: ask the object for a mapping, returned to userspace as a
 * (handle, length) pair in the argument struct.
 * NOTE(review): the meaning of 'handle' (mmap offset vs. address) is
 * defined by the individual ofuncs->map implementations -- confirm
 * there before relying on it.
 */
static int
nvkm_ioctl_map(struct nouveau_handle *handle, void *data, u32 size)
{
	struct nouveau_object *object = handle->object;
	struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
	union {
		struct nvif_ioctl_map_v0 v0;
	} *args = data;
	int ret;

	nv_ioctl(object, "map size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(object, "map vers %d\n", args->v0.version);
		/* -ENODEV when the object is not mappable */
		if (ret = -ENODEV, ofuncs->map) {
			ret = ofuncs->map(object, &args->v0.handle,
					  &args->v0.length);
		}
	}

	return ret;
}
331 | |||
/* UNMAP ioctl: validates the argument struct and logs only; no
 * kernel-side state is torn down here.
 * NOTE(review): actual unmapping presumably happens elsewhere (vma
 * teardown) -- confirm with the callers.
 */
static int
nvkm_ioctl_unmap(struct nouveau_handle *handle, void *data, u32 size)
{
	struct nouveau_object *object = handle->object;
	union {
		struct nvif_ioctl_unmap none;
	} *args = data;
	int ret;

	nv_ioctl(object, "unmap size %d\n", size);
	/* 'ret' is assigned inside the nvif_unvers() macro */
	if (nvif_unvers(args->none)) {
		nv_ioctl(object, "unmap\n");
	}

	return ret;
}
348 | |||
/* NTFY_NEW ioctl: create a notification request on one of the object's
 * events.
 *
 * The object's ofuncs->ntfy hook maps args->v0.event to an nvkm_event;
 * a per-client notify is then allocated on it and its slot number is
 * returned to userspace in args->v0.index.
 */
static int
nvkm_ioctl_ntfy_new(struct nouveau_handle *handle, void *data, u32 size)
{
	struct nouveau_client *client = nouveau_client(handle->object);
	struct nouveau_object *object = handle->object;
	struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
	union {
		struct nvif_ioctl_ntfy_new_v0 v0;
	} *args = data;
	struct nvkm_event *event;
	int ret;

	nv_ioctl(object, "ntfy new size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, true)) {
		nv_ioctl(object, "ntfy new vers %d event %02x\n",
			 args->v0.version, args->v0.event);
		/* -ENODEV when the object exposes no events */
		if (ret = -ENODEV, ofuncs->ntfy)
			ret = ofuncs->ntfy(object, args->v0.event, &event);
		if (ret == 0) {
			ret = nvkm_client_notify_new(client, event, data, size);
			/* notify_new returns the slot index on success */
			if (ret >= 0) {
				args->v0.index = ret;
				ret = 0;
			}
		}
	}

	return ret;
}
378 | |||
/* NTFY_DEL ioctl: destroy the client notify in slot args->v0.index. */
static int
nvkm_ioctl_ntfy_del(struct nouveau_handle *handle, void *data, u32 size)
{
	struct nouveau_client *client = nouveau_client(handle->object);
	struct nouveau_object *object = handle->object;
	union {
		struct nvif_ioctl_ntfy_del_v0 v0;
	} *args = data;
	int ret;

	nv_ioctl(object, "ntfy del size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(object, "ntfy del vers %d index %d\n",
			 args->v0.version, args->v0.index);
		ret = nvkm_client_notify_del(client, args->v0.index);
	}

	return ret;
}
398 | |||
/* NTFY_GET ioctl: enable (arm) the client notify in slot
 * args->v0.index so events start being delivered.
 */
static int
nvkm_ioctl_ntfy_get(struct nouveau_handle *handle, void *data, u32 size)
{
	struct nouveau_client *client = nouveau_client(handle->object);
	struct nouveau_object *object = handle->object;
	union {
		struct nvif_ioctl_ntfy_get_v0 v0;
	} *args = data;
	int ret;

	nv_ioctl(object, "ntfy get size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(object, "ntfy get vers %d index %d\n",
			 args->v0.version, args->v0.index);
		ret = nvkm_client_notify_get(client, args->v0.index);
	}

	return ret;
}
418 | |||
/* NTFY_PUT ioctl: disable (disarm) the client notify in slot
 * args->v0.index; counterpart of NTFY_GET.
 */
static int
nvkm_ioctl_ntfy_put(struct nouveau_handle *handle, void *data, u32 size)
{
	struct nouveau_client *client = nouveau_client(handle->object);
	struct nouveau_object *object = handle->object;
	union {
		struct nvif_ioctl_ntfy_put_v0 v0;
	} *args = data;
	int ret;

	nv_ioctl(object, "ntfy put size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(object, "ntfy put vers %d index %d\n",
			 args->v0.version, args->v0.index);
		ret = nvkm_client_notify_put(client, args->v0.index);
	}

	return ret;
}
438 | |||
/* Dispatch table for version-0 ioctls, indexed by the ioctl 'type'
 * field (see nvkm_ioctl_path()), so entry order must match the
 * NVIF_IOCTL_V0_* type codes.  An entry is only callable while its
 * 'version' is 0.
 */
static struct {
	int version;
	int (*func)(struct nouveau_handle *, void *, u32);
}
nvkm_ioctl_v0[] = {
	{ 0x00, nvkm_ioctl_nop },
	{ 0x00, nvkm_ioctl_sclass },
	{ 0x00, nvkm_ioctl_new },
	{ 0x00, nvkm_ioctl_del },
	{ 0x00, nvkm_ioctl_mthd },
	{ 0x00, nvkm_ioctl_rd },
	{ 0x00, nvkm_ioctl_wr },
	{ 0x00, nvkm_ioctl_map },
	{ 0x00, nvkm_ioctl_unmap },
	{ 0x00, nvkm_ioctl_ntfy_new },
	{ 0x00, nvkm_ioctl_ntfy_del },
	{ 0x00, nvkm_ioctl_ntfy_get },
	{ 0x00, nvkm_ioctl_ntfy_put },
};
458 | |||
/* Resolve a handle path and dispatch an ioctl to the target object.
 *
 * 'path' holds handle names ordered deepest-first: the loop consumes
 * it from path[nr-1] down to path[0], looking each name up in the
 * namedb closest to the current object and descending.  After
 * resolution, the handle's route must match 'owner' (unless the caller
 * passed OWNER_ANY), the handle's route/token are reported back, and
 * 'type' is dispatched through the nvkm_ioctl_v0[] table.
 */
static int
nvkm_ioctl_path(struct nouveau_handle *parent, u32 type, u32 nr,
		u32 *path, void *data, u32 size,
		u8 owner, u8 *route, u64 *token)
{
	struct nouveau_handle *handle = parent;
	struct nouveau_namedb *namedb;
	struct nouveau_object *object;
	int ret;

	/* comma operator keeps 'object' tracking the current handle's
	 * object, including on the final (nr == 0) evaluation
	 */
	while ((object = parent->object), nr--) {
		nv_ioctl(object, "path 0x%08x\n", path[nr]);
		if (!nv_iclass(object, NV_PARENT_CLASS)) {
			nv_debug(object, "cannot have children (path)\n");
			return -EINVAL;
		}

		if (!(namedb = (void *)nv_pclass(object, NV_NAMEDB_CLASS)) ||
		    !(handle = nouveau_namedb_get(namedb, path[nr]))) {
			nv_debug(object, "handle 0x%08x not found\n", path[nr]);
			return -ENOENT;
		}
		/* release what namedb_get acquired; we only needed the
		 * lookup result
		 */
		nouveau_namedb_put(handle);
		parent = handle;
	}

	/* enforce ownership unless the caller asked for any owner */
	if (owner != NVIF_IOCTL_V0_OWNER_ANY &&
	    owner != handle->route) {
		nv_ioctl(object, "object route != owner\n");
		return -EACCES;
	}
	*route = handle->route;
	*token = handle->token;

	/* dispatch; -EINVAL for unknown or version-bumped types */
	if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) {
		if (nvkm_ioctl_v0[type].version == 0) {
			ret = nvkm_ioctl_v0[type].func(handle, data, size);
		}
	}

	return ret;
}
501 | |||
/* nvkm_ioctl() - top-level entry point for NVIF ioctls.
 *
 * 'supervisor' temporarily elevates the client (client->super) for the
 * duration of the call; it is always cleared again before returning.
 * '*hack' receives the object created by a NEW ioctl (stashed in
 * client->data by nvkm_ioctl_new()) for legacy callers that need the
 * raw pointer.
 */
int
nvkm_ioctl(struct nouveau_client *client, bool supervisor,
	   void *data, u32 size, void **hack)
{
	union {
		struct nvif_ioctl_v0 v0;
	} *args = data;
	int ret;

	client->super = supervisor;
	nv_ioctl(client, "size %d\n", size);

	/* NOTE: 'ret' is assigned inside nvif_unpack() even when the
	 * header fails to unpack -- see nvif/unpack.h
	 */
	if (nvif_unpack(args->v0, 0, 0, true)) {
		nv_ioctl(client, "vers %d type %02x path %d owner %02x\n",
			 args->v0.version, args->v0.type, args->v0.path_nr,
			 args->v0.owner);
		ret = nvkm_ioctl_path(client->root, args->v0.type,
				      args->v0.path_nr, args->v0.path,
				      data, size, args->v0.owner,
				      &args->v0.route, &args->v0.token);
	}

	nv_ioctl(client, "return %d\n", ret);
	if (hack) {
		*hack = client->data;
		client->data = NULL;
	}
	client->super = false;
	return ret;
}
diff --git a/drivers/gpu/drm/nouveau/core/core/notify.c b/drivers/gpu/drm/nouveau/core/core/notify.c new file mode 100644 index 000000000000..76adb81bdea2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/core/notify.c | |||
@@ -0,0 +1,167 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | ||
23 | */ | ||
24 | |||
25 | #include <core/client.h> | ||
26 | #include <core/event.h> | ||
27 | #include <core/notify.h> | ||
28 | |||
29 | #include <nvif/unpack.h> | ||
30 | #include <nvif/event.h> | ||
31 | |||
/* Block delivery of the notify.  The first put (block 0 -> 1) disables
 * the notify's event types at the event source; further puts only
 * nest.  Caller must hold event->refs_lock.
 */
static inline void
nvkm_notify_put_locked(struct nvkm_notify *notify)
{
	if (notify->block++ == 0)
		nvkm_event_put(notify->event, notify->types, notify->index);
}
38 | |||
39 | void | ||
40 | nvkm_notify_put(struct nvkm_notify *notify) | ||
41 | { | ||
42 | struct nvkm_event *event = notify->event; | ||
43 | unsigned long flags; | ||
44 | if (likely(event) && | ||
45 | test_and_clear_bit(NVKM_NOTIFY_USER, ¬ify->flags)) { | ||
46 | spin_lock_irqsave(&event->refs_lock, flags); | ||
47 | nvkm_notify_put_locked(notify); | ||
48 | spin_unlock_irqrestore(&event->refs_lock, flags); | ||
49 | if (test_bit(NVKM_NOTIFY_WORK, ¬ify->flags)) | ||
50 | flush_work(¬ify->work); | ||
51 | } | ||
52 | } | ||
53 | |||
/* Unblock delivery of the notify.  The last get (block 1 -> 0)
 * re-enables the notify's event types at the event source.  Caller
 * must hold event->refs_lock.
 */
static inline void
nvkm_notify_get_locked(struct nvkm_notify *notify)
{
	if (--notify->block == 0)
		nvkm_event_get(notify->event, notify->types, notify->index);
}
60 | |||
61 | void | ||
62 | nvkm_notify_get(struct nvkm_notify *notify) | ||
63 | { | ||
64 | struct nvkm_event *event = notify->event; | ||
65 | unsigned long flags; | ||
66 | if (likely(event) && | ||
67 | !test_and_set_bit(NVKM_NOTIFY_USER, ¬ify->flags)) { | ||
68 | spin_lock_irqsave(&event->refs_lock, flags); | ||
69 | nvkm_notify_get_locked(notify); | ||
70 | spin_unlock_irqrestore(&event->refs_lock, flags); | ||
71 | } | ||
72 | } | ||
73 | |||
/* Deliver a pending notification by invoking the user's handler.
 *
 * If the handler returns NVKM_NOTIFY_KEEP the notify re-arms
 * immediately.  Otherwise the USER bit is consumed so that a later
 * nvkm_notify_put() becomes a no-op against the block already taken by
 * nvkm_notify_send(); if the user dropped the notify in the meantime,
 * the send-time block is released here instead.
 * NOTE(review): this accounting is subtle -- verify against the
 * send()/put() pairing before changing anything here.
 */
static inline void
nvkm_notify_func(struct nvkm_notify *notify)
{
	struct nvkm_event *event = notify->event;
	int ret = notify->func(notify);
	unsigned long flags;
	if ((ret == NVKM_NOTIFY_KEEP) ||
	    !test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags)) {
		spin_lock_irqsave(&event->refs_lock, flags);
		nvkm_notify_get_locked(notify);
		spin_unlock_irqrestore(&event->refs_lock, flags);
	}
}
87 | |||
88 | static void | ||
89 | nvkm_notify_work(struct work_struct *work) | ||
90 | { | ||
91 | struct nvkm_notify *notify = container_of(work, typeof(*notify), work); | ||
92 | nvkm_notify_func(notify); | ||
93 | } | ||
94 | |||
/* nvkm_notify_send() - deliver event data to a notify.
 *
 * Called by the event source with event->list_lock held and exactly
 * notify->size bytes of payload.  A blocked notify drops the event.
 * Delivery first blocks the notify (one-shot semantics; the handler
 * or user must re-arm), then either copies the payload and defers to
 * a workqueue, or calls the handler synchronously with 'data'
 * borrowed only for the duration of the call.
 */
void
nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size)
{
	struct nvkm_event *event = notify->event;
	unsigned long flags;

	BUG_ON(!spin_is_locked(&event->list_lock));
	BUG_ON(size != notify->size);

	spin_lock_irqsave(&event->refs_lock, flags);
	if (notify->block) {
		/* disarmed: silently drop the event */
		spin_unlock_irqrestore(&event->refs_lock, flags);
		return;
	}
	/* one-shot: block further delivery until re-armed */
	nvkm_notify_put_locked(notify);
	spin_unlock_irqrestore(&event->refs_lock, flags);

	if (test_bit(NVKM_NOTIFY_WORK, &notify->flags)) {
		/* deferred: copy payload into the buffer allocated by
		 * nvkm_notify_init() and hand off to the workqueue
		 */
		memcpy((void *)notify->data, data, size);
		schedule_work(&notify->work);
	} else {
		/* synchronous: 'data' is only valid during the call */
		notify->data = data;
		nvkm_notify_func(notify);
		notify->data = NULL;
	}
}
121 | |||
/* nvkm_notify_fini() - detach 'notify' from its event and release its
 * resources.  Safe to call on a notify whose init failed (event is
 * NULL then).  Blocks delivery, unlinks from the event's list, and
 * frees the deferred-delivery buffer (NULL outside WORK mode; kfree
 * tolerates that).
 */
void
nvkm_notify_fini(struct nvkm_notify *notify)
{
	unsigned long flags;
	if (notify->event) {
		nvkm_notify_put(notify);
		spin_lock_irqsave(&notify->event->list_lock, flags);
		list_del(&notify->head);
		spin_unlock_irqrestore(&notify->event->list_lock, flags);
		/* cast drops the const qualifier on notify->data */
		kfree((void *)notify->data);
		notify->event = NULL;
	}
}
135 | |||
/* nvkm_notify_init() - attach 'notify' to 'event'.
 *
 * @event:  event source; must already be initialised (refs allocated),
 *          otherwise -ENODEV
 * @func:   handler invoked on delivery; returns NVKM_NOTIFY_KEEP to
 *          stay armed or NVKM_NOTIFY_DROP otherwise
 * @work:   true to deliver from a workqueue (payload is copied into a
 *          kmalloc'd buffer of 'reply' bytes) rather than in the
 *          sender's context
 * @data/@size: request blob parsed by the event's ctor to select the
 *          notify's types/index/size
 * @reply:  expected payload size; must match what the ctor computed
 *          (notify->size), otherwise -EINVAL
 *
 * The notify starts blocked (block == 1); the caller must
 * nvkm_notify_get() to arm it.  On any failure notify->event is reset
 * to NULL so nvkm_notify_fini() is a safe no-op.
 */
int
nvkm_notify_init(struct nvkm_event *event, int (*func)(struct nvkm_notify *),
		 bool work, void *data, u32 size, u32 reply,
		 struct nvkm_notify *notify)
{
	unsigned long flags;
	int ret = -ENODEV;
	/* comma operator: record the event before checking it's usable */
	if ((notify->event = event), event->refs) {
		ret = event->func->ctor(data, size, notify);
		if (ret == 0 && (ret = -EINVAL, notify->size == reply)) {
			notify->flags = 0;
			notify->block = 1;
			notify->func = func;
			notify->data = NULL;
			if (ret = 0, work) {
				INIT_WORK(&notify->work, nvkm_notify_work);
				set_bit(NVKM_NOTIFY_WORK, &notify->flags);
				notify->data = kmalloc(reply, GFP_KERNEL);
				if (!notify->data)
					ret = -ENOMEM;
			}
		}
		if (ret == 0) {
			spin_lock_irqsave(&event->list_lock, flags);
			list_add_tail(&notify->head, &event->list);
			spin_unlock_irqrestore(&event->list_lock, flags);
		}
	}
	if (ret)
		notify->event = NULL;
	return ret;
}
diff --git a/drivers/gpu/drm/nouveau/core/core/object.c b/drivers/gpu/drm/nouveau/core/core/object.c index 124538555904..b08630577c82 100644 --- a/drivers/gpu/drm/nouveau/core/core/object.c +++ b/drivers/gpu/drm/nouveau/core/core/object.c | |||
@@ -23,9 +23,6 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/object.h> | 25 | #include <core/object.h> |
26 | #include <core/parent.h> | ||
27 | #include <core/namedb.h> | ||
28 | #include <core/handle.h> | ||
29 | #include <core/engine.h> | 26 | #include <core/engine.h> |
30 | 27 | ||
31 | #ifdef NOUVEAU_OBJECT_MAGIC | 28 | #ifdef NOUVEAU_OBJECT_MAGIC |
@@ -61,21 +58,15 @@ nouveau_object_create_(struct nouveau_object *parent, | |||
61 | return 0; | 58 | return 0; |
62 | } | 59 | } |
63 | 60 | ||
64 | static int | 61 | int |
65 | _nouveau_object_ctor(struct nouveau_object *parent, | 62 | _nouveau_object_ctor(struct nouveau_object *parent, |
66 | struct nouveau_object *engine, | 63 | struct nouveau_object *engine, |
67 | struct nouveau_oclass *oclass, void *data, u32 size, | 64 | struct nouveau_oclass *oclass, void *data, u32 size, |
68 | struct nouveau_object **pobject) | 65 | struct nouveau_object **pobject) |
69 | { | 66 | { |
70 | struct nouveau_object *object; | 67 | if (size != 0) |
71 | int ret; | 68 | return -ENOSYS; |
72 | 69 | return nouveau_object_create(parent, engine, oclass, 0, pobject); | |
73 | ret = nouveau_object_create(parent, engine, oclass, 0, &object); | ||
74 | *pobject = nv_object(object); | ||
75 | if (ret) | ||
76 | return ret; | ||
77 | |||
78 | return 0; | ||
79 | } | 70 | } |
80 | 71 | ||
81 | void | 72 | void |
@@ -91,42 +82,24 @@ nouveau_object_destroy(struct nouveau_object *object) | |||
91 | kfree(object); | 82 | kfree(object); |
92 | } | 83 | } |
93 | 84 | ||
94 | static void | ||
95 | _nouveau_object_dtor(struct nouveau_object *object) | ||
96 | { | ||
97 | nouveau_object_destroy(object); | ||
98 | } | ||
99 | |||
100 | int | 85 | int |
101 | nouveau_object_init(struct nouveau_object *object) | 86 | nouveau_object_init(struct nouveau_object *object) |
102 | { | 87 | { |
103 | return 0; | 88 | return 0; |
104 | } | 89 | } |
105 | 90 | ||
106 | static int | ||
107 | _nouveau_object_init(struct nouveau_object *object) | ||
108 | { | ||
109 | return nouveau_object_init(object); | ||
110 | } | ||
111 | |||
112 | int | 91 | int |
113 | nouveau_object_fini(struct nouveau_object *object, bool suspend) | 92 | nouveau_object_fini(struct nouveau_object *object, bool suspend) |
114 | { | 93 | { |
115 | return 0; | 94 | return 0; |
116 | } | 95 | } |
117 | 96 | ||
118 | static int | ||
119 | _nouveau_object_fini(struct nouveau_object *object, bool suspend) | ||
120 | { | ||
121 | return nouveau_object_fini(object, suspend); | ||
122 | } | ||
123 | |||
124 | struct nouveau_ofuncs | 97 | struct nouveau_ofuncs |
125 | nouveau_object_ofuncs = { | 98 | nouveau_object_ofuncs = { |
126 | .ctor = _nouveau_object_ctor, | 99 | .ctor = _nouveau_object_ctor, |
127 | .dtor = _nouveau_object_dtor, | 100 | .dtor = nouveau_object_destroy, |
128 | .init = _nouveau_object_init, | 101 | .init = nouveau_object_init, |
129 | .fini = _nouveau_object_fini, | 102 | .fini = nouveau_object_fini, |
130 | }; | 103 | }; |
131 | 104 | ||
132 | int | 105 | int |
@@ -189,119 +162,6 @@ nouveau_object_ref(struct nouveau_object *obj, struct nouveau_object **ref) | |||
189 | } | 162 | } |
190 | 163 | ||
191 | int | 164 | int |
192 | nouveau_object_new(struct nouveau_object *client, u32 _parent, u32 _handle, | ||
193 | u16 _oclass, void *data, u32 size, | ||
194 | struct nouveau_object **pobject) | ||
195 | { | ||
196 | struct nouveau_object *parent = NULL; | ||
197 | struct nouveau_object *engctx = NULL; | ||
198 | struct nouveau_object *object = NULL; | ||
199 | struct nouveau_object *engine; | ||
200 | struct nouveau_oclass *oclass; | ||
201 | struct nouveau_handle *handle; | ||
202 | int ret; | ||
203 | |||
204 | /* lookup parent object and ensure it *is* a parent */ | ||
205 | parent = nouveau_handle_ref(client, _parent); | ||
206 | if (!parent) { | ||
207 | nv_error(client, "parent 0x%08x not found\n", _parent); | ||
208 | return -ENOENT; | ||
209 | } | ||
210 | |||
211 | if (!nv_iclass(parent, NV_PARENT_CLASS)) { | ||
212 | nv_error(parent, "cannot have children\n"); | ||
213 | ret = -EINVAL; | ||
214 | goto fail_class; | ||
215 | } | ||
216 | |||
217 | /* check that parent supports the requested subclass */ | ||
218 | ret = nouveau_parent_sclass(parent, _oclass, &engine, &oclass); | ||
219 | if (ret) { | ||
220 | nv_debug(parent, "illegal class 0x%04x\n", _oclass); | ||
221 | goto fail_class; | ||
222 | } | ||
223 | |||
224 | /* make sure engine init has been completed *before* any objects | ||
225 | * it controls are created - the constructors may depend on | ||
226 | * state calculated at init (ie. default context construction) | ||
227 | */ | ||
228 | if (engine) { | ||
229 | ret = nouveau_object_inc(engine); | ||
230 | if (ret) | ||
231 | goto fail_class; | ||
232 | } | ||
233 | |||
234 | /* if engine requires it, create a context object to insert | ||
235 | * between the parent and its children (eg. PGRAPH context) | ||
236 | */ | ||
237 | if (engine && nv_engine(engine)->cclass) { | ||
238 | ret = nouveau_object_ctor(parent, engine, | ||
239 | nv_engine(engine)->cclass, | ||
240 | data, size, &engctx); | ||
241 | if (ret) | ||
242 | goto fail_engctx; | ||
243 | } else { | ||
244 | nouveau_object_ref(parent, &engctx); | ||
245 | } | ||
246 | |||
247 | /* finally, create new object and bind it to its handle */ | ||
248 | ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object); | ||
249 | *pobject = object; | ||
250 | if (ret) | ||
251 | goto fail_ctor; | ||
252 | |||
253 | ret = nouveau_object_inc(object); | ||
254 | if (ret) | ||
255 | goto fail_init; | ||
256 | |||
257 | ret = nouveau_handle_create(parent, _parent, _handle, object, &handle); | ||
258 | if (ret) | ||
259 | goto fail_handle; | ||
260 | |||
261 | ret = nouveau_handle_init(handle); | ||
262 | if (ret) | ||
263 | nouveau_handle_destroy(handle); | ||
264 | |||
265 | fail_handle: | ||
266 | nouveau_object_dec(object, false); | ||
267 | fail_init: | ||
268 | nouveau_object_ref(NULL, &object); | ||
269 | fail_ctor: | ||
270 | nouveau_object_ref(NULL, &engctx); | ||
271 | fail_engctx: | ||
272 | if (engine) | ||
273 | nouveau_object_dec(engine, false); | ||
274 | fail_class: | ||
275 | nouveau_object_ref(NULL, &parent); | ||
276 | return ret; | ||
277 | } | ||
278 | |||
279 | int | ||
280 | nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle) | ||
281 | { | ||
282 | struct nouveau_object *parent = NULL; | ||
283 | struct nouveau_object *namedb = NULL; | ||
284 | struct nouveau_handle *handle = NULL; | ||
285 | |||
286 | parent = nouveau_handle_ref(client, _parent); | ||
287 | if (!parent) | ||
288 | return -ENOENT; | ||
289 | |||
290 | namedb = nv_pclass(parent, NV_NAMEDB_CLASS); | ||
291 | if (namedb) { | ||
292 | handle = nouveau_namedb_get(nv_namedb(namedb), _handle); | ||
293 | if (handle) { | ||
294 | nouveau_namedb_put(handle); | ||
295 | nouveau_handle_fini(handle, false); | ||
296 | nouveau_handle_destroy(handle); | ||
297 | } | ||
298 | } | ||
299 | |||
300 | nouveau_object_ref(NULL, &parent); | ||
301 | return handle ? 0 : -EINVAL; | ||
302 | } | ||
303 | |||
304 | int | ||
305 | nouveau_object_inc(struct nouveau_object *object) | 165 | nouveau_object_inc(struct nouveau_object *object) |
306 | { | 166 | { |
307 | int ref = atomic_add_return(1, &object->usecount); | 167 | int ref = atomic_add_return(1, &object->usecount); |
diff --git a/drivers/gpu/drm/nouveau/core/core/parent.c b/drivers/gpu/drm/nouveau/core/core/parent.c index dee5d1235e9b..8701968a9743 100644 --- a/drivers/gpu/drm/nouveau/core/core/parent.c +++ b/drivers/gpu/drm/nouveau/core/core/parent.c | |||
@@ -75,6 +75,39 @@ nouveau_parent_sclass(struct nouveau_object *parent, u16 handle, | |||
75 | } | 75 | } |
76 | 76 | ||
77 | int | 77 | int |
78 | nouveau_parent_lclass(struct nouveau_object *parent, u32 *lclass, int size) | ||
79 | { | ||
80 | struct nouveau_sclass *sclass; | ||
81 | struct nouveau_engine *engine; | ||
82 | struct nouveau_oclass *oclass; | ||
83 | int nr = -1, i; | ||
84 | u64 mask; | ||
85 | |||
86 | sclass = nv_parent(parent)->sclass; | ||
87 | while (sclass) { | ||
88 | if (++nr < size) | ||
89 | lclass[nr] = sclass->oclass->handle; | ||
90 | sclass = sclass->sclass; | ||
91 | } | ||
92 | |||
93 | mask = nv_parent(parent)->engine; | ||
94 | while (i = __ffs64(mask), mask) { | ||
95 | engine = nouveau_engine(parent, i); | ||
96 | if (engine && (oclass = engine->sclass)) { | ||
97 | while (oclass->ofuncs) { | ||
98 | if (++nr < size) | ||
99 | lclass[nr] = oclass->handle; | ||
100 | oclass++; | ||
101 | } | ||
102 | } | ||
103 | |||
104 | mask &= ~(1ULL << i); | ||
105 | } | ||
106 | |||
107 | return nr + 1; | ||
108 | } | ||
109 | |||
110 | int | ||
78 | nouveau_parent_create_(struct nouveau_object *parent, | 111 | nouveau_parent_create_(struct nouveau_object *parent, |
79 | struct nouveau_object *engine, | 112 | struct nouveau_object *engine, |
80 | struct nouveau_oclass *oclass, u32 pclass, | 113 | struct nouveau_oclass *oclass, u32 pclass, |
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c index f31527733e00..abb410ef09ea 100644 --- a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c +++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c | |||
@@ -30,7 +30,6 @@ | |||
30 | #include <subdev/vm.h> | 30 | #include <subdev/vm.h> |
31 | 31 | ||
32 | #include <core/client.h> | 32 | #include <core/client.h> |
33 | #include <core/class.h> | ||
34 | #include <core/enum.h> | 33 | #include <core/enum.h> |
35 | 34 | ||
36 | 35 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c index ac3291f781f6..9261694d0d35 100644 --- a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c | |||
@@ -26,9 +26,7 @@ | |||
26 | #include <engine/fifo.h> | 26 | #include <engine/fifo.h> |
27 | #include <engine/copy.h> | 27 | #include <engine/copy.h> |
28 | 28 | ||
29 | #include <core/class.h> | ||
30 | #include <core/enum.h> | 29 | #include <core/enum.h> |
31 | #include <core/class.h> | ||
32 | #include <core/enum.h> | 30 | #include <core/enum.h> |
33 | 31 | ||
34 | #include "fuc/nvc0.fuc.h" | 32 | #include "fuc/nvc0.fuc.h" |
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c index 748a61eb3c6f..c7194b354605 100644 --- a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c | |||
@@ -24,7 +24,6 @@ | |||
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/os.h> |
26 | #include <core/enum.h> | 26 | #include <core/enum.h> |
27 | #include <core/class.h> | ||
28 | #include <core/engctx.h> | 27 | #include <core/engctx.h> |
29 | 28 | ||
30 | #include <engine/copy.h> | 29 | #include <engine/copy.h> |
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c index 2551dafbec73..ea5c42f31791 100644 --- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c +++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <core/client.h> | 25 | #include <core/client.h> |
26 | #include <core/os.h> | 26 | #include <core/os.h> |
27 | #include <core/enum.h> | 27 | #include <core/enum.h> |
28 | #include <core/class.h> | ||
29 | #include <core/engctx.h> | 28 | #include <core/engctx.h> |
30 | #include <core/gpuobj.h> | 29 | #include <core/gpuobj.h> |
31 | 30 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c index c7082377ec76..5571c09534cb 100644 --- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c +++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <core/client.h> | 25 | #include <core/client.h> |
26 | #include <core/os.h> | 26 | #include <core/os.h> |
27 | #include <core/enum.h> | 27 | #include <core/enum.h> |
28 | #include <core/class.h> | ||
29 | #include <core/engctx.h> | 28 | #include <core/engctx.h> |
30 | 29 | ||
31 | #include <subdev/timer.h> | 30 | #include <subdev/timer.h> |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/acpi.c b/drivers/gpu/drm/nouveau/core/engine/device/acpi.c new file mode 100644 index 000000000000..4dbf0ba89e5c --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/device/acpi.c | |||
@@ -0,0 +1,59 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "acpi.h" | ||
26 | |||
#ifdef CONFIG_ACPI
/* ACPI bus-event notifier callback: forwards "ac_adapter" events
 * (AC power plug/unplug) onto the device's event stream via
 * nvkm_event_send() so listeners can react to power-source changes.
 * All other event classes are ignored.
 */
static int
nvkm_acpi_ntfy(struct notifier_block *nb, unsigned long val, void *data)
{
	/* recover the owning device from the embedded notifier_block */
	struct nouveau_device *device =
		container_of(nb, typeof(*device), acpi.nb);
	struct acpi_bus_event *info = data;

	if (!strcmp(info->device_class, "ac_adapter"))
		nvkm_event_send(&device->event, 1, 0, NULL, 0);

	return NOTIFY_DONE;
}
#endif
41 | |||
/* Tear down ACPI event notification for the device.  A no-op when the
 * kernel is built without CONFIG_ACPI.  'suspend' is currently unused;
 * always returns 0.
 */
int
nvkm_acpi_fini(struct nouveau_device *device, bool suspend)
{
#ifdef CONFIG_ACPI
	unregister_acpi_notifier(&device->acpi.nb);
#endif
	return 0;
}
50 | |||
/* Hook the device into ACPI bus-event notification (see nvkm_acpi_ntfy).
 * A no-op when the kernel is built without CONFIG_ACPI.  Always
 * returns 0.
 */
int
nvkm_acpi_init(struct nouveau_device *device)
{
#ifdef CONFIG_ACPI
	device->acpi.nb.notifier_call = nvkm_acpi_ntfy;
	register_acpi_notifier(&device->acpi.nb);
#endif
	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/acpi.h b/drivers/gpu/drm/nouveau/core/engine/device/acpi.h new file mode 100644 index 000000000000..cc49f4f568cd --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/device/acpi.h | |||
@@ -0,0 +1,9 @@ | |||
1 | #ifndef __NVKM_DEVICE_ACPI_H__ | ||
2 | #define __NVKM_DEVICE_ACPI_H__ | ||
3 | |||
4 | #include <engine/device.h> | ||
5 | |||
6 | int nvkm_acpi_init(struct nouveau_device *); | ||
7 | int nvkm_acpi_fini(struct nouveau_device *, bool); | ||
8 | |||
9 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c index 18c8c7245b73..8928f7981d4a 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c | |||
@@ -26,10 +26,14 @@ | |||
26 | #include <core/device.h> | 26 | #include <core/device.h> |
27 | #include <core/client.h> | 27 | #include <core/client.h> |
28 | #include <core/option.h> | 28 | #include <core/option.h> |
29 | #include <nvif/unpack.h> | ||
30 | #include <nvif/class.h> | ||
29 | 31 | ||
30 | #include <core/class.h> | 32 | #include <subdev/fb.h> |
33 | #include <subdev/instmem.h> | ||
31 | 34 | ||
32 | #include "priv.h" | 35 | #include "priv.h" |
36 | #include "acpi.h" | ||
33 | 37 | ||
34 | static DEFINE_MUTEX(nv_devices_mutex); | 38 | static DEFINE_MUTEX(nv_devices_mutex); |
35 | static LIST_HEAD(nv_devices); | 39 | static LIST_HEAD(nv_devices); |
@@ -49,74 +53,258 @@ nouveau_device_find(u64 name) | |||
49 | return match; | 53 | return match; |
50 | } | 54 | } |
51 | 55 | ||
56 | int | ||
57 | nouveau_device_list(u64 *name, int size) | ||
58 | { | ||
59 | struct nouveau_device *device; | ||
60 | int nr = 0; | ||
61 | mutex_lock(&nv_devices_mutex); | ||
62 | list_for_each_entry(device, &nv_devices, head) { | ||
63 | if (nr++ < size) | ||
64 | name[nr - 1] = device->handle; | ||
65 | } | ||
66 | mutex_unlock(&nv_devices_mutex); | ||
67 | return nr; | ||
68 | } | ||
69 | |||
52 | /****************************************************************************** | 70 | /****************************************************************************** |
53 | * nouveau_devobj (0x0080): class implementation | 71 | * nouveau_devobj (0x0080): class implementation |
54 | *****************************************************************************/ | 72 | *****************************************************************************/ |
73 | |||
55 | struct nouveau_devobj { | 74 | struct nouveau_devobj { |
56 | struct nouveau_parent base; | 75 | struct nouveau_parent base; |
57 | struct nouveau_object *subdev[NVDEV_SUBDEV_NR]; | 76 | struct nouveau_object *subdev[NVDEV_SUBDEV_NR]; |
58 | }; | 77 | }; |
59 | 78 | ||
/* NV_DEVICE_V0_INFO method: fill the client-supplied nv_device_info_v0
 * structure with the device's platform, family, chipset, revision and
 * RAM figures.
 */
static int
nouveau_devobj_info(struct nouveau_object *object, void *data, u32 size)
{
	struct nouveau_device *device = nv_device(object);
	struct nouveau_fb *pfb = nouveau_fb(device);
	struct nouveau_instmem *imem = nouveau_instmem(device);
	union {
		struct nv_device_info_v0 v0;
	} *args = data;
	int ret;

	nv_ioctl(object, "device info size %d\n", size);
	/* NOTE(review): nvif_unpack() is a macro that validates the
	 * args version/size and is presumed to assign 'ret' as a side
	 * effect — 'ret' is only returned on the failure path. */
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(object, "device info vers %d\n", args->v0.version);
	} else
		return ret;

	/* the listed chipsets are reported as integrated (IGP) parts;
	 * everything else is classified from its bus interface */
	switch (device->chipset) {
	case 0x01a:
	case 0x01f:
	case 0x04c:
	case 0x04e:
	case 0x063:
	case 0x067:
	case 0x068:
	case 0x0aa:
	case 0x0ac:
	case 0x0af:
		args->v0.platform = NV_DEVICE_INFO_V0_IGP;
		break;
	default:
		if (device->pdev) {
			if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP))
				args->v0.platform = NV_DEVICE_INFO_V0_AGP;
			else
			if (pci_is_pcie(device->pdev))
				args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
			else
				args->v0.platform = NV_DEVICE_INFO_V0_PCI;
		} else {
			/* no PCI device backing: platform/SoC device */
			args->v0.platform = NV_DEVICE_INFO_V0_SOC;
		}
		break;
	}

	/* translate internal card_type into the public family enum */
	switch (device->card_type) {
	case NV_04: args->v0.family = NV_DEVICE_INFO_V0_TNT; break;
	case NV_10:
	case NV_11: args->v0.family = NV_DEVICE_INFO_V0_CELSIUS; break;
	case NV_20: args->v0.family = NV_DEVICE_INFO_V0_KELVIN; break;
	case NV_30: args->v0.family = NV_DEVICE_INFO_V0_RANKINE; break;
	case NV_40: args->v0.family = NV_DEVICE_INFO_V0_CURIE; break;
	case NV_50: args->v0.family = NV_DEVICE_INFO_V0_TESLA; break;
	case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
	case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
	case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
	default:
		args->v0.family = 0;
		break;
	}

	args->v0.chipset = device->chipset;
	/* register 0 holds the revision on NV10+; older parts report 0 */
	args->v0.revision = device->chipset >= 0x10 ? nv_rd32(device, 0) : 0x00;
	/* ram_user is ram_size minus the instmem reservation (if any) */
	if (pfb) args->v0.ram_size = args->v0.ram_user = pfb->ram->size;
	else args->v0.ram_size = args->v0.ram_user = 0;
	if (imem) args->v0.ram_user = args->v0.ram_user - imem->reserved;
	return 0;
}
147 | |||
148 | static int | ||
149 | nouveau_devobj_mthd(struct nouveau_object *object, u32 mthd, | ||
150 | void *data, u32 size) | ||
151 | { | ||
152 | switch (mthd) { | ||
153 | case NV_DEVICE_V0_INFO: | ||
154 | return nouveau_devobj_info(object, data, size); | ||
155 | default: | ||
156 | break; | ||
157 | } | ||
158 | return -EINVAL; | ||
159 | } | ||
160 | |||
/* 8-bit register read, forwarded to the backing device engine. */
static u8
nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
{
	return nv_rd08(object->engine, addr);
}
166 | |||
/* 16-bit register read, forwarded to the backing device engine. */
static u16
nouveau_devobj_rd16(struct nouveau_object *object, u64 addr)
{
	return nv_rd16(object->engine, addr);
}
172 | |||
/* 32-bit register read, forwarded to the backing device engine. */
static u32
nouveau_devobj_rd32(struct nouveau_object *object, u64 addr)
{
	return nv_rd32(object->engine, addr);
}
178 | |||
/* 8-bit register write, forwarded to the backing device engine. */
static void
nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data)
{
	nv_wr08(object->engine, addr, data);
}
184 | |||
/* 16-bit register write, forwarded to the backing device engine. */
static void
nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data)
{
	nv_wr16(object->engine, addr, data);
}
190 | |||
/* 32-bit register write, forwarded to the backing device engine. */
static void
nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
	nv_wr32(object->engine, addr, data);
}
196 | |||
/* Map the device object: reports the start/length of resource 0
 * (presumably the register aperture BAR — confirm against callers) for
 * the client to mmap.  Always returns 0.
 */
static int
nouveau_devobj_map(struct nouveau_object *object, u64 *addr, u32 *size)
{
	struct nouveau_device *device = nv_device(object);
	*addr = nv_device_resource_start(device, 0);
	*size = nv_device_resource_len(device, 0);
	return 0;
}
205 | |||
/* Translation table from the public NV_DEVICE_V0_DISABLE_* flag bits to
 * the internal subdev/engine indices they govern, indexed by
 * NVDEV_SUBDEV_*/NVDEV_ENGINE_* id.  A zero entry (NVDEV_SUBDEV_NR)
 * terminates the table.  Used by the devobj constructor to expand the
 * client-supplied disable mask.
 */
static const u64 disable_map[] = {
	[NVDEV_SUBDEV_VBIOS]	= NV_DEVICE_V0_DISABLE_VBIOS,
	[NVDEV_SUBDEV_DEVINIT]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_GPIO]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_I2C]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_CLOCK]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_MXM]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_MC]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_BUS]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_TIMER]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_FB]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_LTC]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_IBUS]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_INSTMEM]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_VM]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_BAR]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_VOLT]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_THERM]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_PWR]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_ENGINE_DMAOBJ]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_ENGINE_PERFMON]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_ENGINE_FIFO]	= NV_DEVICE_V0_DISABLE_FIFO,
	[NVDEV_ENGINE_SW]	= NV_DEVICE_V0_DISABLE_FIFO,
	[NVDEV_ENGINE_GR]	= NV_DEVICE_V0_DISABLE_GRAPH,
	[NVDEV_ENGINE_MPEG]	= NV_DEVICE_V0_DISABLE_MPEG,
	[NVDEV_ENGINE_ME]	= NV_DEVICE_V0_DISABLE_ME,
	[NVDEV_ENGINE_VP]	= NV_DEVICE_V0_DISABLE_VP,
	[NVDEV_ENGINE_CRYPT]	= NV_DEVICE_V0_DISABLE_CRYPT,
	[NVDEV_ENGINE_BSP]	= NV_DEVICE_V0_DISABLE_BSP,
	[NVDEV_ENGINE_PPP]	= NV_DEVICE_V0_DISABLE_PPP,
	[NVDEV_ENGINE_COPY0]	= NV_DEVICE_V0_DISABLE_COPY0,
	[NVDEV_ENGINE_COPY1]	= NV_DEVICE_V0_DISABLE_COPY1,
	[NVDEV_ENGINE_VIC]	= NV_DEVICE_V0_DISABLE_VIC,
	[NVDEV_ENGINE_VENC]	= NV_DEVICE_V0_DISABLE_VENC,
	[NVDEV_ENGINE_DISP]	= NV_DEVICE_V0_DISABLE_DISP,
	[NVDEV_SUBDEV_NR]	= 0,
};
97 | 243 | ||
244 | static void | ||
245 | nouveau_devobj_dtor(struct nouveau_object *object) | ||
246 | { | ||
247 | struct nouveau_devobj *devobj = (void *)object; | ||
248 | int i; | ||
249 | |||
250 | for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) | ||
251 | nouveau_object_ref(NULL, &devobj->subdev[i]); | ||
252 | |||
253 | nouveau_parent_destroy(&devobj->base); | ||
254 | } | ||
255 | |||
/* Extended device object class handed to privileged (super) clients by
 * the constructor: in addition to method dispatch it exposes the mmio
 * map and raw 8/16/32-bit register read/write accessors.
 */
static struct nouveau_oclass
nouveau_devobj_oclass_super = {
	.handle = NV_DEVICE,
	.ofuncs = &(struct nouveau_ofuncs) {
		.dtor = nouveau_devobj_dtor,
		.init = _nouveau_parent_init,
		.fini = _nouveau_parent_fini,
		.mthd = nouveau_devobj_mthd,
		.map = nouveau_devobj_map,
		.rd08 = nouveau_devobj_rd08,
		.rd16 = nouveau_devobj_rd16,
		.rd32 = nouveau_devobj_rd32,
		.wr08 = nouveau_devobj_wr08,
		.wr16 = nouveau_devobj_wr16,
		.wr32 = nouveau_devobj_wr32,
	}
};
273 | |||
98 | static int | 274 | static int |
99 | nouveau_devobj_ctor(struct nouveau_object *parent, | 275 | nouveau_devobj_ctor(struct nouveau_object *parent, |
100 | struct nouveau_object *engine, | 276 | struct nouveau_object *engine, |
101 | struct nouveau_oclass *oclass, void *data, u32 size, | 277 | struct nouveau_oclass *oclass, void *data, u32 size, |
102 | struct nouveau_object **pobject) | 278 | struct nouveau_object **pobject) |
103 | { | 279 | { |
280 | union { | ||
281 | struct nv_device_v0 v0; | ||
282 | } *args = data; | ||
104 | struct nouveau_client *client = nv_client(parent); | 283 | struct nouveau_client *client = nv_client(parent); |
105 | struct nouveau_device *device; | 284 | struct nouveau_device *device; |
106 | struct nouveau_devobj *devobj; | 285 | struct nouveau_devobj *devobj; |
107 | struct nv_device_class *args = data; | ||
108 | u32 boot0, strap; | 286 | u32 boot0, strap; |
109 | u64 disable, mmio_base, mmio_size; | 287 | u64 disable, mmio_base, mmio_size; |
110 | void __iomem *map; | 288 | void __iomem *map; |
111 | int ret, i, c; | 289 | int ret, i, c; |
112 | 290 | ||
113 | if (size < sizeof(struct nv_device_class)) | 291 | nv_ioctl(parent, "create device size %d\n", size); |
114 | return -EINVAL; | 292 | if (nvif_unpack(args->v0, 0, 0, false)) { |
293 | nv_ioctl(parent, "create device v%d device %016llx " | ||
294 | "disable %016llx debug0 %016llx\n", | ||
295 | args->v0.version, args->v0.device, | ||
296 | args->v0.disable, args->v0.debug0); | ||
297 | } else | ||
298 | return ret; | ||
299 | |||
300 | /* give priviledged clients register access */ | ||
301 | if (client->super) | ||
302 | oclass = &nouveau_devobj_oclass_super; | ||
115 | 303 | ||
116 | /* find the device subdev that matches what the client requested */ | 304 | /* find the device subdev that matches what the client requested */ |
117 | device = nv_device(client->device); | 305 | device = nv_device(client->device); |
118 | if (args->device != ~0) { | 306 | if (args->v0.device != ~0) { |
119 | device = nouveau_device_find(args->device); | 307 | device = nouveau_device_find(args->v0.device); |
120 | if (!device) | 308 | if (!device) |
121 | return -ENODEV; | 309 | return -ENODEV; |
122 | } | 310 | } |
@@ -135,14 +323,14 @@ nouveau_devobj_ctor(struct nouveau_object *parent, | |||
135 | mmio_size = nv_device_resource_len(device, 0); | 323 | mmio_size = nv_device_resource_len(device, 0); |
136 | 324 | ||
137 | /* translate api disable mask into internal mapping */ | 325 | /* translate api disable mask into internal mapping */ |
138 | disable = args->debug0; | 326 | disable = args->v0.debug0; |
139 | for (i = 0; i < NVDEV_SUBDEV_NR; i++) { | 327 | for (i = 0; i < NVDEV_SUBDEV_NR; i++) { |
140 | if (args->disable & disable_map[i]) | 328 | if (args->v0.disable & disable_map[i]) |
141 | disable |= (1ULL << i); | 329 | disable |= (1ULL << i); |
142 | } | 330 | } |
143 | 331 | ||
144 | /* identify the chipset, and determine classes of subdev/engines */ | 332 | /* identify the chipset, and determine classes of subdev/engines */ |
145 | if (!(args->disable & NV_DEVICE_DISABLE_IDENTIFY) && | 333 | if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY) && |
146 | !device->card_type) { | 334 | !device->card_type) { |
147 | map = ioremap(mmio_base, 0x102000); | 335 | map = ioremap(mmio_base, 0x102000); |
148 | if (map == NULL) | 336 | if (map == NULL) |
@@ -180,8 +368,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent, | |||
180 | case 0x080: | 368 | case 0x080: |
181 | case 0x090: | 369 | case 0x090: |
182 | case 0x0a0: device->card_type = NV_50; break; | 370 | case 0x0a0: device->card_type = NV_50; break; |
183 | case 0x0c0: device->card_type = NV_C0; break; | 371 | case 0x0c0: |
184 | case 0x0d0: device->card_type = NV_D0; break; | 372 | case 0x0d0: device->card_type = NV_C0; break; |
185 | case 0x0e0: | 373 | case 0x0e0: |
186 | case 0x0f0: | 374 | case 0x0f0: |
187 | case 0x100: device->card_type = NV_E0; break; | 375 | case 0x100: device->card_type = NV_E0; break; |
@@ -206,8 +394,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent, | |||
206 | case NV_30: ret = nv30_identify(device); break; | 394 | case NV_30: ret = nv30_identify(device); break; |
207 | case NV_40: ret = nv40_identify(device); break; | 395 | case NV_40: ret = nv40_identify(device); break; |
208 | case NV_50: ret = nv50_identify(device); break; | 396 | case NV_50: ret = nv50_identify(device); break; |
209 | case NV_C0: | 397 | case NV_C0: ret = nvc0_identify(device); break; |
210 | case NV_D0: ret = nvc0_identify(device); break; | ||
211 | case NV_E0: ret = nve0_identify(device); break; | 398 | case NV_E0: ret = nve0_identify(device); break; |
212 | case GM100: ret = gm100_identify(device); break; | 399 | case GM100: ret = gm100_identify(device); break; |
213 | default: | 400 | default: |
@@ -242,7 +429,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent, | |||
242 | nv_debug(device, "crystal freq: %dKHz\n", device->crystal); | 429 | nv_debug(device, "crystal freq: %dKHz\n", device->crystal); |
243 | } | 430 | } |
244 | 431 | ||
245 | if (!(args->disable & NV_DEVICE_DISABLE_MMIO) && | 432 | if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_MMIO) && |
246 | !nv_subdev(device)->mmio) { | 433 | !nv_subdev(device)->mmio) { |
247 | nv_subdev(device)->mmio = ioremap(mmio_base, mmio_size); | 434 | nv_subdev(device)->mmio = ioremap(mmio_base, mmio_size); |
248 | if (!nv_subdev(device)->mmio) { | 435 | if (!nv_subdev(device)->mmio) { |
@@ -298,71 +485,19 @@ nouveau_devobj_ctor(struct nouveau_object *parent, | |||
298 | return 0; | 485 | return 0; |
299 | } | 486 | } |
300 | 487 | ||
301 | static void | ||
302 | nouveau_devobj_dtor(struct nouveau_object *object) | ||
303 | { | ||
304 | struct nouveau_devobj *devobj = (void *)object; | ||
305 | int i; | ||
306 | |||
307 | for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) | ||
308 | nouveau_object_ref(NULL, &devobj->subdev[i]); | ||
309 | |||
310 | nouveau_parent_destroy(&devobj->base); | ||
311 | } | ||
312 | |||
313 | static u8 | ||
314 | nouveau_devobj_rd08(struct nouveau_object *object, u64 addr) | ||
315 | { | ||
316 | return nv_rd08(object->engine, addr); | ||
317 | } | ||
318 | |||
319 | static u16 | ||
320 | nouveau_devobj_rd16(struct nouveau_object *object, u64 addr) | ||
321 | { | ||
322 | return nv_rd16(object->engine, addr); | ||
323 | } | ||
324 | |||
325 | static u32 | ||
326 | nouveau_devobj_rd32(struct nouveau_object *object, u64 addr) | ||
327 | { | ||
328 | return nv_rd32(object->engine, addr); | ||
329 | } | ||
330 | |||
331 | static void | ||
332 | nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data) | ||
333 | { | ||
334 | nv_wr08(object->engine, addr, data); | ||
335 | } | ||
336 | |||
337 | static void | ||
338 | nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data) | ||
339 | { | ||
340 | nv_wr16(object->engine, addr, data); | ||
341 | } | ||
342 | |||
343 | static void | ||
344 | nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data) | ||
345 | { | ||
346 | nv_wr32(object->engine, addr, data); | ||
347 | } | ||
348 | |||
349 | static struct nouveau_ofuncs | 488 | static struct nouveau_ofuncs |
350 | nouveau_devobj_ofuncs = { | 489 | nouveau_devobj_ofuncs = { |
351 | .ctor = nouveau_devobj_ctor, | 490 | .ctor = nouveau_devobj_ctor, |
352 | .dtor = nouveau_devobj_dtor, | 491 | .dtor = nouveau_devobj_dtor, |
353 | .init = _nouveau_parent_init, | 492 | .init = _nouveau_parent_init, |
354 | .fini = _nouveau_parent_fini, | 493 | .fini = _nouveau_parent_fini, |
355 | .rd08 = nouveau_devobj_rd08, | 494 | .mthd = nouveau_devobj_mthd, |
356 | .rd16 = nouveau_devobj_rd16, | ||
357 | .rd32 = nouveau_devobj_rd32, | ||
358 | .wr08 = nouveau_devobj_wr08, | ||
359 | .wr16 = nouveau_devobj_wr16, | ||
360 | .wr32 = nouveau_devobj_wr32, | ||
361 | }; | 495 | }; |
362 | 496 | ||
363 | /****************************************************************************** | 497 | /****************************************************************************** |
364 | * nouveau_device: engine functions | 498 | * nouveau_device: engine functions |
365 | *****************************************************************************/ | 499 | *****************************************************************************/ |
500 | |||
366 | static struct nouveau_oclass | 501 | static struct nouveau_oclass |
367 | nouveau_device_sclass[] = { | 502 | nouveau_device_sclass[] = { |
368 | { 0x0080, &nouveau_devobj_ofuncs }, | 503 | { 0x0080, &nouveau_devobj_ofuncs }, |
@@ -370,6 +505,23 @@ nouveau_device_sclass[] = { | |||
370 | }; | 505 | }; |
371 | 506 | ||
372 | static int | 507 | static int |
508 | nouveau_device_event_ctor(void *data, u32 size, struct nvkm_notify *notify) | ||
509 | { | ||
510 | if (!WARN_ON(size != 0)) { | ||
511 | notify->size = 0; | ||
512 | notify->types = 1; | ||
513 | notify->index = 0; | ||
514 | return 0; | ||
515 | } | ||
516 | return -EINVAL; | ||
517 | } | ||
518 | |||
/* Event ops for the per-device event stream; only a request constructor
 * is required.
 */
static const struct nvkm_event_func
nouveau_device_event_func = {
	.ctor = nouveau_device_event_ctor,
};
523 | |||
524 | static int | ||
373 | nouveau_device_fini(struct nouveau_object *object, bool suspend) | 525 | nouveau_device_fini(struct nouveau_object *object, bool suspend) |
374 | { | 526 | { |
375 | struct nouveau_device *device = (void *)object; | 527 | struct nouveau_device *device = (void *)object; |
@@ -386,7 +538,7 @@ nouveau_device_fini(struct nouveau_object *object, bool suspend) | |||
386 | } | 538 | } |
387 | } | 539 | } |
388 | 540 | ||
389 | ret = 0; | 541 | ret = nvkm_acpi_fini(device, suspend); |
390 | fail: | 542 | fail: |
391 | for (; ret && i < NVDEV_SUBDEV_NR; i++) { | 543 | for (; ret && i < NVDEV_SUBDEV_NR; i++) { |
392 | if ((subdev = device->subdev[i])) { | 544 | if ((subdev = device->subdev[i])) { |
@@ -407,7 +559,11 @@ nouveau_device_init(struct nouveau_object *object) | |||
407 | { | 559 | { |
408 | struct nouveau_device *device = (void *)object; | 560 | struct nouveau_device *device = (void *)object; |
409 | struct nouveau_object *subdev; | 561 | struct nouveau_object *subdev; |
410 | int ret, i; | 562 | int ret, i = 0; |
563 | |||
564 | ret = nvkm_acpi_init(device); | ||
565 | if (ret) | ||
566 | goto fail; | ||
411 | 567 | ||
412 | for (i = 0; i < NVDEV_SUBDEV_NR; i++) { | 568 | for (i = 0; i < NVDEV_SUBDEV_NR; i++) { |
413 | if ((subdev = device->subdev[i])) { | 569 | if ((subdev = device->subdev[i])) { |
@@ -430,6 +586,8 @@ fail: | |||
430 | } | 586 | } |
431 | } | 587 | } |
432 | 588 | ||
589 | if (ret) | ||
590 | nvkm_acpi_fini(device, false); | ||
433 | return ret; | 591 | return ret; |
434 | } | 592 | } |
435 | 593 | ||
@@ -438,6 +596,8 @@ nouveau_device_dtor(struct nouveau_object *object) | |||
438 | { | 596 | { |
439 | struct nouveau_device *device = (void *)object; | 597 | struct nouveau_device *device = (void *)object; |
440 | 598 | ||
599 | nvkm_event_fini(&device->event); | ||
600 | |||
441 | mutex_lock(&nv_devices_mutex); | 601 | mutex_lock(&nv_devices_mutex); |
442 | list_del(&device->head); | 602 | list_del(&device->head); |
443 | mutex_unlock(&nv_devices_mutex); | 603 | mutex_unlock(&nv_devices_mutex); |
@@ -478,31 +638,6 @@ nv_device_resource_len(struct nouveau_device *device, unsigned int bar) | |||
478 | } | 638 | } |
479 | } | 639 | } |
480 | 640 | ||
481 | dma_addr_t | ||
482 | nv_device_map_page(struct nouveau_device *device, struct page *page) | ||
483 | { | ||
484 | dma_addr_t ret; | ||
485 | |||
486 | if (nv_device_is_pci(device)) { | ||
487 | ret = pci_map_page(device->pdev, page, 0, PAGE_SIZE, | ||
488 | PCI_DMA_BIDIRECTIONAL); | ||
489 | if (pci_dma_mapping_error(device->pdev, ret)) | ||
490 | ret = 0; | ||
491 | } else { | ||
492 | ret = page_to_phys(page); | ||
493 | } | ||
494 | |||
495 | return ret; | ||
496 | } | ||
497 | |||
498 | void | ||
499 | nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr) | ||
500 | { | ||
501 | if (nv_device_is_pci(device)) | ||
502 | pci_unmap_page(device->pdev, addr, PAGE_SIZE, | ||
503 | PCI_DMA_BIDIRECTIONAL); | ||
504 | } | ||
505 | |||
506 | int | 641 | int |
507 | nv_device_get_irq(struct nouveau_device *device, bool stall) | 642 | nv_device_get_irq(struct nouveau_device *device, bool stall) |
508 | { | 643 | { |
@@ -560,6 +695,9 @@ nouveau_device_create_(void *dev, enum nv_bus_type type, u64 name, | |||
560 | nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE"); | 695 | nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE"); |
561 | nv_engine(device)->sclass = nouveau_device_sclass; | 696 | nv_engine(device)->sclass = nouveau_device_sclass; |
562 | list_add(&device->head, &nv_devices); | 697 | list_add(&device->head, &nv_devices); |
698 | |||
699 | ret = nvkm_event_init(&nouveau_device_event_func, 1, 1, | ||
700 | &device->event); | ||
563 | done: | 701 | done: |
564 | mutex_unlock(&nv_devices_mutex); | 702 | mutex_unlock(&nv_devices_mutex); |
565 | return ret; | 703 | return ret; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c index 4b69bf56ed01..e34101a3490e 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c | |||
@@ -22,55 +22,82 @@ | |||
22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | 22 | * Authors: Ben Skeggs <bskeggs@redhat.com> |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/client.h> | ||
25 | #include <core/object.h> | 26 | #include <core/object.h> |
26 | #include <core/class.h> | 27 | #include <nvif/unpack.h> |
28 | #include <nvif/class.h> | ||
29 | #include <nvif/ioctl.h> | ||
27 | 30 | ||
28 | #include <subdev/clock.h> | 31 | #include <subdev/clock.h> |
29 | 32 | ||
30 | #include "priv.h" | 33 | #include "priv.h" |
31 | 34 | ||
32 | static int | 35 | static int |
33 | nouveau_control_mthd_pstate_info(struct nouveau_object *object, u32 mthd, | 36 | nouveau_control_mthd_pstate_info(struct nouveau_object *object, |
34 | void *data, u32 size) | 37 | void *data, u32 size) |
35 | { | 38 | { |
39 | union { | ||
40 | struct nvif_control_pstate_info_v0 v0; | ||
41 | } *args = data; | ||
36 | struct nouveau_clock *clk = nouveau_clock(object); | 42 | struct nouveau_clock *clk = nouveau_clock(object); |
37 | struct nv_control_pstate_info *args = data; | 43 | int ret; |
38 | 44 | ||
39 | if (size < sizeof(*args)) | 45 | nv_ioctl(object, "control pstate info size %d\n", size); |
40 | return -EINVAL; | 46 | if (nvif_unpack(args->v0, 0, 0, false)) { |
47 | nv_ioctl(object, "control pstate info vers %d\n", | ||
48 | args->v0.version); | ||
49 | } else | ||
50 | return ret; | ||
41 | 51 | ||
42 | if (clk) { | 52 | if (clk) { |
43 | args->count = clk->state_nr; | 53 | args->v0.count = clk->state_nr; |
44 | args->ustate = clk->ustate; | 54 | args->v0.ustate_ac = clk->ustate_ac; |
45 | args->pstate = clk->pstate; | 55 | args->v0.ustate_dc = clk->ustate_dc; |
56 | args->v0.pwrsrc = clk->pwrsrc; | ||
57 | args->v0.pstate = clk->pstate; | ||
46 | } else { | 58 | } else { |
47 | args->count = 0; | 59 | args->v0.count = 0; |
48 | args->ustate = NV_CONTROL_PSTATE_INFO_USTATE_DISABLE; | 60 | args->v0.ustate_ac = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE; |
49 | args->pstate = NV_CONTROL_PSTATE_INFO_PSTATE_UNKNOWN; | 61 | args->v0.ustate_dc = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE; |
62 | args->v0.pwrsrc = -ENOSYS; | ||
63 | args->v0.pstate = NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN; | ||
50 | } | 64 | } |
51 | 65 | ||
52 | return 0; | 66 | return 0; |
53 | } | 67 | } |
54 | 68 | ||
55 | static int | 69 | static int |
56 | nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd, | 70 | nouveau_control_mthd_pstate_attr(struct nouveau_object *object, |
57 | void *data, u32 size) | 71 | void *data, u32 size) |
58 | { | 72 | { |
73 | union { | ||
74 | struct nvif_control_pstate_attr_v0 v0; | ||
75 | } *args = data; | ||
59 | struct nouveau_clock *clk = nouveau_clock(object); | 76 | struct nouveau_clock *clk = nouveau_clock(object); |
60 | struct nv_control_pstate_attr *args = data; | ||
61 | struct nouveau_clocks *domain; | 77 | struct nouveau_clocks *domain; |
62 | struct nouveau_pstate *pstate; | 78 | struct nouveau_pstate *pstate; |
63 | struct nouveau_cstate *cstate; | 79 | struct nouveau_cstate *cstate; |
64 | int i = 0, j = -1; | 80 | int i = 0, j = -1; |
65 | u32 lo, hi; | 81 | u32 lo, hi; |
66 | 82 | int ret; | |
67 | if ((size < sizeof(*args)) || !clk || | 83 | |
68 | (args->state >= 0 && args->state >= clk->state_nr)) | 84 | nv_ioctl(object, "control pstate attr size %d\n", size); |
69 | return -EINVAL; | 85 | if (nvif_unpack(args->v0, 0, 0, false)) { |
86 | nv_ioctl(object, "control pstate attr vers %d state %d " | ||
87 | "index %d\n", | ||
88 | args->v0.version, args->v0.state, args->v0.index); | ||
89 | if (!clk) | ||
90 | return -ENODEV; | ||
91 | if (args->v0.state < NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT) | ||
92 | return -EINVAL; | ||
93 | if (args->v0.state >= clk->state_nr) | ||
94 | return -EINVAL; | ||
95 | } else | ||
96 | return ret; | ||
70 | domain = clk->domains; | 97 | domain = clk->domains; |
71 | 98 | ||
72 | while (domain->name != nv_clk_src_max) { | 99 | while (domain->name != nv_clk_src_max) { |
73 | if (domain->mname && ++j == args->index) | 100 | if (domain->mname && ++j == args->v0.index) |
74 | break; | 101 | break; |
75 | domain++; | 102 | domain++; |
76 | } | 103 | } |
@@ -78,9 +105,9 @@ nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd, | |||
78 | if (domain->name == nv_clk_src_max) | 105 | if (domain->name == nv_clk_src_max) |
79 | return -EINVAL; | 106 | return -EINVAL; |
80 | 107 | ||
81 | if (args->state != NV_CONTROL_PSTATE_ATTR_STATE_CURRENT) { | 108 | if (args->v0.state != NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT) { |
82 | list_for_each_entry(pstate, &clk->states, head) { | 109 | list_for_each_entry(pstate, &clk->states, head) { |
83 | if (i++ == args->state) | 110 | if (i++ == args->v0.state) |
84 | break; | 111 | break; |
85 | } | 112 | } |
86 | 113 | ||
@@ -91,21 +118,21 @@ nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd, | |||
91 | hi = max(hi, cstate->domain[domain->name]); | 118 | hi = max(hi, cstate->domain[domain->name]); |
92 | } | 119 | } |
93 | 120 | ||
94 | args->state = pstate->pstate; | 121 | args->v0.state = pstate->pstate; |
95 | } else { | 122 | } else { |
96 | lo = max(clk->read(clk, domain->name), 0); | 123 | lo = max(clk->read(clk, domain->name), 0); |
97 | hi = lo; | 124 | hi = lo; |
98 | } | 125 | } |
99 | 126 | ||
100 | snprintf(args->name, sizeof(args->name), "%s", domain->mname); | 127 | snprintf(args->v0.name, sizeof(args->v0.name), "%s", domain->mname); |
101 | snprintf(args->unit, sizeof(args->unit), "MHz"); | 128 | snprintf(args->v0.unit, sizeof(args->v0.unit), "MHz"); |
102 | args->min = lo / domain->mdiv; | 129 | args->v0.min = lo / domain->mdiv; |
103 | args->max = hi / domain->mdiv; | 130 | args->v0.max = hi / domain->mdiv; |
104 | 131 | ||
105 | args->index = 0; | 132 | args->v0.index = 0; |
106 | while ((++domain)->name != nv_clk_src_max) { | 133 | while ((++domain)->name != nv_clk_src_max) { |
107 | if (domain->mname) { | 134 | if (domain->mname) { |
108 | args->index = ++j; | 135 | args->v0.index = ++j; |
109 | break; | 136 | break; |
110 | } | 137 | } |
111 | } | 138 | } |
@@ -114,31 +141,65 @@ nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd, | |||
114 | } | 141 | } |
115 | 142 | ||
116 | static int | 143 | static int |
117 | nouveau_control_mthd_pstate_user(struct nouveau_object *object, u32 mthd, | 144 | nouveau_control_mthd_pstate_user(struct nouveau_object *object, |
118 | void *data, u32 size) | 145 | void *data, u32 size) |
119 | { | 146 | { |
147 | union { | ||
148 | struct nvif_control_pstate_user_v0 v0; | ||
149 | } *args = data; | ||
120 | struct nouveau_clock *clk = nouveau_clock(object); | 150 | struct nouveau_clock *clk = nouveau_clock(object); |
121 | struct nv_control_pstate_user *args = data; | 151 | int ret; |
152 | |||
153 | nv_ioctl(object, "control pstate user size %d\n", size); | ||
154 | if (nvif_unpack(args->v0, 0, 0, false)) { | ||
155 | nv_ioctl(object, "control pstate user vers %d ustate %d " | ||
156 | "pwrsrc %d\n", args->v0.version, | ||
157 | args->v0.ustate, args->v0.pwrsrc); | ||
158 | if (!clk) | ||
159 | return -ENODEV; | ||
160 | } else | ||
161 | return ret; | ||
162 | |||
163 | if (args->v0.pwrsrc >= 0) { | ||
164 | ret |= nouveau_clock_ustate(clk, args->v0.ustate, args->v0.pwrsrc); | ||
165 | } else { | ||
166 | ret |= nouveau_clock_ustate(clk, args->v0.ustate, 0); | ||
167 | ret |= nouveau_clock_ustate(clk, args->v0.ustate, 1); | ||
168 | } | ||
122 | 169 | ||
123 | if (size < sizeof(*args) || !clk) | 170 | return ret; |
124 | return -EINVAL; | 171 | } |
125 | 172 | ||
126 | return nouveau_clock_ustate(clk, args->state); | 173 | static int |
174 | nouveau_control_mthd(struct nouveau_object *object, u32 mthd, | ||
175 | void *data, u32 size) | ||
176 | { | ||
177 | switch (mthd) { | ||
178 | case NVIF_CONTROL_PSTATE_INFO: | ||
179 | return nouveau_control_mthd_pstate_info(object, data, size); | ||
180 | case NVIF_CONTROL_PSTATE_ATTR: | ||
181 | return nouveau_control_mthd_pstate_attr(object, data, size); | ||
182 | case NVIF_CONTROL_PSTATE_USER: | ||
183 | return nouveau_control_mthd_pstate_user(object, data, size); | ||
184 | default: | ||
185 | break; | ||
186 | } | ||
187 | return -EINVAL; | ||
127 | } | 188 | } |
128 | 189 | ||
190 | static struct nouveau_ofuncs | ||
191 | nouveau_control_ofuncs = { | ||
192 | .ctor = _nouveau_object_ctor, | ||
193 | .dtor = nouveau_object_destroy, | ||
194 | .init = nouveau_object_init, | ||
195 | .fini = nouveau_object_fini, | ||
196 | .mthd = nouveau_control_mthd, | ||
197 | }; | ||
198 | |||
129 | struct nouveau_oclass | 199 | struct nouveau_oclass |
130 | nouveau_control_oclass[] = { | 200 | nouveau_control_oclass[] = { |
131 | { .handle = NV_CONTROL_CLASS, | 201 | { .handle = NVIF_IOCTL_NEW_V0_CONTROL, |
132 | .ofuncs = &nouveau_object_ofuncs, | 202 | .ofuncs = &nouveau_control_ofuncs |
133 | .omthds = (struct nouveau_omthds[]) { | ||
134 | { NV_CONTROL_PSTATE_INFO, | ||
135 | NV_CONTROL_PSTATE_INFO, nouveau_control_mthd_pstate_info }, | ||
136 | { NV_CONTROL_PSTATE_ATTR, | ||
137 | NV_CONTROL_PSTATE_ATTR, nouveau_control_mthd_pstate_attr }, | ||
138 | { NV_CONTROL_PSTATE_USER, | ||
139 | NV_CONTROL_PSTATE_USER, nouveau_control_mthd_pstate_user }, | ||
140 | {}, | ||
141 | }, | ||
142 | }, | 203 | }, |
143 | {} | 204 | {} |
144 | }; | 205 | }; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c index a520029e25d9..377ec0b8851e 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c | |||
@@ -33,7 +33,7 @@ | |||
33 | #include <subdev/mc.h> | 33 | #include <subdev/mc.h> |
34 | #include <subdev/timer.h> | 34 | #include <subdev/timer.h> |
35 | #include <subdev/fb.h> | 35 | #include <subdev/fb.h> |
36 | #include <subdev/ltcg.h> | 36 | #include <subdev/ltc.h> |
37 | #include <subdev/ibus.h> | 37 | #include <subdev/ibus.h> |
38 | #include <subdev/instmem.h> | 38 | #include <subdev/instmem.h> |
39 | #include <subdev/vm.h> | 39 | #include <subdev/vm.h> |
@@ -68,20 +68,20 @@ gm100_identify(struct nouveau_device *device) | |||
68 | #endif | 68 | #endif |
69 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 69 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
70 | device->oclass[NVDEV_SUBDEV_DEVINIT] = gm107_devinit_oclass; | 70 | device->oclass[NVDEV_SUBDEV_DEVINIT] = gm107_devinit_oclass; |
71 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; | 71 | device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass; |
72 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 72 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
73 | device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass; | 73 | device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass; |
74 | device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass; | 74 | device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass; |
75 | device->oclass[NVDEV_SUBDEV_LTCG ] = gm107_ltcg_oclass; | 75 | device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass; |
76 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; | 76 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; |
77 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | 77 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
78 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 78 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
79 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 79 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
80 | #if 0 | 80 | #if 0 |
81 | device->oclass[NVDEV_SUBDEV_PWR ] = &nv108_pwr_oclass; | 81 | device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass; |
82 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 82 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
83 | #endif | 83 | #endif |
84 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; | 84 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass; |
85 | device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass; | 85 | device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass; |
86 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; | 86 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; |
87 | device->oclass[NVDEV_ENGINE_GR ] = gm107_graph_oclass; | 87 | device->oclass[NVDEV_ENGINE_GR ] = gm107_graph_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c index 40b29d0214cb..573b55f5c2f9 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c | |||
@@ -56,7 +56,7 @@ nv04_identify(struct nouveau_device *device) | |||
56 | device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass; | 56 | device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass; |
57 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; | 57 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
58 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 58 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
59 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 59 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
60 | device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass; | 60 | device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass; |
61 | device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass; | 61 | device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass; |
62 | device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass; | 62 | device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass; |
@@ -74,7 +74,7 @@ nv04_identify(struct nouveau_device *device) | |||
74 | device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass; | 74 | device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass; |
75 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; | 75 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
76 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 76 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
77 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 77 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
78 | device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass; | 78 | device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass; |
79 | device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass; | 79 | device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass; |
80 | device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass; | 80 | device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c index 5f7c25ff523d..183a85a6204e 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c | |||
@@ -58,7 +58,7 @@ nv10_identify(struct nouveau_device *device) | |||
58 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; | 58 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; |
59 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; | 59 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
60 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 60 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
61 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 61 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
62 | device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; | 62 | device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; |
63 | device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass; | 63 | device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass; |
64 | break; | 64 | break; |
@@ -75,7 +75,7 @@ nv10_identify(struct nouveau_device *device) | |||
75 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; | 75 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; |
76 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; | 76 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
77 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 77 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
78 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 78 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
79 | device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; | 79 | device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; |
80 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 80 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
81 | device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; | 81 | device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; |
@@ -94,7 +94,7 @@ nv10_identify(struct nouveau_device *device) | |||
94 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; | 94 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; |
95 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; | 95 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
96 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 96 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
97 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 97 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
98 | device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; | 98 | device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; |
99 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 99 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
100 | device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; | 100 | device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; |
@@ -113,7 +113,7 @@ nv10_identify(struct nouveau_device *device) | |||
113 | device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass; | 113 | device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass; |
114 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; | 114 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
115 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 115 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
116 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 116 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
117 | device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; | 117 | device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; |
118 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 118 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
119 | device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; | 119 | device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; |
@@ -132,7 +132,7 @@ nv10_identify(struct nouveau_device *device) | |||
132 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; | 132 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; |
133 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; | 133 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
134 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 134 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
135 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 135 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
136 | device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; | 136 | device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; |
137 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 137 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
138 | device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; | 138 | device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; |
@@ -151,7 +151,7 @@ nv10_identify(struct nouveau_device *device) | |||
151 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; | 151 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; |
152 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; | 152 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
153 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 153 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
154 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 154 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
155 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 155 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
156 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 156 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
157 | device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; | 157 | device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; |
@@ -170,7 +170,7 @@ nv10_identify(struct nouveau_device *device) | |||
170 | device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass; | 170 | device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass; |
171 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; | 171 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
172 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 172 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
173 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 173 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
174 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 174 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
175 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 175 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
176 | device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; | 176 | device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; |
@@ -189,7 +189,7 @@ nv10_identify(struct nouveau_device *device) | |||
189 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; | 189 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; |
190 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; | 190 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
191 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 191 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
192 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 192 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
193 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 193 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
194 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 194 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
195 | device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; | 195 | device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c index 75fed11bba0a..aa564c68a920 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c | |||
@@ -59,7 +59,7 @@ nv20_identify(struct nouveau_device *device) | |||
59 | device->oclass[NVDEV_SUBDEV_FB ] = nv20_fb_oclass; | 59 | device->oclass[NVDEV_SUBDEV_FB ] = nv20_fb_oclass; |
60 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; | 60 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
61 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 61 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
62 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 62 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
63 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 63 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
64 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 64 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
65 | device->oclass[NVDEV_ENGINE_GR ] = &nv20_graph_oclass; | 65 | device->oclass[NVDEV_ENGINE_GR ] = &nv20_graph_oclass; |
@@ -78,7 +78,7 @@ nv20_identify(struct nouveau_device *device) | |||
78 | device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass; | 78 | device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass; |
79 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; | 79 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
80 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 80 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
81 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 81 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
82 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 82 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
83 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 83 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
84 | device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass; | 84 | device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass; |
@@ -97,7 +97,7 @@ nv20_identify(struct nouveau_device *device) | |||
97 | device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass; | 97 | device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass; |
98 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; | 98 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
99 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 99 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
100 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 100 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
101 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 101 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
102 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 102 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
103 | device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass; | 103 | device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass; |
@@ -116,7 +116,7 @@ nv20_identify(struct nouveau_device *device) | |||
116 | device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass; | 116 | device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass; |
117 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; | 117 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
118 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 118 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
119 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 119 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
120 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 120 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
121 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 121 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
122 | device->oclass[NVDEV_ENGINE_GR ] = &nv2a_graph_oclass; | 122 | device->oclass[NVDEV_ENGINE_GR ] = &nv2a_graph_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c index 36919d7db7cc..11bd31da82ab 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c | |||
@@ -59,7 +59,7 @@ nv30_identify(struct nouveau_device *device) | |||
59 | device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass; | 59 | device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass; |
60 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; | 60 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
61 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 61 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
62 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 62 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
63 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 63 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
64 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 64 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
65 | device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass; | 65 | device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass; |
@@ -78,7 +78,7 @@ nv30_identify(struct nouveau_device *device) | |||
78 | device->oclass[NVDEV_SUBDEV_FB ] = nv35_fb_oclass; | 78 | device->oclass[NVDEV_SUBDEV_FB ] = nv35_fb_oclass; |
79 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; | 79 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
80 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 80 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
81 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 81 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
82 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 82 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
83 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 83 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
84 | device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass; | 84 | device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass; |
@@ -97,7 +97,7 @@ nv30_identify(struct nouveau_device *device) | |||
97 | device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass; | 97 | device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass; |
98 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; | 98 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
99 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 99 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
100 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 100 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
101 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 101 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
102 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 102 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
103 | device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass; | 103 | device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass; |
@@ -117,7 +117,7 @@ nv30_identify(struct nouveau_device *device) | |||
117 | device->oclass[NVDEV_SUBDEV_FB ] = nv36_fb_oclass; | 117 | device->oclass[NVDEV_SUBDEV_FB ] = nv36_fb_oclass; |
118 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; | 118 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
119 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 119 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
120 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 120 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
121 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 121 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
122 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 122 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
123 | device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass; | 123 | device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass; |
@@ -137,7 +137,7 @@ nv30_identify(struct nouveau_device *device) | |||
137 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; | 137 | device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; |
138 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; | 138 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; |
139 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 139 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
140 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 140 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
141 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; | 141 | device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; |
142 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 142 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
143 | device->oclass[NVDEV_ENGINE_GR ] = &nv34_graph_oclass; | 143 | device->oclass[NVDEV_ENGINE_GR ] = &nv34_graph_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c index 1130a62be2c7..e96c223cb797 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c | |||
@@ -65,7 +65,7 @@ nv40_identify(struct nouveau_device *device) | |||
65 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; | 65 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
66 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 66 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
67 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 67 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
68 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 68 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
69 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; | 69 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; |
70 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 70 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
71 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; | 71 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; |
@@ -88,7 +88,7 @@ nv40_identify(struct nouveau_device *device) | |||
88 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; | 88 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
89 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; | 89 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; |
90 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 90 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
91 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 91 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
92 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; | 92 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; |
93 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 93 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
94 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; | 94 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; |
@@ -111,7 +111,7 @@ nv40_identify(struct nouveau_device *device) | |||
111 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; | 111 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
112 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; | 112 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; |
113 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 113 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
114 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 114 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
115 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; | 115 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; |
116 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 116 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
117 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; | 117 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; |
@@ -134,7 +134,7 @@ nv40_identify(struct nouveau_device *device) | |||
134 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; | 134 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
135 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; | 135 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; |
136 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 136 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
137 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 137 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
138 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; | 138 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; |
139 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 139 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
140 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; | 140 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; |
@@ -157,7 +157,7 @@ nv40_identify(struct nouveau_device *device) | |||
157 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; | 157 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
158 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; | 158 | device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; |
159 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 159 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
160 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 160 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
161 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; | 161 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; |
162 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 162 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
163 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; | 163 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; |
@@ -180,7 +180,7 @@ nv40_identify(struct nouveau_device *device) | |||
180 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; | 180 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
181 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; | 181 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; |
182 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 182 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
183 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 183 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
184 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; | 184 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; |
185 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 185 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
186 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; | 186 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; |
@@ -203,7 +203,7 @@ nv40_identify(struct nouveau_device *device) | |||
203 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; | 203 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
204 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; | 204 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; |
205 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 205 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
206 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 206 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
207 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; | 207 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; |
208 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 208 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
209 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; | 209 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; |
@@ -226,7 +226,7 @@ nv40_identify(struct nouveau_device *device) | |||
226 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; | 226 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
227 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; | 227 | device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; |
228 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 228 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
229 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 229 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
230 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; | 230 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; |
231 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 231 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
232 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; | 232 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; |
@@ -249,7 +249,7 @@ nv40_identify(struct nouveau_device *device) | |||
249 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; | 249 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
250 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; | 250 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; |
251 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 251 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
252 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 252 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
253 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; | 253 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; |
254 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 254 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
255 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; | 255 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; |
@@ -272,7 +272,7 @@ nv40_identify(struct nouveau_device *device) | |||
272 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; | 272 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
273 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; | 273 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; |
274 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 274 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
275 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 275 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
276 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; | 276 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; |
277 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 277 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
278 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; | 278 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; |
@@ -295,7 +295,7 @@ nv40_identify(struct nouveau_device *device) | |||
295 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; | 295 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
296 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; | 296 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; |
297 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 297 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
298 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 298 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
299 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; | 299 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; |
300 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 300 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
301 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; | 301 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; |
@@ -318,7 +318,7 @@ nv40_identify(struct nouveau_device *device) | |||
318 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; | 318 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
319 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; | 319 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; |
320 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 320 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
321 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 321 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
322 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; | 322 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; |
323 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 323 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
324 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; | 324 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; |
@@ -341,7 +341,7 @@ nv40_identify(struct nouveau_device *device) | |||
341 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; | 341 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
342 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; | 342 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; |
343 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 343 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
344 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 344 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
345 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; | 345 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; |
346 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 346 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
347 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; | 347 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; |
@@ -364,7 +364,7 @@ nv40_identify(struct nouveau_device *device) | |||
364 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; | 364 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
365 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; | 365 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; |
366 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 366 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
367 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 367 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
368 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; | 368 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; |
369 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 369 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
370 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; | 370 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; |
@@ -387,7 +387,7 @@ nv40_identify(struct nouveau_device *device) | |||
387 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; | 387 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
388 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; | 388 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; |
389 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 389 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
390 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 390 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
391 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; | 391 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; |
392 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 392 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
393 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; | 393 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; |
@@ -410,7 +410,7 @@ nv40_identify(struct nouveau_device *device) | |||
410 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; | 410 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; |
411 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; | 411 | device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; |
412 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 412 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
413 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; | 413 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; |
414 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; | 414 | device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; |
415 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; | 415 | device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; |
416 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; | 416 | device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c index ef0b0bde1a91..932f84fae459 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c | |||
@@ -74,7 +74,7 @@ nv50_identify(struct nouveau_device *device) | |||
74 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 74 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
75 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 75 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
76 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 76 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
77 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; | 77 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; |
78 | device->oclass[NVDEV_ENGINE_FIFO ] = nv50_fifo_oclass; | 78 | device->oclass[NVDEV_ENGINE_FIFO ] = nv50_fifo_oclass; |
79 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; | 79 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; |
80 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; | 80 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; |
@@ -99,7 +99,7 @@ nv50_identify(struct nouveau_device *device) | |||
99 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 99 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
100 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 100 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
101 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 101 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
102 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; | 102 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; |
103 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; | 103 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; |
104 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; | 104 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; |
105 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; | 105 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; |
@@ -127,7 +127,7 @@ nv50_identify(struct nouveau_device *device) | |||
127 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 127 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
128 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 128 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
129 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 129 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
130 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; | 130 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; |
131 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; | 131 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; |
132 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; | 132 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; |
133 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; | 133 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; |
@@ -155,7 +155,7 @@ nv50_identify(struct nouveau_device *device) | |||
155 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 155 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
156 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 156 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
157 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 157 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
158 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; | 158 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; |
159 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; | 159 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; |
160 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; | 160 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; |
161 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; | 161 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; |
@@ -183,7 +183,7 @@ nv50_identify(struct nouveau_device *device) | |||
183 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 183 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
184 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 184 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
185 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 185 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
186 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; | 186 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; |
187 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; | 187 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; |
188 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; | 188 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; |
189 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; | 189 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; |
@@ -211,7 +211,7 @@ nv50_identify(struct nouveau_device *device) | |||
211 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 211 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
212 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 212 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
213 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 213 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
214 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; | 214 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; |
215 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; | 215 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; |
216 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; | 216 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; |
217 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; | 217 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; |
@@ -239,7 +239,7 @@ nv50_identify(struct nouveau_device *device) | |||
239 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 239 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
240 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 240 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
241 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 241 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
242 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; | 242 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; |
243 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; | 243 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; |
244 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; | 244 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; |
245 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; | 245 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; |
@@ -267,7 +267,7 @@ nv50_identify(struct nouveau_device *device) | |||
267 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 267 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
268 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 268 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
269 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 269 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
270 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; | 270 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; |
271 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; | 271 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; |
272 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; | 272 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; |
273 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; | 273 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; |
@@ -295,7 +295,7 @@ nv50_identify(struct nouveau_device *device) | |||
295 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 295 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
296 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 296 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
297 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 297 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
298 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; | 298 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; |
299 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; | 299 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; |
300 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; | 300 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; |
301 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; | 301 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; |
@@ -323,7 +323,7 @@ nv50_identify(struct nouveau_device *device) | |||
323 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 323 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
324 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 324 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
325 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 325 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
326 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; | 326 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; |
327 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; | 327 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; |
328 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; | 328 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; |
329 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; | 329 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; |
@@ -350,9 +350,9 @@ nv50_identify(struct nouveau_device *device) | |||
350 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | 350 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
351 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 351 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
352 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 352 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
353 | device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass; | 353 | device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass; |
354 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 354 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
355 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; | 355 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; |
356 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; | 356 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; |
357 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; | 357 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; |
358 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; | 358 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; |
@@ -380,9 +380,9 @@ nv50_identify(struct nouveau_device *device) | |||
380 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | 380 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
381 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 381 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
382 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 382 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
383 | device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass; | 383 | device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass; |
384 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 384 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
385 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; | 385 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; |
386 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; | 386 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; |
387 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; | 387 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; |
388 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; | 388 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; |
@@ -409,9 +409,9 @@ nv50_identify(struct nouveau_device *device) | |||
409 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | 409 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
410 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 410 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
411 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 411 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
412 | device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass; | 412 | device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass; |
413 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 413 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
414 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; | 414 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; |
415 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; | 415 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; |
416 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; | 416 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; |
417 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; | 417 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; |
@@ -438,9 +438,9 @@ nv50_identify(struct nouveau_device *device) | |||
438 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | 438 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
439 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; | 439 | device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; |
440 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; | 440 | device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; |
441 | device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass; | 441 | device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass; |
442 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 442 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
443 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; | 443 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; |
444 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; | 444 | device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; |
445 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; | 445 | device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; |
446 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; | 446 | device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c index 8d55ed633b19..b4a2917ce555 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c | |||
@@ -33,7 +33,7 @@ | |||
33 | #include <subdev/mc.h> | 33 | #include <subdev/mc.h> |
34 | #include <subdev/timer.h> | 34 | #include <subdev/timer.h> |
35 | #include <subdev/fb.h> | 35 | #include <subdev/fb.h> |
36 | #include <subdev/ltcg.h> | 36 | #include <subdev/ltc.h> |
37 | #include <subdev/ibus.h> | 37 | #include <subdev/ibus.h> |
38 | #include <subdev/instmem.h> | 38 | #include <subdev/instmem.h> |
39 | #include <subdev/vm.h> | 39 | #include <subdev/vm.h> |
@@ -70,14 +70,14 @@ nvc0_identify(struct nouveau_device *device) | |||
70 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 70 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
71 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 71 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
72 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; | 72 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; |
73 | device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; | 73 | device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass; |
74 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; | 74 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; |
75 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | 75 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
76 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 76 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
77 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 77 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
78 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; | 78 | device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass; |
79 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 79 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
80 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; | 80 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass; |
81 | device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; | 81 | device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; |
82 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; | 82 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; |
83 | device->oclass[NVDEV_ENGINE_GR ] = nvc0_graph_oclass; | 83 | device->oclass[NVDEV_ENGINE_GR ] = nvc0_graph_oclass; |
@@ -102,14 +102,14 @@ nvc0_identify(struct nouveau_device *device) | |||
102 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 102 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
103 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 103 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
104 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; | 104 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; |
105 | device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; | 105 | device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass; |
106 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; | 106 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; |
107 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | 107 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
108 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 108 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
109 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 109 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
110 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; | 110 | device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass; |
111 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 111 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
112 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; | 112 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass; |
113 | device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; | 113 | device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; |
114 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; | 114 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; |
115 | device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass; | 115 | device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass; |
@@ -134,14 +134,14 @@ nvc0_identify(struct nouveau_device *device) | |||
134 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 134 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
135 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 135 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
136 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; | 136 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; |
137 | device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; | 137 | device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass; |
138 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; | 138 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; |
139 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | 139 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
140 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 140 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
141 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 141 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
142 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; | 142 | device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass; |
143 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 143 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
144 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; | 144 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass; |
145 | device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; | 145 | device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; |
146 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; | 146 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; |
147 | device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass; | 147 | device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass; |
@@ -165,14 +165,14 @@ nvc0_identify(struct nouveau_device *device) | |||
165 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 165 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
166 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 166 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
167 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; | 167 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; |
168 | device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; | 168 | device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass; |
169 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; | 169 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; |
170 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | 170 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
171 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 171 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
172 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 172 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
173 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; | 173 | device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass; |
174 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 174 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
175 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; | 175 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass; |
176 | device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; | 176 | device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; |
177 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; | 177 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; |
178 | device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass; | 178 | device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass; |
@@ -197,14 +197,14 @@ nvc0_identify(struct nouveau_device *device) | |||
197 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 197 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
198 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 198 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
199 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; | 199 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; |
200 | device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; | 200 | device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass; |
201 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; | 201 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; |
202 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | 202 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
203 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 203 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
204 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 204 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
205 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; | 205 | device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass; |
206 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 206 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
207 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; | 207 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass; |
208 | device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; | 208 | device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; |
209 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; | 209 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; |
210 | device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass; | 210 | device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass; |
@@ -229,14 +229,14 @@ nvc0_identify(struct nouveau_device *device) | |||
229 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 229 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
230 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 230 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
231 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; | 231 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; |
232 | device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; | 232 | device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass; |
233 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; | 233 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; |
234 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | 234 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
235 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 235 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
236 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 236 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
237 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; | 237 | device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass; |
238 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 238 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
239 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; | 239 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass; |
240 | device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; | 240 | device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; |
241 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; | 241 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; |
242 | device->oclass[NVDEV_ENGINE_GR ] = nvc1_graph_oclass; | 242 | device->oclass[NVDEV_ENGINE_GR ] = nvc1_graph_oclass; |
@@ -260,14 +260,14 @@ nvc0_identify(struct nouveau_device *device) | |||
260 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 260 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
261 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 261 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
262 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; | 262 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; |
263 | device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; | 263 | device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass; |
264 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; | 264 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; |
265 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | 265 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
266 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 266 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
267 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 267 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
268 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; | 268 | device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass; |
269 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 269 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
270 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; | 270 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass; |
271 | device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; | 271 | device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; |
272 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; | 272 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; |
273 | device->oclass[NVDEV_ENGINE_GR ] = nvc8_graph_oclass; | 273 | device->oclass[NVDEV_ENGINE_GR ] = nvc8_graph_oclass; |
@@ -292,14 +292,14 @@ nvc0_identify(struct nouveau_device *device) | |||
292 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 292 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
293 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 293 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
294 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; | 294 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; |
295 | device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; | 295 | device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass; |
296 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; | 296 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; |
297 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | 297 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
298 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 298 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
299 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 299 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
300 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass; | 300 | device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass; |
301 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 301 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
302 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; | 302 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass; |
303 | device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; | 303 | device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; |
304 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; | 304 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; |
305 | device->oclass[NVDEV_ENGINE_GR ] = nvd9_graph_oclass; | 305 | device->oclass[NVDEV_ENGINE_GR ] = nvd9_graph_oclass; |
@@ -323,12 +323,12 @@ nvc0_identify(struct nouveau_device *device) | |||
323 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 323 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
324 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 324 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
325 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; | 325 | device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; |
326 | device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; | 326 | device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass; |
327 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; | 327 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; |
328 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | 328 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
329 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 329 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
330 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 330 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
331 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; | 331 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass; |
332 | device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; | 332 | device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; |
333 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; | 333 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; |
334 | device->oclass[NVDEV_ENGINE_GR ] = nvd7_graph_oclass; | 334 | device->oclass[NVDEV_ENGINE_GR ] = nvd7_graph_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c index 2d1e97d4264f..54ec53bc6252 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c | |||
@@ -33,7 +33,7 @@ | |||
33 | #include <subdev/mc.h> | 33 | #include <subdev/mc.h> |
34 | #include <subdev/timer.h> | 34 | #include <subdev/timer.h> |
35 | #include <subdev/fb.h> | 35 | #include <subdev/fb.h> |
36 | #include <subdev/ltcg.h> | 36 | #include <subdev/ltc.h> |
37 | #include <subdev/ibus.h> | 37 | #include <subdev/ibus.h> |
38 | #include <subdev/instmem.h> | 38 | #include <subdev/instmem.h> |
39 | #include <subdev/vm.h> | 39 | #include <subdev/vm.h> |
@@ -70,14 +70,14 @@ nve0_identify(struct nouveau_device *device) | |||
70 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 70 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
71 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 71 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
72 | device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; | 72 | device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; |
73 | device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; | 73 | device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass; |
74 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; | 74 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; |
75 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | 75 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
76 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 76 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
77 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 77 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
78 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass; | 78 | device->oclass[NVDEV_SUBDEV_PWR ] = gk104_pwr_oclass; |
79 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 79 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
80 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; | 80 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass; |
81 | device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass; | 81 | device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass; |
82 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; | 82 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; |
83 | device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass; | 83 | device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass; |
@@ -103,14 +103,14 @@ nve0_identify(struct nouveau_device *device) | |||
103 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 103 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
104 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 104 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
105 | device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; | 105 | device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; |
106 | device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; | 106 | device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass; |
107 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; | 107 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; |
108 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | 108 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
109 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 109 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
110 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 110 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
111 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass; | 111 | device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass; |
112 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 112 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
113 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; | 113 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass; |
114 | device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass; | 114 | device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass; |
115 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; | 115 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; |
116 | device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass; | 116 | device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass; |
@@ -136,14 +136,14 @@ nve0_identify(struct nouveau_device *device) | |||
136 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 136 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
137 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 137 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
138 | device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; | 138 | device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; |
139 | device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; | 139 | device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass; |
140 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; | 140 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; |
141 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | 141 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
142 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 142 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
143 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 143 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
144 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass; | 144 | device->oclass[NVDEV_SUBDEV_PWR ] = gk104_pwr_oclass; |
145 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 145 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
146 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; | 146 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass; |
147 | device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass; | 147 | device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass; |
148 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; | 148 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; |
149 | device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass; | 149 | device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass; |
@@ -158,15 +158,16 @@ nve0_identify(struct nouveau_device *device) | |||
158 | break; | 158 | break; |
159 | case 0xea: | 159 | case 0xea: |
160 | device->cname = "GK20A"; | 160 | device->cname = "GK20A"; |
161 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; | 161 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &gk20a_clock_oclass; |
162 | device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass; | ||
162 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 163 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
163 | device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass; | 164 | device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass; |
164 | device->oclass[NVDEV_SUBDEV_FB ] = gk20a_fb_oclass; | 165 | device->oclass[NVDEV_SUBDEV_FB ] = gk20a_fb_oclass; |
165 | device->oclass[NVDEV_SUBDEV_IBUS ] = &gk20a_ibus_oclass; | 166 | device->oclass[NVDEV_SUBDEV_IBUS ] = &gk20a_ibus_oclass; |
166 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | 167 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
167 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 168 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
168 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 169 | device->oclass[NVDEV_SUBDEV_BAR ] = &gk20a_bar_oclass; |
169 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; | 170 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass; |
170 | device->oclass[NVDEV_ENGINE_FIFO ] = gk20a_fifo_oclass; | 171 | device->oclass[NVDEV_ENGINE_FIFO ] = gk20a_fifo_oclass; |
171 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; | 172 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; |
172 | device->oclass[NVDEV_ENGINE_GR ] = gk20a_graph_oclass; | 173 | device->oclass[NVDEV_ENGINE_GR ] = gk20a_graph_oclass; |
@@ -186,14 +187,14 @@ nve0_identify(struct nouveau_device *device) | |||
186 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 187 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
187 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 188 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
188 | device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; | 189 | device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; |
189 | device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; | 190 | device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass; |
190 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; | 191 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; |
191 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | 192 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
192 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 193 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
193 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 194 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
194 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass; | 195 | device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass; |
195 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 196 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
196 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; | 197 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass; |
197 | device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass; | 198 | device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass; |
198 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; | 199 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; |
199 | device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass; | 200 | device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass; |
@@ -219,17 +220,17 @@ nve0_identify(struct nouveau_device *device) | |||
219 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 220 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
220 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 221 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
221 | device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; | 222 | device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; |
222 | device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; | 223 | device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass; |
223 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; | 224 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; |
224 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | 225 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
225 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 226 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
226 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 227 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
227 | device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass; | 228 | device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass; |
228 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 229 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
229 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; | 230 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass; |
230 | device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass; | 231 | device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass; |
231 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; | 232 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; |
232 | device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass; | 233 | device->oclass[NVDEV_ENGINE_GR ] = gk110b_graph_oclass; |
233 | device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass; | 234 | device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass; |
234 | device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; | 235 | device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; |
235 | device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass; | 236 | device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass; |
@@ -248,18 +249,18 @@ nve0_identify(struct nouveau_device *device) | |||
248 | device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; | 249 | device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; |
249 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | 250 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; |
250 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; | 251 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; |
251 | device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; | 252 | device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass; |
252 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; | 253 | device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; |
253 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 254 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
254 | device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; | 255 | device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; |
255 | device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; | 256 | device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass; |
256 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; | 257 | device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; |
257 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | 258 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; |
258 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; | 259 | device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; |
259 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; | 260 | device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; |
260 | device->oclass[NVDEV_SUBDEV_PWR ] = &nv108_pwr_oclass; | 261 | device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass; |
261 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | 262 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; |
262 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; | 263 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass; |
263 | device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass; | 264 | device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass; |
264 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; | 265 | device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; |
265 | device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass; | 266 | device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/base.c b/drivers/gpu/drm/nouveau/core/engine/disp/base.c index 9c38c5e40500..22d55f6cde50 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/base.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/base.c | |||
@@ -22,23 +22,93 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | ||
26 | #include <nvif/unpack.h> | ||
27 | #include <nvif/class.h> | ||
28 | #include <nvif/event.h> | ||
29 | |||
25 | #include "priv.h" | 30 | #include "priv.h" |
26 | #include "outp.h" | 31 | #include "outp.h" |
27 | #include "conn.h" | 32 | #include "conn.h" |
28 | 33 | ||
34 | int | ||
35 | nouveau_disp_vblank_ctor(void *data, u32 size, struct nvkm_notify *notify) | ||
36 | { | ||
37 | struct nouveau_disp *disp = | ||
38 | container_of(notify->event, typeof(*disp), vblank); | ||
39 | union { | ||
40 | struct nvif_notify_head_req_v0 v0; | ||
41 | } *req = data; | ||
42 | int ret; | ||
43 | |||
44 | if (nvif_unpack(req->v0, 0, 0, false)) { | ||
45 | notify->size = sizeof(struct nvif_notify_head_rep_v0); | ||
46 | if (ret = -ENXIO, req->v0.head <= disp->vblank.index_nr) { | ||
47 | notify->types = 1; | ||
48 | notify->index = req->v0.head; | ||
49 | return 0; | ||
50 | } | ||
51 | } | ||
52 | |||
53 | return ret; | ||
54 | } | ||
55 | |||
56 | void | ||
57 | nouveau_disp_vblank(struct nouveau_disp *disp, int head) | ||
58 | { | ||
59 | struct nvif_notify_head_rep_v0 rep = {}; | ||
60 | nvkm_event_send(&disp->vblank, 1, head, &rep, sizeof(rep)); | ||
61 | } | ||
62 | |||
29 | static int | 63 | static int |
30 | nouveau_disp_hpd_check(struct nouveau_event *event, u32 types, int index) | 64 | nouveau_disp_hpd_ctor(void *data, u32 size, struct nvkm_notify *notify) |
31 | { | 65 | { |
32 | struct nouveau_disp *disp = event->priv; | 66 | struct nouveau_disp *disp = |
67 | container_of(notify->event, typeof(*disp), hpd); | ||
68 | union { | ||
69 | struct nvif_notify_conn_req_v0 v0; | ||
70 | } *req = data; | ||
33 | struct nvkm_output *outp; | 71 | struct nvkm_output *outp; |
34 | list_for_each_entry(outp, &disp->outp, head) { | 72 | int ret; |
35 | if (outp->conn->index == index) { | 73 | |
36 | if (outp->conn->hpd.event) | 74 | if (nvif_unpack(req->v0, 0, 0, false)) { |
37 | return 0; | 75 | notify->size = sizeof(struct nvif_notify_conn_rep_v0); |
38 | break; | 76 | list_for_each_entry(outp, &disp->outp, head) { |
77 | if (ret = -ENXIO, outp->conn->index == req->v0.conn) { | ||
78 | if (ret = -ENODEV, outp->conn->hpd.event) { | ||
79 | notify->types = req->v0.mask; | ||
80 | notify->index = req->v0.conn; | ||
81 | ret = 0; | ||
82 | } | ||
83 | break; | ||
84 | } | ||
39 | } | 85 | } |
40 | } | 86 | } |
41 | return -ENOSYS; | 87 | |
88 | return ret; | ||
89 | } | ||
90 | |||
91 | static const struct nvkm_event_func | ||
92 | nouveau_disp_hpd_func = { | ||
93 | .ctor = nouveau_disp_hpd_ctor | ||
94 | }; | ||
95 | |||
96 | int | ||
97 | nouveau_disp_ntfy(struct nouveau_object *object, u32 type, | ||
98 | struct nvkm_event **event) | ||
99 | { | ||
100 | struct nouveau_disp *disp = (void *)object->engine; | ||
101 | switch (type) { | ||
102 | case NV04_DISP_NTFY_VBLANK: | ||
103 | *event = &disp->vblank; | ||
104 | return 0; | ||
105 | case NV04_DISP_NTFY_CONN: | ||
106 | *event = &disp->hpd; | ||
107 | return 0; | ||
108 | default: | ||
109 | break; | ||
110 | } | ||
111 | return -EINVAL; | ||
42 | } | 112 | } |
43 | 113 | ||
44 | int | 114 | int |
@@ -97,7 +167,8 @@ _nouveau_disp_dtor(struct nouveau_object *object) | |||
97 | struct nouveau_disp *disp = (void *)object; | 167 | struct nouveau_disp *disp = (void *)object; |
98 | struct nvkm_output *outp, *outt; | 168 | struct nvkm_output *outp, *outt; |
99 | 169 | ||
100 | nouveau_event_destroy(&disp->vblank); | 170 | nvkm_event_fini(&disp->vblank); |
171 | nvkm_event_fini(&disp->hpd); | ||
101 | 172 | ||
102 | if (disp->outp.next) { | 173 | if (disp->outp.next) { |
103 | list_for_each_entry_safe(outp, outt, &disp->outp, head) { | 174 | list_for_each_entry_safe(outp, outt, &disp->outp, head) { |
@@ -157,14 +228,11 @@ nouveau_disp_create_(struct nouveau_object *parent, | |||
157 | hpd = max(hpd, (u8)(dcbE.connector + 1)); | 228 | hpd = max(hpd, (u8)(dcbE.connector + 1)); |
158 | } | 229 | } |
159 | 230 | ||
160 | ret = nouveau_event_create(3, hpd, &disp->hpd); | 231 | ret = nvkm_event_init(&nouveau_disp_hpd_func, 3, hpd, &disp->hpd); |
161 | if (ret) | 232 | if (ret) |
162 | return ret; | 233 | return ret; |
163 | 234 | ||
164 | disp->hpd->priv = disp; | 235 | ret = nvkm_event_init(impl->vblank, 1, heads, &disp->vblank); |
165 | disp->hpd->check = nouveau_disp_hpd_check; | ||
166 | |||
167 | ret = nouveau_event_create(1, heads, &disp->vblank); | ||
168 | if (ret) | 236 | if (ret) |
169 | return ret; | 237 | return ret; |
170 | 238 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/conn.c b/drivers/gpu/drm/nouveau/core/engine/disp/conn.c index 4ffbc70ecf5a..3d1070228977 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/conn.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/conn.c | |||
@@ -22,39 +22,41 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | ||
26 | #include <nvif/event.h> | ||
27 | |||
25 | #include <subdev/gpio.h> | 28 | #include <subdev/gpio.h> |
26 | 29 | ||
27 | #include "conn.h" | 30 | #include "conn.h" |
28 | #include "outp.h" | 31 | #include "outp.h" |
29 | 32 | ||
30 | static void | 33 | static int |
31 | nvkm_connector_hpd_work(struct work_struct *w) | 34 | nvkm_connector_hpd(struct nvkm_notify *notify) |
32 | { | 35 | { |
33 | struct nvkm_connector *conn = container_of(w, typeof(*conn), hpd.work); | 36 | struct nvkm_connector *conn = container_of(notify, typeof(*conn), hpd); |
34 | struct nouveau_disp *disp = nouveau_disp(conn); | 37 | struct nouveau_disp *disp = nouveau_disp(conn); |
35 | struct nouveau_gpio *gpio = nouveau_gpio(conn); | 38 | struct nouveau_gpio *gpio = nouveau_gpio(conn); |
36 | u32 send = NVKM_HPD_UNPLUG; | 39 | const struct nvkm_gpio_ntfy_rep *line = notify->data; |
37 | if (gpio->get(gpio, 0, DCB_GPIO_UNUSED, conn->hpd.event->index)) | 40 | struct nvif_notify_conn_rep_v0 rep; |
38 | send = NVKM_HPD_PLUG; | 41 | int index = conn->index; |
39 | nouveau_event_trigger(disp->hpd, send, conn->index); | ||
40 | nouveau_event_get(conn->hpd.event); | ||
41 | } | ||
42 | 42 | ||
43 | static int | 43 | DBG("HPD: %d\n", line->mask); |
44 | nvkm_connector_hpd(void *data, u32 type, int index) | 44 | |
45 | { | 45 | if (!gpio->get(gpio, 0, DCB_GPIO_UNUSED, conn->hpd.index)) |
46 | struct nvkm_connector *conn = data; | 46 | rep.mask = NVIF_NOTIFY_CONN_V0_UNPLUG; |
47 | DBG("HPD: %d\n", type); | 47 | else |
48 | schedule_work(&conn->hpd.work); | 48 | rep.mask = NVIF_NOTIFY_CONN_V0_PLUG; |
49 | return NVKM_EVENT_DROP; | 49 | rep.version = 0; |
50 | |||
51 | nvkm_event_send(&disp->hpd, rep.mask, index, &rep, sizeof(rep)); | ||
52 | return NVKM_NOTIFY_KEEP; | ||
50 | } | 53 | } |
51 | 54 | ||
52 | int | 55 | int |
53 | _nvkm_connector_fini(struct nouveau_object *object, bool suspend) | 56 | _nvkm_connector_fini(struct nouveau_object *object, bool suspend) |
54 | { | 57 | { |
55 | struct nvkm_connector *conn = (void *)object; | 58 | struct nvkm_connector *conn = (void *)object; |
56 | if (conn->hpd.event) | 59 | nvkm_notify_put(&conn->hpd); |
57 | nouveau_event_put(conn->hpd.event); | ||
58 | return nouveau_object_fini(&conn->base, suspend); | 60 | return nouveau_object_fini(&conn->base, suspend); |
59 | } | 61 | } |
60 | 62 | ||
@@ -63,10 +65,8 @@ _nvkm_connector_init(struct nouveau_object *object) | |||
63 | { | 65 | { |
64 | struct nvkm_connector *conn = (void *)object; | 66 | struct nvkm_connector *conn = (void *)object; |
65 | int ret = nouveau_object_init(&conn->base); | 67 | int ret = nouveau_object_init(&conn->base); |
66 | if (ret == 0) { | 68 | if (ret == 0) |
67 | if (conn->hpd.event) | 69 | nvkm_notify_get(&conn->hpd); |
68 | nouveau_event_get(conn->hpd.event); | ||
69 | } | ||
70 | return ret; | 70 | return ret; |
71 | } | 71 | } |
72 | 72 | ||
@@ -74,7 +74,7 @@ void | |||
74 | _nvkm_connector_dtor(struct nouveau_object *object) | 74 | _nvkm_connector_dtor(struct nouveau_object *object) |
75 | { | 75 | { |
76 | struct nvkm_connector *conn = (void *)object; | 76 | struct nvkm_connector *conn = (void *)object; |
77 | nouveau_event_ref(NULL, &conn->hpd.event); | 77 | nvkm_notify_fini(&conn->hpd); |
78 | nouveau_object_destroy(&conn->base); | 78 | nouveau_object_destroy(&conn->base); |
79 | } | 79 | } |
80 | 80 | ||
@@ -116,19 +116,24 @@ nvkm_connector_create_(struct nouveau_object *parent, | |||
116 | if ((info->hpd = ffs(info->hpd))) { | 116 | if ((info->hpd = ffs(info->hpd))) { |
117 | if (--info->hpd >= ARRAY_SIZE(hpd)) { | 117 | if (--info->hpd >= ARRAY_SIZE(hpd)) { |
118 | ERR("hpd %02x unknown\n", info->hpd); | 118 | ERR("hpd %02x unknown\n", info->hpd); |
119 | goto done; | 119 | return 0; |
120 | } | 120 | } |
121 | info->hpd = hpd[info->hpd]; | 121 | info->hpd = hpd[info->hpd]; |
122 | 122 | ||
123 | ret = gpio->find(gpio, 0, info->hpd, DCB_GPIO_UNUSED, &func); | 123 | ret = gpio->find(gpio, 0, info->hpd, DCB_GPIO_UNUSED, &func); |
124 | if (ret) { | 124 | if (ret) { |
125 | ERR("func %02x lookup failed, %d\n", info->hpd, ret); | 125 | ERR("func %02x lookup failed, %d\n", info->hpd, ret); |
126 | goto done; | 126 | return 0; |
127 | } | 127 | } |
128 | 128 | ||
129 | ret = nouveau_event_new(gpio->events, NVKM_GPIO_TOGGLED, | 129 | ret = nvkm_notify_init(&gpio->event, nvkm_connector_hpd, true, |
130 | func.line, nvkm_connector_hpd, | 130 | &(struct nvkm_gpio_ntfy_req) { |
131 | conn, &conn->hpd.event); | 131 | .mask = NVKM_GPIO_TOGGLED, |
132 | .line = func.line, | ||
133 | }, | ||
134 | sizeof(struct nvkm_gpio_ntfy_req), | ||
135 | sizeof(struct nvkm_gpio_ntfy_rep), | ||
136 | &conn->hpd); | ||
132 | if (ret) { | 137 | if (ret) { |
133 | ERR("func %02x failed, %d\n", info->hpd, ret); | 138 | ERR("func %02x failed, %d\n", info->hpd, ret); |
134 | } else { | 139 | } else { |
@@ -136,8 +141,6 @@ nvkm_connector_create_(struct nouveau_object *parent, | |||
136 | } | 141 | } |
137 | } | 142 | } |
138 | 143 | ||
139 | done: | ||
140 | INIT_WORK(&conn->hpd.work, nvkm_connector_hpd_work); | ||
141 | return 0; | 144 | return 0; |
142 | } | 145 | } |
143 | 146 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/conn.h b/drivers/gpu/drm/nouveau/core/engine/disp/conn.h index 035ebeacbb1c..55e5f5c82c14 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/conn.h +++ b/drivers/gpu/drm/nouveau/core/engine/disp/conn.h | |||
@@ -10,10 +10,7 @@ struct nvkm_connector { | |||
10 | struct nvbios_connE info; | 10 | struct nvbios_connE info; |
11 | int index; | 11 | int index; |
12 | 12 | ||
13 | struct { | 13 | struct nvkm_notify hpd; |
14 | struct nouveau_eventh *event; | ||
15 | struct work_struct work; | ||
16 | } hpd; | ||
17 | }; | 14 | }; |
18 | 15 | ||
19 | #define nvkm_connector_create(p,e,c,b,i,d) \ | 16 | #define nvkm_connector_create(p,e,c,b,i,d) \ |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c index a66b27c0fcab..b36addff06a9 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c | |||
@@ -22,8 +22,9 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/client.h> |
26 | #include <core/class.h> | 26 | #include <nvif/unpack.h> |
27 | #include <nvif/class.h> | ||
27 | 28 | ||
28 | #include <subdev/bios.h> | 29 | #include <subdev/bios.h> |
29 | #include <subdev/bios/dcb.h> | 30 | #include <subdev/bios/dcb.h> |
@@ -32,13 +33,28 @@ | |||
32 | #include "nv50.h" | 33 | #include "nv50.h" |
33 | 34 | ||
34 | int | 35 | int |
35 | nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data) | 36 | nv50_dac_power(NV50_DISP_MTHD_V1) |
36 | { | 37 | { |
37 | const u32 stat = (data & NV50_DISP_DAC_PWR_HSYNC) | | 38 | const u32 doff = outp->or * 0x800; |
38 | (data & NV50_DISP_DAC_PWR_VSYNC) | | 39 | union { |
39 | (data & NV50_DISP_DAC_PWR_DATA) | | 40 | struct nv50_disp_dac_pwr_v0 v0; |
40 | (data & NV50_DISP_DAC_PWR_STATE); | 41 | } *args = data; |
41 | const u32 doff = (or * 0x800); | 42 | u32 stat; |
43 | int ret; | ||
44 | |||
45 | nv_ioctl(object, "disp dac pwr size %d\n", size); | ||
46 | if (nvif_unpack(args->v0, 0, 0, false)) { | ||
47 | nv_ioctl(object, "disp dac pwr vers %d state %d data %d " | ||
48 | "vsync %d hsync %d\n", | ||
49 | args->v0.version, args->v0.state, args->v0.data, | ||
50 | args->v0.vsync, args->v0.hsync); | ||
51 | stat = 0x00000040 * !args->v0.state; | ||
52 | stat |= 0x00000010 * !args->v0.data; | ||
53 | stat |= 0x00000004 * !args->v0.vsync; | ||
54 | stat |= 0x00000001 * !args->v0.hsync; | ||
55 | } else | ||
56 | return ret; | ||
57 | |||
42 | nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); | 58 | nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); |
43 | nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat); | 59 | nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat); |
44 | nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); | 60 | nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); |
@@ -46,9 +62,24 @@ nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data) | |||
46 | } | 62 | } |
47 | 63 | ||
48 | int | 64 | int |
49 | nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval) | 65 | nv50_dac_sense(NV50_DISP_MTHD_V1) |
50 | { | 66 | { |
51 | const u32 doff = (or * 0x800); | 67 | union { |
68 | struct nv50_disp_dac_load_v0 v0; | ||
69 | } *args = data; | ||
70 | const u32 doff = outp->or * 0x800; | ||
71 | u32 loadval; | ||
72 | int ret; | ||
73 | |||
74 | nv_ioctl(object, "disp dac load size %d\n", size); | ||
75 | if (nvif_unpack(args->v0, 0, 0, false)) { | ||
76 | nv_ioctl(object, "disp dac load vers %d data %08x\n", | ||
77 | args->v0.version, args->v0.data); | ||
78 | if (args->v0.data & 0xfff00000) | ||
79 | return -EINVAL; | ||
80 | loadval = args->v0.data; | ||
81 | } else | ||
82 | return ret; | ||
52 | 83 | ||
53 | nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000); | 84 | nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000); |
54 | nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); | 85 | nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); |
@@ -61,38 +92,10 @@ nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval) | |||
61 | nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000); | 92 | nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000); |
62 | nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); | 93 | nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); |
63 | 94 | ||
64 | nv_debug(priv, "DAC%d sense: 0x%08x\n", or, loadval); | 95 | nv_debug(priv, "DAC%d sense: 0x%08x\n", outp->or, loadval); |
65 | if (!(loadval & 0x80000000)) | 96 | if (!(loadval & 0x80000000)) |
66 | return -ETIMEDOUT; | 97 | return -ETIMEDOUT; |
67 | 98 | ||
68 | return (loadval & 0x38000000) >> 27; | 99 | args->v0.load = (loadval & 0x38000000) >> 27; |
69 | } | 100 | return 0; |
70 | |||
71 | int | ||
72 | nv50_dac_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) | ||
73 | { | ||
74 | struct nv50_disp_priv *priv = (void *)object->engine; | ||
75 | const u8 or = (mthd & NV50_DISP_DAC_MTHD_OR); | ||
76 | u32 *data = args; | ||
77 | int ret; | ||
78 | |||
79 | if (size < sizeof(u32)) | ||
80 | return -EINVAL; | ||
81 | |||
82 | switch (mthd & ~0x3f) { | ||
83 | case NV50_DISP_DAC_PWR: | ||
84 | ret = priv->dac.power(priv, or, data[0]); | ||
85 | break; | ||
86 | case NV50_DISP_DAC_LOAD: | ||
87 | ret = priv->dac.sense(priv, or, data[0]); | ||
88 | if (ret >= 0) { | ||
89 | data[0] = ret; | ||
90 | ret = 0; | ||
91 | } | ||
92 | break; | ||
93 | default: | ||
94 | BUG_ON(1); | ||
95 | } | ||
96 | |||
97 | return ret; | ||
98 | } | 101 | } |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c index 5a5b59b21130..39890221b91c 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c | |||
@@ -30,7 +30,7 @@ | |||
30 | 30 | ||
31 | #include <engine/disp.h> | 31 | #include <engine/disp.h> |
32 | 32 | ||
33 | #include <core/class.h> | 33 | #include <nvif/class.h> |
34 | 34 | ||
35 | #include "dport.h" | 35 | #include "dport.h" |
36 | #include "outpdp.h" | 36 | #include "outpdp.h" |
@@ -335,7 +335,7 @@ nouveau_dp_train(struct work_struct *w) | |||
335 | int ret; | 335 | int ret; |
336 | 336 | ||
337 | /* bring capabilities within encoder limits */ | 337 | /* bring capabilities within encoder limits */ |
338 | if (nv_mclass(disp) < NVD0_DISP_CLASS) | 338 | if (nv_mclass(disp) < GF110_DISP) |
339 | outp->dpcd[2] &= ~DPCD_RC02_TPS3_SUPPORTED; | 339 | outp->dpcd[2] &= ~DPCD_RC02_TPS3_SUPPORTED; |
340 | if ((outp->dpcd[2] & 0x1f) > outp->base.info.dpconf.link_nr) { | 340 | if ((outp->dpcd[2] & 0x1f) > outp->base.info.dpconf.link_nr) { |
341 | outp->dpcd[2] &= ~DPCD_RC02_MAX_LANE_COUNT; | 341 | outp->dpcd[2] &= ~DPCD_RC02_MAX_LANE_COUNT; |
@@ -354,7 +354,7 @@ nouveau_dp_train(struct work_struct *w) | |||
354 | cfg--; | 354 | cfg--; |
355 | 355 | ||
356 | /* disable link interrupt handling during link training */ | 356 | /* disable link interrupt handling during link training */ |
357 | nouveau_event_put(outp->irq); | 357 | nvkm_notify_put(&outp->irq); |
358 | 358 | ||
359 | /* enable down-spreading and execute pre-train script from vbios */ | 359 | /* enable down-spreading and execute pre-train script from vbios */ |
360 | dp_link_train_init(dp, outp->dpcd[3] & 0x01); | 360 | dp_link_train_init(dp, outp->dpcd[3] & 0x01); |
@@ -395,5 +395,5 @@ nouveau_dp_train(struct work_struct *w) | |||
395 | DBG("training complete\n"); | 395 | DBG("training complete\n"); |
396 | atomic_set(&outp->lt.done, 1); | 396 | atomic_set(&outp->lt.done, 1); |
397 | wake_up(&outp->lt.wait); | 397 | wake_up(&outp->lt.wait); |
398 | nouveau_event_get(outp->irq); | 398 | nvkm_notify_get(&outp->irq); |
399 | } | 399 | } |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c index 9fc7447fec90..d54da8b5f87e 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <engine/software.h> | 25 | #include <engine/software.h> |
26 | #include <engine/disp.h> | 26 | #include <engine/disp.h> |
27 | 27 | ||
28 | #include <core/class.h> | 28 | #include <nvif/class.h> |
29 | 29 | ||
30 | #include "nv50.h" | 30 | #include "nv50.h" |
31 | 31 | ||
@@ -35,17 +35,17 @@ | |||
35 | 35 | ||
36 | static struct nouveau_oclass | 36 | static struct nouveau_oclass |
37 | gm107_disp_sclass[] = { | 37 | gm107_disp_sclass[] = { |
38 | { GM107_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs }, | 38 | { GM107_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base }, |
39 | { GM107_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs }, | 39 | { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base }, |
40 | { GM107_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs }, | 40 | { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base }, |
41 | { GM107_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs }, | 41 | { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base }, |
42 | { GM107_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs }, | 42 | { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base }, |
43 | {} | 43 | {} |
44 | }; | 44 | }; |
45 | 45 | ||
46 | static struct nouveau_oclass | 46 | static struct nouveau_oclass |
47 | gm107_disp_base_oclass[] = { | 47 | gm107_disp_base_oclass[] = { |
48 | { GM107_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds }, | 48 | { GM107_DISP, &nvd0_disp_base_ofuncs }, |
49 | {} | 49 | {} |
50 | }; | 50 | }; |
51 | 51 | ||
@@ -93,9 +93,11 @@ gm107_disp_oclass = &(struct nv50_disp_impl) { | |||
93 | .init = _nouveau_disp_init, | 93 | .init = _nouveau_disp_init, |
94 | .fini = _nouveau_disp_fini, | 94 | .fini = _nouveau_disp_fini, |
95 | }, | 95 | }, |
96 | .base.vblank = &nvd0_disp_vblank_func, | ||
96 | .base.outp = nvd0_disp_outp_sclass, | 97 | .base.outp = nvd0_disp_outp_sclass, |
97 | .mthd.core = &nve0_disp_mast_mthd_chan, | 98 | .mthd.core = &nve0_disp_mast_mthd_chan, |
98 | .mthd.base = &nvd0_disp_sync_mthd_chan, | 99 | .mthd.base = &nvd0_disp_sync_mthd_chan, |
99 | .mthd.ovly = &nve0_disp_ovly_mthd_chan, | 100 | .mthd.ovly = &nve0_disp_ovly_mthd_chan, |
100 | .mthd.prev = -0x020000, | 101 | .mthd.prev = -0x020000, |
102 | .head.scanoutpos = nvd0_disp_base_scanoutpos, | ||
101 | }.base.base; | 103 | }.base.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c index a19e7d79b847..8b4e06abe533 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c | |||
@@ -22,25 +22,37 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/client.h> |
26 | #include <core/class.h> | 26 | #include <nvif/unpack.h> |
27 | #include <nvif/class.h> | ||
27 | 28 | ||
28 | #include "nv50.h" | 29 | #include "nv50.h" |
29 | 30 | ||
30 | int | 31 | int |
31 | nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size) | 32 | nva3_hda_eld(NV50_DISP_MTHD_V1) |
32 | { | 33 | { |
33 | const u32 soff = (or * 0x800); | 34 | union { |
34 | int i; | 35 | struct nv50_disp_sor_hda_eld_v0 v0; |
36 | } *args = data; | ||
37 | const u32 soff = outp->or * 0x800; | ||
38 | int ret, i; | ||
35 | 39 | ||
36 | if (data && data[0]) { | 40 | nv_ioctl(object, "disp sor hda eld size %d\n", size); |
41 | if (nvif_unpack(args->v0, 0, 0, true)) { | ||
42 | nv_ioctl(object, "disp sor hda eld vers %d\n", args->v0.version); | ||
43 | if (size > 0x60) | ||
44 | return -E2BIG; | ||
45 | } else | ||
46 | return ret; | ||
47 | |||
48 | if (size && args->v0.data[0]) { | ||
37 | for (i = 0; i < size; i++) | 49 | for (i = 0; i < size; i++) |
38 | nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]); | 50 | nv_wr32(priv, 0x61c440 + soff, (i << 8) | args->v0.data[0]); |
39 | for (; i < 0x60; i++) | 51 | for (; i < 0x60; i++) |
40 | nv_wr32(priv, 0x61c440 + soff, (i << 8)); | 52 | nv_wr32(priv, 0x61c440 + soff, (i << 8)); |
41 | nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003); | 53 | nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003); |
42 | } else | 54 | } else |
43 | if (data) { | 55 | if (size) { |
44 | nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000001); | 56 | nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000001); |
45 | } else { | 57 | } else { |
46 | nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000); | 58 | nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000); |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c index 717639386ced..baf558fc12fb 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c | |||
@@ -22,8 +22,9 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/client.h> |
26 | #include <core/class.h> | 26 | #include <nvif/unpack.h> |
27 | #include <nvif/class.h> | ||
27 | 28 | ||
28 | #include <subdev/bios.h> | 29 | #include <subdev/bios.h> |
29 | #include <subdev/bios/dcb.h> | 30 | #include <subdev/bios/dcb.h> |
@@ -33,19 +34,30 @@ | |||
33 | #include "nv50.h" | 34 | #include "nv50.h" |
34 | 35 | ||
35 | int | 36 | int |
36 | nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size) | 37 | nvd0_hda_eld(NV50_DISP_MTHD_V1) |
37 | { | 38 | { |
38 | const u32 soff = (or * 0x030); | 39 | union { |
39 | int i; | 40 | struct nv50_disp_sor_hda_eld_v0 v0; |
41 | } *args = data; | ||
42 | const u32 soff = outp->or * 0x030; | ||
43 | int ret, i; | ||
40 | 44 | ||
41 | if (data && data[0]) { | 45 | nv_ioctl(object, "disp sor hda eld size %d\n", size); |
46 | if (nvif_unpack(args->v0, 0, 0, true)) { | ||
47 | nv_ioctl(object, "disp sor hda eld vers %d\n", args->v0.version); | ||
48 | if (size > 0x60) | ||
49 | return -E2BIG; | ||
50 | } else | ||
51 | return ret; | ||
52 | |||
53 | if (size && args->v0.data[0]) { | ||
42 | for (i = 0; i < size; i++) | 54 | for (i = 0; i < size; i++) |
43 | nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]); | 55 | nv_wr32(priv, 0x10ec00 + soff, (i << 8) | args->v0.data[i]); |
44 | for (; i < 0x60; i++) | 56 | for (; i < 0x60; i++) |
45 | nv_wr32(priv, 0x10ec00 + soff, (i << 8)); | 57 | nv_wr32(priv, 0x10ec00 + soff, (i << 8)); |
46 | nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003); | 58 | nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003); |
47 | } else | 59 | } else |
48 | if (data) { | 60 | if (size) { |
49 | nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000001); | 61 | nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000001); |
50 | } else { | 62 | } else { |
51 | nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000); | 63 | nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000); |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c index 7fdade6e604d..fa276dede9cd 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c | |||
@@ -22,17 +22,38 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/client.h> |
26 | #include <core/class.h> | 26 | #include <nvif/unpack.h> |
27 | #include <nvif/class.h> | ||
27 | 28 | ||
28 | #include "nv50.h" | 29 | #include "nv50.h" |
29 | 30 | ||
30 | int | 31 | int |
31 | nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) | 32 | nv84_hdmi_ctrl(NV50_DISP_MTHD_V1) |
32 | { | 33 | { |
33 | const u32 hoff = (head * 0x800); | 34 | const u32 hoff = (head * 0x800); |
35 | union { | ||
36 | struct nv50_disp_sor_hdmi_pwr_v0 v0; | ||
37 | } *args = data; | ||
38 | u32 ctrl; | ||
39 | int ret; | ||
34 | 40 | ||
35 | if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) { | 41 | nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size); |
42 | if (nvif_unpack(args->v0, 0, 0, false)) { | ||
43 | nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d " | ||
44 | "max_ac_packet %d rekey %d\n", | ||
45 | args->v0.version, args->v0.state, | ||
46 | args->v0.max_ac_packet, args->v0.rekey); | ||
47 | if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f) | ||
48 | return -EINVAL; | ||
49 | ctrl = 0x40000000 * !!args->v0.state; | ||
50 | ctrl |= args->v0.max_ac_packet << 16; | ||
51 | ctrl |= args->v0.rekey; | ||
52 | ctrl |= 0x1f000000; /* ??? */ | ||
53 | } else | ||
54 | return ret; | ||
55 | |||
56 | if (!(ctrl & 0x40000000)) { | ||
36 | nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000); | 57 | nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000); |
37 | nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000); | 58 | nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000); |
38 | nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000); | 59 | nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000); |
@@ -65,6 +86,6 @@ nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) | |||
65 | nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */ | 86 | nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */ |
66 | 87 | ||
67 | /* HDMI_CTRL */ | 88 | /* HDMI_CTRL */ |
68 | nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, data | 0x1f000000 /* ??? */); | 89 | nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, ctrl); |
69 | return 0; | 90 | return 0; |
70 | } | 91 | } |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c index db8c6fd46278..57eeed1d1942 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c | |||
@@ -22,17 +22,38 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/client.h> |
26 | #include <core/class.h> | 26 | #include <nvif/unpack.h> |
27 | #include <nvif/class.h> | ||
27 | 28 | ||
28 | #include "nv50.h" | 29 | #include "nv50.h" |
29 | 30 | ||
30 | int | 31 | int |
31 | nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) | 32 | nva3_hdmi_ctrl(NV50_DISP_MTHD_V1) |
32 | { | 33 | { |
33 | const u32 soff = (or * 0x800); | 34 | const u32 soff = outp->or * 0x800; |
35 | union { | ||
36 | struct nv50_disp_sor_hdmi_pwr_v0 v0; | ||
37 | } *args = data; | ||
38 | u32 ctrl; | ||
39 | int ret; | ||
34 | 40 | ||
35 | if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) { | 41 | nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size); |
42 | if (nvif_unpack(args->v0, 0, 0, false)) { | ||
43 | nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d " | ||
44 | "max_ac_packet %d rekey %d\n", | ||
45 | args->v0.version, args->v0.state, | ||
46 | args->v0.max_ac_packet, args->v0.rekey); | ||
47 | if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f) | ||
48 | return -EINVAL; | ||
49 | ctrl = 0x40000000 * !!args->v0.state; | ||
50 | ctrl |= args->v0.max_ac_packet << 16; | ||
51 | ctrl |= args->v0.rekey; | ||
52 | ctrl |= 0x1f000000; /* ??? */ | ||
53 | } else | ||
54 | return ret; | ||
55 | |||
56 | if (!(ctrl & 0x40000000)) { | ||
36 | nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000); | 57 | nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000); |
37 | nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000); | 58 | nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000); |
38 | nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000); | 59 | nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000); |
@@ -65,6 +86,6 @@ nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) | |||
65 | nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */ | 86 | nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */ |
66 | 87 | ||
67 | /* HDMI_CTRL */ | 88 | /* HDMI_CTRL */ |
68 | nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, data | 0x1f000000 /* ??? */); | 89 | nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, ctrl); |
69 | return 0; | 90 | return 0; |
70 | } | 91 | } |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c index 5151bb261832..3106d295b48d 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c | |||
@@ -22,17 +22,37 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/client.h> |
26 | #include <core/class.h> | 26 | #include <nvif/unpack.h> |
27 | #include <nvif/class.h> | ||
27 | 28 | ||
28 | #include "nv50.h" | 29 | #include "nv50.h" |
29 | 30 | ||
30 | int | 31 | int |
31 | nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) | 32 | nvd0_hdmi_ctrl(NV50_DISP_MTHD_V1) |
32 | { | 33 | { |
33 | const u32 hoff = (head * 0x800); | 34 | const u32 hoff = (head * 0x800); |
35 | union { | ||
36 | struct nv50_disp_sor_hdmi_pwr_v0 v0; | ||
37 | } *args = data; | ||
38 | u32 ctrl; | ||
39 | int ret; | ||
34 | 40 | ||
35 | if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) { | 41 | nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size); |
42 | if (nvif_unpack(args->v0, 0, 0, false)) { | ||
43 | nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d " | ||
44 | "max_ac_packet %d rekey %d\n", | ||
45 | args->v0.version, args->v0.state, | ||
46 | args->v0.max_ac_packet, args->v0.rekey); | ||
47 | if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f) | ||
48 | return -EINVAL; | ||
49 | ctrl = 0x40000000 * !!args->v0.state; | ||
50 | ctrl |= args->v0.max_ac_packet << 16; | ||
51 | ctrl |= args->v0.rekey; | ||
52 | } else | ||
53 | return ret; | ||
54 | |||
55 | if (!(ctrl & 0x40000000)) { | ||
36 | nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000); | 56 | nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000); |
37 | nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000); | 57 | nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000); |
38 | nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000); | 58 | nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000); |
@@ -54,7 +74,7 @@ nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) | |||
54 | nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001); | 74 | nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001); |
55 | 75 | ||
56 | /* HDMI_CTRL */ | 76 | /* HDMI_CTRL */ |
57 | nv_mask(priv, 0x616798 + hoff, 0x401f007f, data); | 77 | nv_mask(priv, 0x616798 + hoff, 0x401f007f, ctrl); |
58 | 78 | ||
59 | /* NFI, audio doesn't work without it though.. */ | 79 | /* NFI, audio doesn't work without it though.. */ |
60 | nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000); | 80 | nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000); |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c index a32666ed0c47..366f315fc9a5 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c | |||
@@ -24,60 +24,100 @@ | |||
24 | 24 | ||
25 | #include "priv.h" | 25 | #include "priv.h" |
26 | 26 | ||
27 | #include <core/client.h> | ||
27 | #include <core/event.h> | 28 | #include <core/event.h> |
28 | #include <core/class.h> | 29 | #include <nvif/unpack.h> |
30 | #include <nvif/class.h> | ||
29 | 31 | ||
30 | struct nv04_disp_priv { | 32 | struct nv04_disp_priv { |
31 | struct nouveau_disp base; | 33 | struct nouveau_disp base; |
32 | }; | 34 | }; |
33 | 35 | ||
34 | static int | 36 | static int |
35 | nv04_disp_scanoutpos(struct nouveau_object *object, u32 mthd, | 37 | nv04_disp_scanoutpos(struct nouveau_object *object, struct nv04_disp_priv *priv, |
36 | void *data, u32 size) | 38 | void *data, u32 size, int head) |
37 | { | 39 | { |
38 | struct nv04_disp_priv *priv = (void *)object->engine; | 40 | const u32 hoff = head * 0x2000; |
39 | struct nv04_display_scanoutpos *args = data; | 41 | union { |
40 | const int head = (mthd & NV04_DISP_MTHD_HEAD); | 42 | struct nv04_disp_scanoutpos_v0 v0; |
43 | } *args = data; | ||
41 | u32 line; | 44 | u32 line; |
45 | int ret; | ||
46 | |||
47 | nv_ioctl(object, "disp scanoutpos size %d\n", size); | ||
48 | if (nvif_unpack(args->v0, 0, 0, false)) { | ||
49 | nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version); | ||
50 | args->v0.vblanks = nv_rd32(priv, 0x680800 + hoff) & 0xffff; | ||
51 | args->v0.vtotal = nv_rd32(priv, 0x680804 + hoff) & 0xffff; | ||
52 | args->v0.vblanke = args->v0.vtotal - 1; | ||
53 | |||
54 | args->v0.hblanks = nv_rd32(priv, 0x680820 + hoff) & 0xffff; | ||
55 | args->v0.htotal = nv_rd32(priv, 0x680824 + hoff) & 0xffff; | ||
56 | args->v0.hblanke = args->v0.htotal - 1; | ||
57 | |||
58 | /* | ||
59 | * If output is vga instead of digital then vtotal/htotal is | ||
60 | * invalid so we have to give up and trigger the timestamping | ||
61 | * fallback in the drm core. | ||
62 | */ | ||
63 | if (!args->v0.vtotal || !args->v0.htotal) | ||
64 | return -ENOTSUPP; | ||
65 | |||
66 | args->v0.time[0] = ktime_to_ns(ktime_get()); | ||
67 | line = nv_rd32(priv, 0x600868 + hoff); | ||
68 | args->v0.time[1] = ktime_to_ns(ktime_get()); | ||
69 | args->v0.hline = (line & 0xffff0000) >> 16; | ||
70 | args->v0.vline = (line & 0x0000ffff); | ||
71 | } else | ||
72 | return ret; | ||
42 | 73 | ||
43 | if (size < sizeof(*args)) | ||
44 | return -EINVAL; | ||
45 | |||
46 | args->vblanks = nv_rd32(priv, 0x680800 + (head * 0x2000)) & 0xffff; | ||
47 | args->vtotal = nv_rd32(priv, 0x680804 + (head * 0x2000)) & 0xffff; | ||
48 | args->vblanke = args->vtotal - 1; | ||
49 | |||
50 | args->hblanks = nv_rd32(priv, 0x680820 + (head * 0x2000)) & 0xffff; | ||
51 | args->htotal = nv_rd32(priv, 0x680824 + (head * 0x2000)) & 0xffff; | ||
52 | args->hblanke = args->htotal - 1; | ||
53 | |||
54 | /* | ||
55 | * If output is vga instead of digital then vtotal/htotal is invalid | ||
56 | * so we have to give up and trigger the timestamping fallback in the | ||
57 | * drm core. | ||
58 | */ | ||
59 | if (!args->vtotal || !args->htotal) | ||
60 | return -ENOTSUPP; | ||
61 | |||
62 | args->time[0] = ktime_to_ns(ktime_get()); | ||
63 | line = nv_rd32(priv, 0x600868 + (head * 0x2000)); | ||
64 | args->time[1] = ktime_to_ns(ktime_get()); | ||
65 | args->hline = (line & 0xffff0000) >> 16; | ||
66 | args->vline = (line & 0x0000ffff); | ||
67 | return 0; | 74 | return 0; |
68 | } | 75 | } |
69 | 76 | ||
70 | #define HEAD_MTHD(n) (n), (n) + 0x01 | 77 | static int |
78 | nv04_disp_mthd(struct nouveau_object *object, u32 mthd, void *data, u32 size) | ||
79 | { | ||
80 | union { | ||
81 | struct nv04_disp_mthd_v0 v0; | ||
82 | } *args = data; | ||
83 | struct nv04_disp_priv *priv = (void *)object->engine; | ||
84 | int head, ret; | ||
85 | |||
86 | nv_ioctl(object, "disp mthd size %d\n", size); | ||
87 | if (nvif_unpack(args->v0, 0, 0, true)) { | ||
88 | nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n", | ||
89 | args->v0.version, args->v0.method, args->v0.head); | ||
90 | mthd = args->v0.method; | ||
91 | head = args->v0.head; | ||
92 | } else | ||
93 | return ret; | ||
71 | 94 | ||
72 | static struct nouveau_omthds | 95 | if (head < 0 || head >= 2) |
73 | nv04_disp_omthds[] = { | 96 | return -ENXIO; |
74 | { HEAD_MTHD(NV04_DISP_SCANOUTPOS), nv04_disp_scanoutpos }, | 97 | |
75 | {} | 98 | switch (mthd) { |
99 | case NV04_DISP_SCANOUTPOS: | ||
100 | return nv04_disp_scanoutpos(object, priv, data, size, head); | ||
101 | default: | ||
102 | break; | ||
103 | } | ||
104 | |||
105 | return -EINVAL; | ||
106 | } | ||
107 | |||
108 | static struct nouveau_ofuncs | ||
109 | nv04_disp_ofuncs = { | ||
110 | .ctor = _nouveau_object_ctor, | ||
111 | .dtor = nouveau_object_destroy, | ||
112 | .init = nouveau_object_init, | ||
113 | .fini = nouveau_object_fini, | ||
114 | .mthd = nv04_disp_mthd, | ||
115 | .ntfy = nouveau_disp_ntfy, | ||
76 | }; | 116 | }; |
77 | 117 | ||
78 | static struct nouveau_oclass | 118 | static struct nouveau_oclass |
79 | nv04_disp_sclass[] = { | 119 | nv04_disp_sclass[] = { |
80 | { NV04_DISP_CLASS, &nouveau_object_ofuncs, nv04_disp_omthds }, | 120 | { NV04_DISP, &nv04_disp_ofuncs }, |
81 | {}, | 121 | {}, |
82 | }; | 122 | }; |
83 | 123 | ||
@@ -86,17 +126,26 @@ nv04_disp_sclass[] = { | |||
86 | ******************************************************************************/ | 126 | ******************************************************************************/ |
87 | 127 | ||
88 | static void | 128 | static void |
89 | nv04_disp_vblank_enable(struct nouveau_event *event, int type, int head) | 129 | nv04_disp_vblank_init(struct nvkm_event *event, int type, int head) |
90 | { | 130 | { |
91 | nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000001); | 131 | struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank); |
132 | nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000001); | ||
92 | } | 133 | } |
93 | 134 | ||
94 | static void | 135 | static void |
95 | nv04_disp_vblank_disable(struct nouveau_event *event, int type, int head) | 136 | nv04_disp_vblank_fini(struct nvkm_event *event, int type, int head) |
96 | { | 137 | { |
97 | nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000000); | 138 | struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank); |
139 | nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000000); | ||
98 | } | 140 | } |
99 | 141 | ||
142 | static const struct nvkm_event_func | ||
143 | nv04_disp_vblank_func = { | ||
144 | .ctor = nouveau_disp_vblank_ctor, | ||
145 | .init = nv04_disp_vblank_init, | ||
146 | .fini = nv04_disp_vblank_fini, | ||
147 | }; | ||
148 | |||
100 | static void | 149 | static void |
101 | nv04_disp_intr(struct nouveau_subdev *subdev) | 150 | nv04_disp_intr(struct nouveau_subdev *subdev) |
102 | { | 151 | { |
@@ -106,12 +155,12 @@ nv04_disp_intr(struct nouveau_subdev *subdev) | |||
106 | u32 pvideo; | 155 | u32 pvideo; |
107 | 156 | ||
108 | if (crtc0 & 0x00000001) { | 157 | if (crtc0 & 0x00000001) { |
109 | nouveau_event_trigger(priv->base.vblank, 1, 0); | 158 | nouveau_disp_vblank(&priv->base, 0); |
110 | nv_wr32(priv, 0x600100, 0x00000001); | 159 | nv_wr32(priv, 0x600100, 0x00000001); |
111 | } | 160 | } |
112 | 161 | ||
113 | if (crtc1 & 0x00000001) { | 162 | if (crtc1 & 0x00000001) { |
114 | nouveau_event_trigger(priv->base.vblank, 1, 1); | 163 | nouveau_disp_vblank(&priv->base, 1); |
115 | nv_wr32(priv, 0x602100, 0x00000001); | 164 | nv_wr32(priv, 0x602100, 0x00000001); |
116 | } | 165 | } |
117 | 166 | ||
@@ -140,9 +189,6 @@ nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
140 | 189 | ||
141 | nv_engine(priv)->sclass = nv04_disp_sclass; | 190 | nv_engine(priv)->sclass = nv04_disp_sclass; |
142 | nv_subdev(priv)->intr = nv04_disp_intr; | 191 | nv_subdev(priv)->intr = nv04_disp_intr; |
143 | priv->base.vblank->priv = priv; | ||
144 | priv->base.vblank->enable = nv04_disp_vblank_enable; | ||
145 | priv->base.vblank->disable = nv04_disp_vblank_disable; | ||
146 | return 0; | 192 | return 0; |
147 | } | 193 | } |
148 | 194 | ||
@@ -155,4 +201,5 @@ nv04_disp_oclass = &(struct nouveau_disp_impl) { | |||
155 | .init = _nouveau_disp_init, | 201 | .init = _nouveau_disp_init, |
156 | .fini = _nouveau_disp_fini, | 202 | .fini = _nouveau_disp_fini, |
157 | }, | 203 | }, |
204 | .vblank = &nv04_disp_vblank_func, | ||
158 | }.base; | 205 | }.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c index 2283c442a10d..4b5bb5d58a54 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | |||
@@ -23,10 +23,12 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/object.h> | 25 | #include <core/object.h> |
26 | #include <core/client.h> | ||
26 | #include <core/parent.h> | 27 | #include <core/parent.h> |
27 | #include <core/handle.h> | 28 | #include <core/handle.h> |
28 | #include <core/class.h> | ||
29 | #include <core/enum.h> | 29 | #include <core/enum.h> |
30 | #include <nvif/unpack.h> | ||
31 | #include <nvif/class.h> | ||
30 | 32 | ||
31 | #include <subdev/bios.h> | 33 | #include <subdev/bios.h> |
32 | #include <subdev/bios/dcb.h> | 34 | #include <subdev/bios/dcb.h> |
@@ -43,14 +45,16 @@ | |||
43 | * EVO channel base class | 45 | * EVO channel base class |
44 | ******************************************************************************/ | 46 | ******************************************************************************/ |
45 | 47 | ||
46 | int | 48 | static int |
47 | nv50_disp_chan_create_(struct nouveau_object *parent, | 49 | nv50_disp_chan_create_(struct nouveau_object *parent, |
48 | struct nouveau_object *engine, | 50 | struct nouveau_object *engine, |
49 | struct nouveau_oclass *oclass, int chid, | 51 | struct nouveau_oclass *oclass, int head, |
50 | int length, void **pobject) | 52 | int length, void **pobject) |
51 | { | 53 | { |
54 | const struct nv50_disp_chan_impl *impl = (void *)oclass->ofuncs; | ||
52 | struct nv50_disp_base *base = (void *)parent; | 55 | struct nv50_disp_base *base = (void *)parent; |
53 | struct nv50_disp_chan *chan; | 56 | struct nv50_disp_chan *chan; |
57 | int chid = impl->chid + head; | ||
54 | int ret; | 58 | int ret; |
55 | 59 | ||
56 | if (base->chan & (1 << chid)) | 60 | if (base->chan & (1 << chid)) |
@@ -63,12 +67,14 @@ nv50_disp_chan_create_(struct nouveau_object *parent, | |||
63 | chan = *pobject; | 67 | chan = *pobject; |
64 | if (ret) | 68 | if (ret) |
65 | return ret; | 69 | return ret; |
66 | |||
67 | chan->chid = chid; | 70 | chan->chid = chid; |
71 | |||
72 | nv_parent(chan)->object_attach = impl->attach; | ||
73 | nv_parent(chan)->object_detach = impl->detach; | ||
68 | return 0; | 74 | return 0; |
69 | } | 75 | } |
70 | 76 | ||
71 | void | 77 | static void |
72 | nv50_disp_chan_destroy(struct nv50_disp_chan *chan) | 78 | nv50_disp_chan_destroy(struct nv50_disp_chan *chan) |
73 | { | 79 | { |
74 | struct nv50_disp_base *base = (void *)nv_object(chan)->parent; | 80 | struct nv50_disp_base *base = (void *)nv_object(chan)->parent; |
@@ -76,6 +82,16 @@ nv50_disp_chan_destroy(struct nv50_disp_chan *chan) | |||
76 | nouveau_namedb_destroy(&chan->base); | 82 | nouveau_namedb_destroy(&chan->base); |
77 | } | 83 | } |
78 | 84 | ||
85 | int | ||
86 | nv50_disp_chan_map(struct nouveau_object *object, u64 *addr, u32 *size) | ||
87 | { | ||
88 | struct nv50_disp_chan *chan = (void *)object; | ||
89 | *addr = nv_device_resource_start(nv_device(object), 0) + | ||
90 | 0x640000 + (chan->chid * 0x1000); | ||
91 | *size = 0x001000; | ||
92 | return 0; | ||
93 | } | ||
94 | |||
79 | u32 | 95 | u32 |
80 | nv50_disp_chan_rd32(struct nouveau_object *object, u64 addr) | 96 | nv50_disp_chan_rd32(struct nouveau_object *object, u64 addr) |
81 | { | 97 | { |
@@ -115,16 +131,16 @@ nv50_disp_dmac_object_detach(struct nouveau_object *parent, int cookie) | |||
115 | nouveau_ramht_remove(base->ramht, cookie); | 131 | nouveau_ramht_remove(base->ramht, cookie); |
116 | } | 132 | } |
117 | 133 | ||
118 | int | 134 | static int |
119 | nv50_disp_dmac_create_(struct nouveau_object *parent, | 135 | nv50_disp_dmac_create_(struct nouveau_object *parent, |
120 | struct nouveau_object *engine, | 136 | struct nouveau_object *engine, |
121 | struct nouveau_oclass *oclass, u32 pushbuf, int chid, | 137 | struct nouveau_oclass *oclass, u32 pushbuf, int head, |
122 | int length, void **pobject) | 138 | int length, void **pobject) |
123 | { | 139 | { |
124 | struct nv50_disp_dmac *dmac; | 140 | struct nv50_disp_dmac *dmac; |
125 | int ret; | 141 | int ret; |
126 | 142 | ||
127 | ret = nv50_disp_chan_create_(parent, engine, oclass, chid, | 143 | ret = nv50_disp_chan_create_(parent, engine, oclass, head, |
128 | length, pobject); | 144 | length, pobject); |
129 | dmac = *pobject; | 145 | dmac = *pobject; |
130 | if (ret) | 146 | if (ret) |
@@ -397,27 +413,32 @@ nv50_disp_mast_mthd_chan = { | |||
397 | } | 413 | } |
398 | }; | 414 | }; |
399 | 415 | ||
400 | static int | 416 | int |
401 | nv50_disp_mast_ctor(struct nouveau_object *parent, | 417 | nv50_disp_mast_ctor(struct nouveau_object *parent, |
402 | struct nouveau_object *engine, | 418 | struct nouveau_object *engine, |
403 | struct nouveau_oclass *oclass, void *data, u32 size, | 419 | struct nouveau_oclass *oclass, void *data, u32 size, |
404 | struct nouveau_object **pobject) | 420 | struct nouveau_object **pobject) |
405 | { | 421 | { |
406 | struct nv50_display_mast_class *args = data; | 422 | union { |
423 | struct nv50_disp_core_channel_dma_v0 v0; | ||
424 | } *args = data; | ||
407 | struct nv50_disp_dmac *mast; | 425 | struct nv50_disp_dmac *mast; |
408 | int ret; | 426 | int ret; |
409 | 427 | ||
410 | if (size < sizeof(*args)) | 428 | nv_ioctl(parent, "create disp core channel dma size %d\n", size); |
411 | return -EINVAL; | 429 | if (nvif_unpack(args->v0, 0, 0, false)) { |
430 | nv_ioctl(parent, "create disp core channel dma vers %d " | ||
431 | "pushbuf %08x\n", | ||
432 | args->v0.version, args->v0.pushbuf); | ||
433 | } else | ||
434 | return ret; | ||
412 | 435 | ||
413 | ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, | 436 | ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf, |
414 | 0, sizeof(*mast), (void **)&mast); | 437 | 0, sizeof(*mast), (void **)&mast); |
415 | *pobject = nv_object(mast); | 438 | *pobject = nv_object(mast); |
416 | if (ret) | 439 | if (ret) |
417 | return ret; | 440 | return ret; |
418 | 441 | ||
419 | nv_parent(mast)->object_attach = nv50_disp_dmac_object_attach; | ||
420 | nv_parent(mast)->object_detach = nv50_disp_dmac_object_detach; | ||
421 | return 0; | 442 | return 0; |
422 | } | 443 | } |
423 | 444 | ||
@@ -479,14 +500,18 @@ nv50_disp_mast_fini(struct nouveau_object *object, bool suspend) | |||
479 | return nv50_disp_chan_fini(&mast->base, suspend); | 500 | return nv50_disp_chan_fini(&mast->base, suspend); |
480 | } | 501 | } |
481 | 502 | ||
482 | struct nouveau_ofuncs | 503 | struct nv50_disp_chan_impl |
483 | nv50_disp_mast_ofuncs = { | 504 | nv50_disp_mast_ofuncs = { |
484 | .ctor = nv50_disp_mast_ctor, | 505 | .base.ctor = nv50_disp_mast_ctor, |
485 | .dtor = nv50_disp_dmac_dtor, | 506 | .base.dtor = nv50_disp_dmac_dtor, |
486 | .init = nv50_disp_mast_init, | 507 | .base.init = nv50_disp_mast_init, |
487 | .fini = nv50_disp_mast_fini, | 508 | .base.fini = nv50_disp_mast_fini, |
488 | .rd32 = nv50_disp_chan_rd32, | 509 | .base.map = nv50_disp_chan_map, |
489 | .wr32 = nv50_disp_chan_wr32, | 510 | .base.rd32 = nv50_disp_chan_rd32, |
511 | .base.wr32 = nv50_disp_chan_wr32, | ||
512 | .chid = 0, | ||
513 | .attach = nv50_disp_dmac_object_attach, | ||
514 | .detach = nv50_disp_dmac_object_detach, | ||
490 | }; | 515 | }; |
491 | 516 | ||
492 | /******************************************************************************* | 517 | /******************************************************************************* |
@@ -543,39 +568,51 @@ nv50_disp_sync_mthd_chan = { | |||
543 | } | 568 | } |
544 | }; | 569 | }; |
545 | 570 | ||
546 | static int | 571 | int |
547 | nv50_disp_sync_ctor(struct nouveau_object *parent, | 572 | nv50_disp_sync_ctor(struct nouveau_object *parent, |
548 | struct nouveau_object *engine, | 573 | struct nouveau_object *engine, |
549 | struct nouveau_oclass *oclass, void *data, u32 size, | 574 | struct nouveau_oclass *oclass, void *data, u32 size, |
550 | struct nouveau_object **pobject) | 575 | struct nouveau_object **pobject) |
551 | { | 576 | { |
552 | struct nv50_display_sync_class *args = data; | 577 | union { |
578 | struct nv50_disp_base_channel_dma_v0 v0; | ||
579 | } *args = data; | ||
580 | struct nv50_disp_priv *priv = (void *)engine; | ||
553 | struct nv50_disp_dmac *dmac; | 581 | struct nv50_disp_dmac *dmac; |
554 | int ret; | 582 | int ret; |
555 | 583 | ||
556 | if (size < sizeof(*args) || args->head > 1) | 584 | nv_ioctl(parent, "create disp base channel dma size %d\n", size); |
557 | return -EINVAL; | 585 | if (nvif_unpack(args->v0, 0, 0, false)) { |
586 | nv_ioctl(parent, "create disp base channel dma vers %d " | ||
587 | "pushbuf %08x head %d\n", | ||
588 | args->v0.version, args->v0.pushbuf, args->v0.head); | ||
589 | if (args->v0.head > priv->head.nr) | ||
590 | return -EINVAL; | ||
591 | } else | ||
592 | return ret; | ||
558 | 593 | ||
559 | ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, | 594 | ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf, |
560 | 1 + args->head, sizeof(*dmac), | 595 | args->v0.head, sizeof(*dmac), |
561 | (void **)&dmac); | 596 | (void **)&dmac); |
562 | *pobject = nv_object(dmac); | 597 | *pobject = nv_object(dmac); |
563 | if (ret) | 598 | if (ret) |
564 | return ret; | 599 | return ret; |
565 | 600 | ||
566 | nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach; | ||
567 | nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach; | ||
568 | return 0; | 601 | return 0; |
569 | } | 602 | } |
570 | 603 | ||
571 | struct nouveau_ofuncs | 604 | struct nv50_disp_chan_impl |
572 | nv50_disp_sync_ofuncs = { | 605 | nv50_disp_sync_ofuncs = { |
573 | .ctor = nv50_disp_sync_ctor, | 606 | .base.ctor = nv50_disp_sync_ctor, |
574 | .dtor = nv50_disp_dmac_dtor, | 607 | .base.dtor = nv50_disp_dmac_dtor, |
575 | .init = nv50_disp_dmac_init, | 608 | .base.init = nv50_disp_dmac_init, |
576 | .fini = nv50_disp_dmac_fini, | 609 | .base.fini = nv50_disp_dmac_fini, |
577 | .rd32 = nv50_disp_chan_rd32, | 610 | .base.map = nv50_disp_chan_map, |
578 | .wr32 = nv50_disp_chan_wr32, | 611 | .base.rd32 = nv50_disp_chan_rd32, |
612 | .base.wr32 = nv50_disp_chan_wr32, | ||
613 | .chid = 1, | ||
614 | .attach = nv50_disp_dmac_object_attach, | ||
615 | .detach = nv50_disp_dmac_object_detach, | ||
579 | }; | 616 | }; |
580 | 617 | ||
581 | /******************************************************************************* | 618 | /******************************************************************************* |
@@ -620,39 +657,51 @@ nv50_disp_ovly_mthd_chan = { | |||
620 | } | 657 | } |
621 | }; | 658 | }; |
622 | 659 | ||
623 | static int | 660 | int |
624 | nv50_disp_ovly_ctor(struct nouveau_object *parent, | 661 | nv50_disp_ovly_ctor(struct nouveau_object *parent, |
625 | struct nouveau_object *engine, | 662 | struct nouveau_object *engine, |
626 | struct nouveau_oclass *oclass, void *data, u32 size, | 663 | struct nouveau_oclass *oclass, void *data, u32 size, |
627 | struct nouveau_object **pobject) | 664 | struct nouveau_object **pobject) |
628 | { | 665 | { |
629 | struct nv50_display_ovly_class *args = data; | 666 | union { |
667 | struct nv50_disp_overlay_channel_dma_v0 v0; | ||
668 | } *args = data; | ||
669 | struct nv50_disp_priv *priv = (void *)engine; | ||
630 | struct nv50_disp_dmac *dmac; | 670 | struct nv50_disp_dmac *dmac; |
631 | int ret; | 671 | int ret; |
632 | 672 | ||
633 | if (size < sizeof(*args) || args->head > 1) | 673 | nv_ioctl(parent, "create disp overlay channel dma size %d\n", size); |
634 | return -EINVAL; | 674 | if (nvif_unpack(args->v0, 0, 0, false)) { |
675 | nv_ioctl(parent, "create disp overlay channel dma vers %d " | ||
676 | "pushbuf %08x head %d\n", | ||
677 | args->v0.version, args->v0.pushbuf, args->v0.head); | ||
678 | if (args->v0.head > priv->head.nr) | ||
679 | return -EINVAL; | ||
680 | } else | ||
681 | return ret; | ||
635 | 682 | ||
636 | ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, | 683 | ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf, |
637 | 3 + args->head, sizeof(*dmac), | 684 | args->v0.head, sizeof(*dmac), |
638 | (void **)&dmac); | 685 | (void **)&dmac); |
639 | *pobject = nv_object(dmac); | 686 | *pobject = nv_object(dmac); |
640 | if (ret) | 687 | if (ret) |
641 | return ret; | 688 | return ret; |
642 | 689 | ||
643 | nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach; | ||
644 | nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach; | ||
645 | return 0; | 690 | return 0; |
646 | } | 691 | } |
647 | 692 | ||
648 | struct nouveau_ofuncs | 693 | struct nv50_disp_chan_impl |
649 | nv50_disp_ovly_ofuncs = { | 694 | nv50_disp_ovly_ofuncs = { |
650 | .ctor = nv50_disp_ovly_ctor, | 695 | .base.ctor = nv50_disp_ovly_ctor, |
651 | .dtor = nv50_disp_dmac_dtor, | 696 | .base.dtor = nv50_disp_dmac_dtor, |
652 | .init = nv50_disp_dmac_init, | 697 | .base.init = nv50_disp_dmac_init, |
653 | .fini = nv50_disp_dmac_fini, | 698 | .base.fini = nv50_disp_dmac_fini, |
654 | .rd32 = nv50_disp_chan_rd32, | 699 | .base.map = nv50_disp_chan_map, |
655 | .wr32 = nv50_disp_chan_wr32, | 700 | .base.rd32 = nv50_disp_chan_rd32, |
701 | .base.wr32 = nv50_disp_chan_wr32, | ||
702 | .chid = 3, | ||
703 | .attach = nv50_disp_dmac_object_attach, | ||
704 | .detach = nv50_disp_dmac_object_detach, | ||
656 | }; | 705 | }; |
657 | 706 | ||
658 | /******************************************************************************* | 707 | /******************************************************************************* |
@@ -662,14 +711,14 @@ nv50_disp_ovly_ofuncs = { | |||
662 | static int | 711 | static int |
663 | nv50_disp_pioc_create_(struct nouveau_object *parent, | 712 | nv50_disp_pioc_create_(struct nouveau_object *parent, |
664 | struct nouveau_object *engine, | 713 | struct nouveau_object *engine, |
665 | struct nouveau_oclass *oclass, int chid, | 714 | struct nouveau_oclass *oclass, int head, |
666 | int length, void **pobject) | 715 | int length, void **pobject) |
667 | { | 716 | { |
668 | return nv50_disp_chan_create_(parent, engine, oclass, chid, | 717 | return nv50_disp_chan_create_(parent, engine, oclass, head, |
669 | length, pobject); | 718 | length, pobject); |
670 | } | 719 | } |
671 | 720 | ||
672 | static void | 721 | void |
673 | nv50_disp_pioc_dtor(struct nouveau_object *object) | 722 | nv50_disp_pioc_dtor(struct nouveau_object *object) |
674 | { | 723 | { |
675 | struct nv50_disp_pioc *pioc = (void *)object; | 724 | struct nv50_disp_pioc *pioc = (void *)object; |
@@ -727,20 +776,29 @@ nv50_disp_pioc_fini(struct nouveau_object *object, bool suspend) | |||
727 | * EVO immediate overlay channel objects | 776 | * EVO immediate overlay channel objects |
728 | ******************************************************************************/ | 777 | ******************************************************************************/ |
729 | 778 | ||
730 | static int | 779 | int |
731 | nv50_disp_oimm_ctor(struct nouveau_object *parent, | 780 | nv50_disp_oimm_ctor(struct nouveau_object *parent, |
732 | struct nouveau_object *engine, | 781 | struct nouveau_object *engine, |
733 | struct nouveau_oclass *oclass, void *data, u32 size, | 782 | struct nouveau_oclass *oclass, void *data, u32 size, |
734 | struct nouveau_object **pobject) | 783 | struct nouveau_object **pobject) |
735 | { | 784 | { |
736 | struct nv50_display_oimm_class *args = data; | 785 | union { |
786 | struct nv50_disp_overlay_v0 v0; | ||
787 | } *args = data; | ||
788 | struct nv50_disp_priv *priv = (void *)engine; | ||
737 | struct nv50_disp_pioc *pioc; | 789 | struct nv50_disp_pioc *pioc; |
738 | int ret; | 790 | int ret; |
739 | 791 | ||
740 | if (size < sizeof(*args) || args->head > 1) | 792 | nv_ioctl(parent, "create disp overlay size %d\n", size); |
741 | return -EINVAL; | 793 | if (nvif_unpack(args->v0, 0, 0, false)) { |
794 | nv_ioctl(parent, "create disp overlay vers %d head %d\n", | ||
795 | args->v0.version, args->v0.head); | ||
796 | if (args->v0.head > priv->head.nr) | ||
797 | return -EINVAL; | ||
798 | } else | ||
799 | return ret; | ||
742 | 800 | ||
743 | ret = nv50_disp_pioc_create_(parent, engine, oclass, 5 + args->head, | 801 | ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head, |
744 | sizeof(*pioc), (void **)&pioc); | 802 | sizeof(*pioc), (void **)&pioc); |
745 | *pobject = nv_object(pioc); | 803 | *pobject = nv_object(pioc); |
746 | if (ret) | 804 | if (ret) |
@@ -749,34 +807,45 @@ nv50_disp_oimm_ctor(struct nouveau_object *parent, | |||
749 | return 0; | 807 | return 0; |
750 | } | 808 | } |
751 | 809 | ||
752 | struct nouveau_ofuncs | 810 | struct nv50_disp_chan_impl |
753 | nv50_disp_oimm_ofuncs = { | 811 | nv50_disp_oimm_ofuncs = { |
754 | .ctor = nv50_disp_oimm_ctor, | 812 | .base.ctor = nv50_disp_oimm_ctor, |
755 | .dtor = nv50_disp_pioc_dtor, | 813 | .base.dtor = nv50_disp_pioc_dtor, |
756 | .init = nv50_disp_pioc_init, | 814 | .base.init = nv50_disp_pioc_init, |
757 | .fini = nv50_disp_pioc_fini, | 815 | .base.fini = nv50_disp_pioc_fini, |
758 | .rd32 = nv50_disp_chan_rd32, | 816 | .base.map = nv50_disp_chan_map, |
759 | .wr32 = nv50_disp_chan_wr32, | 817 | .base.rd32 = nv50_disp_chan_rd32, |
818 | .base.wr32 = nv50_disp_chan_wr32, | ||
819 | .chid = 5, | ||
760 | }; | 820 | }; |
761 | 821 | ||
762 | /******************************************************************************* | 822 | /******************************************************************************* |
763 | * EVO cursor channel objects | 823 | * EVO cursor channel objects |
764 | ******************************************************************************/ | 824 | ******************************************************************************/ |
765 | 825 | ||
766 | static int | 826 | int |
767 | nv50_disp_curs_ctor(struct nouveau_object *parent, | 827 | nv50_disp_curs_ctor(struct nouveau_object *parent, |
768 | struct nouveau_object *engine, | 828 | struct nouveau_object *engine, |
769 | struct nouveau_oclass *oclass, void *data, u32 size, | 829 | struct nouveau_oclass *oclass, void *data, u32 size, |
770 | struct nouveau_object **pobject) | 830 | struct nouveau_object **pobject) |
771 | { | 831 | { |
772 | struct nv50_display_curs_class *args = data; | 832 | union { |
833 | struct nv50_disp_cursor_v0 v0; | ||
834 | } *args = data; | ||
835 | struct nv50_disp_priv *priv = (void *)engine; | ||
773 | struct nv50_disp_pioc *pioc; | 836 | struct nv50_disp_pioc *pioc; |
774 | int ret; | 837 | int ret; |
775 | 838 | ||
776 | if (size < sizeof(*args) || args->head > 1) | 839 | nv_ioctl(parent, "create disp cursor size %d\n", size); |
777 | return -EINVAL; | 840 | if (nvif_unpack(args->v0, 0, 0, false)) { |
841 | nv_ioctl(parent, "create disp cursor vers %d head %d\n", | ||
842 | args->v0.version, args->v0.head); | ||
843 | if (args->v0.head > priv->head.nr) | ||
844 | return -EINVAL; | ||
845 | } else | ||
846 | return ret; | ||
778 | 847 | ||
779 | ret = nv50_disp_pioc_create_(parent, engine, oclass, 7 + args->head, | 848 | ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head, |
780 | sizeof(*pioc), (void **)&pioc); | 849 | sizeof(*pioc), (void **)&pioc); |
781 | *pobject = nv_object(pioc); | 850 | *pobject = nv_object(pioc); |
782 | if (ret) | 851 | if (ret) |
@@ -785,14 +854,16 @@ nv50_disp_curs_ctor(struct nouveau_object *parent, | |||
785 | return 0; | 854 | return 0; |
786 | } | 855 | } |
787 | 856 | ||
788 | struct nouveau_ofuncs | 857 | struct nv50_disp_chan_impl |
789 | nv50_disp_curs_ofuncs = { | 858 | nv50_disp_curs_ofuncs = { |
790 | .ctor = nv50_disp_curs_ctor, | 859 | .base.ctor = nv50_disp_curs_ctor, |
791 | .dtor = nv50_disp_pioc_dtor, | 860 | .base.dtor = nv50_disp_pioc_dtor, |
792 | .init = nv50_disp_pioc_init, | 861 | .base.init = nv50_disp_pioc_init, |
793 | .fini = nv50_disp_pioc_fini, | 862 | .base.fini = nv50_disp_pioc_fini, |
794 | .rd32 = nv50_disp_chan_rd32, | 863 | .base.map = nv50_disp_chan_map, |
795 | .wr32 = nv50_disp_chan_wr32, | 864 | .base.rd32 = nv50_disp_chan_rd32, |
865 | .base.wr32 = nv50_disp_chan_wr32, | ||
866 | .chid = 7, | ||
796 | }; | 867 | }; |
797 | 868 | ||
798 | /******************************************************************************* | 869 | /******************************************************************************* |
@@ -800,47 +871,162 @@ nv50_disp_curs_ofuncs = { | |||
800 | ******************************************************************************/ | 871 | ******************************************************************************/ |
801 | 872 | ||
802 | int | 873 | int |
803 | nv50_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd, | 874 | nv50_disp_base_scanoutpos(NV50_DISP_MTHD_V0) |
804 | void *data, u32 size) | ||
805 | { | 875 | { |
806 | struct nv50_disp_priv *priv = (void *)object->engine; | 876 | const u32 blanke = nv_rd32(priv, 0x610aec + (head * 0x540)); |
807 | struct nv04_display_scanoutpos *args = data; | 877 | const u32 blanks = nv_rd32(priv, 0x610af4 + (head * 0x540)); |
808 | const int head = (mthd & NV50_DISP_MTHD_HEAD); | 878 | const u32 total = nv_rd32(priv, 0x610afc + (head * 0x540)); |
809 | u32 blanke, blanks, total; | 879 | union { |
880 | struct nv04_disp_scanoutpos_v0 v0; | ||
881 | } *args = data; | ||
882 | int ret; | ||
883 | |||
884 | nv_ioctl(object, "disp scanoutpos size %d\n", size); | ||
885 | if (nvif_unpack(args->v0, 0, 0, false)) { | ||
886 | nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version); | ||
887 | args->v0.vblanke = (blanke & 0xffff0000) >> 16; | ||
888 | args->v0.hblanke = (blanke & 0x0000ffff); | ||
889 | args->v0.vblanks = (blanks & 0xffff0000) >> 16; | ||
890 | args->v0.hblanks = (blanks & 0x0000ffff); | ||
891 | args->v0.vtotal = ( total & 0xffff0000) >> 16; | ||
892 | args->v0.htotal = ( total & 0x0000ffff); | ||
893 | args->v0.time[0] = ktime_to_ns(ktime_get()); | ||
894 | args->v0.vline = /* vline read locks hline */ | ||
895 | nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff; | ||
896 | args->v0.time[1] = ktime_to_ns(ktime_get()); | ||
897 | args->v0.hline = | ||
898 | nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff; | ||
899 | } else | ||
900 | return ret; | ||
810 | 901 | ||
811 | if (size < sizeof(*args) || head >= priv->head.nr) | ||
812 | return -EINVAL; | ||
813 | blanke = nv_rd32(priv, 0x610aec + (head * 0x540)); | ||
814 | blanks = nv_rd32(priv, 0x610af4 + (head * 0x540)); | ||
815 | total = nv_rd32(priv, 0x610afc + (head * 0x540)); | ||
816 | |||
817 | args->vblanke = (blanke & 0xffff0000) >> 16; | ||
818 | args->hblanke = (blanke & 0x0000ffff); | ||
819 | args->vblanks = (blanks & 0xffff0000) >> 16; | ||
820 | args->hblanks = (blanks & 0x0000ffff); | ||
821 | args->vtotal = ( total & 0xffff0000) >> 16; | ||
822 | args->htotal = ( total & 0x0000ffff); | ||
823 | |||
824 | args->time[0] = ktime_to_ns(ktime_get()); | ||
825 | args->vline = nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff; | ||
826 | args->time[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */ | ||
827 | args->hline = nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff; | ||
828 | return 0; | 902 | return 0; |
829 | } | 903 | } |
830 | 904 | ||
831 | static void | 905 | int |
832 | nv50_disp_base_vblank_enable(struct nouveau_event *event, int type, int head) | 906 | nv50_disp_base_mthd(struct nouveau_object *object, u32 mthd, |
907 | void *data, u32 size) | ||
833 | { | 908 | { |
834 | nv_mask(event->priv, 0x61002c, (4 << head), (4 << head)); | 909 | const struct nv50_disp_impl *impl = (void *)nv_oclass(object->engine); |
835 | } | 910 | union { |
911 | struct nv50_disp_mthd_v0 v0; | ||
912 | struct nv50_disp_mthd_v1 v1; | ||
913 | } *args = data; | ||
914 | struct nv50_disp_priv *priv = (void *)object->engine; | ||
915 | struct nvkm_output *outp = NULL; | ||
916 | struct nvkm_output *temp; | ||
917 | u16 type, mask = 0; | ||
918 | int head, ret; | ||
836 | 919 | ||
837 | static void | 920 | if (mthd != NV50_DISP_MTHD) |
838 | nv50_disp_base_vblank_disable(struct nouveau_event *event, int type, int head) | 921 | return -EINVAL; |
839 | { | 922 | |
840 | nv_mask(event->priv, 0x61002c, (4 << head), 0); | 923 | nv_ioctl(object, "disp mthd size %d\n", size); |
924 | if (nvif_unpack(args->v0, 0, 0, true)) { | ||
925 | nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n", | ||
926 | args->v0.version, args->v0.method, args->v0.head); | ||
927 | mthd = args->v0.method; | ||
928 | head = args->v0.head; | ||
929 | } else | ||
930 | if (nvif_unpack(args->v1, 1, 1, true)) { | ||
931 | nv_ioctl(object, "disp mthd vers %d mthd %02x " | ||
932 | "type %04x mask %04x\n", | ||
933 | args->v1.version, args->v1.method, | ||
934 | args->v1.hasht, args->v1.hashm); | ||
935 | mthd = args->v1.method; | ||
936 | type = args->v1.hasht; | ||
937 | mask = args->v1.hashm; | ||
938 | head = ffs((mask >> 8) & 0x0f) - 1; | ||
939 | } else | ||
940 | return ret; | ||
941 | |||
942 | if (head < 0 || head >= priv->head.nr) | ||
943 | return -ENXIO; | ||
944 | |||
945 | if (mask) { | ||
946 | list_for_each_entry(temp, &priv->base.outp, head) { | ||
947 | if ((temp->info.hasht == type) && | ||
948 | (temp->info.hashm & mask) == mask) { | ||
949 | outp = temp; | ||
950 | break; | ||
951 | } | ||
952 | } | ||
953 | if (outp == NULL) | ||
954 | return -ENXIO; | ||
955 | } | ||
956 | |||
957 | switch (mthd) { | ||
958 | case NV50_DISP_SCANOUTPOS: | ||
959 | return impl->head.scanoutpos(object, priv, data, size, head); | ||
960 | default: | ||
961 | break; | ||
962 | } | ||
963 | |||
964 | switch (mthd * !!outp) { | ||
965 | case NV50_DISP_MTHD_V1_DAC_PWR: | ||
966 | return priv->dac.power(object, priv, data, size, head, outp); | ||
967 | case NV50_DISP_MTHD_V1_DAC_LOAD: | ||
968 | return priv->dac.sense(object, priv, data, size, head, outp); | ||
969 | case NV50_DISP_MTHD_V1_SOR_PWR: | ||
970 | return priv->sor.power(object, priv, data, size, head, outp); | ||
971 | case NV50_DISP_MTHD_V1_SOR_HDA_ELD: | ||
972 | if (!priv->sor.hda_eld) | ||
973 | return -ENODEV; | ||
974 | return priv->sor.hda_eld(object, priv, data, size, head, outp); | ||
975 | case NV50_DISP_MTHD_V1_SOR_HDMI_PWR: | ||
976 | if (!priv->sor.hdmi) | ||
977 | return -ENODEV; | ||
978 | return priv->sor.hdmi(object, priv, data, size, head, outp); | ||
979 | case NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT: { | ||
980 | union { | ||
981 | struct nv50_disp_sor_lvds_script_v0 v0; | ||
982 | } *args = data; | ||
983 | nv_ioctl(object, "disp sor lvds script size %d\n", size); | ||
984 | if (nvif_unpack(args->v0, 0, 0, false)) { | ||
985 | nv_ioctl(object, "disp sor lvds script " | ||
986 | "vers %d name %04x\n", | ||
987 | args->v0.version, args->v0.script); | ||
988 | priv->sor.lvdsconf = args->v0.script; | ||
989 | return 0; | ||
990 | } else | ||
991 | return ret; | ||
992 | } | ||
993 | break; | ||
994 | case NV50_DISP_MTHD_V1_SOR_DP_PWR: { | ||
995 | struct nvkm_output_dp *outpdp = (void *)outp; | ||
996 | union { | ||
997 | struct nv50_disp_sor_dp_pwr_v0 v0; | ||
998 | } *args = data; | ||
999 | nv_ioctl(object, "disp sor dp pwr size %d\n", size); | ||
1000 | if (nvif_unpack(args->v0, 0, 0, false)) { | ||
1001 | nv_ioctl(object, "disp sor dp pwr vers %d state %d\n", | ||
1002 | args->v0.version, args->v0.state); | ||
1003 | if (args->v0.state == 0) { | ||
1004 | nvkm_notify_put(&outpdp->irq); | ||
1005 | ((struct nvkm_output_dp_impl *)nv_oclass(outp)) | ||
1006 | ->lnk_pwr(outpdp, 0); | ||
1007 | atomic_set(&outpdp->lt.done, 0); | ||
1008 | return 0; | ||
1009 | } else | ||
1010 | if (args->v0.state != 0) { | ||
1011 | nvkm_output_dp_train(&outpdp->base, 0, true); | ||
1012 | return 0; | ||
1013 | } | ||
1014 | } else | ||
1015 | return ret; | ||
1016 | } | ||
1017 | break; | ||
1018 | case NV50_DISP_MTHD_V1_PIOR_PWR: | ||
1019 | if (!priv->pior.power) | ||
1020 | return -ENODEV; | ||
1021 | return priv->pior.power(object, priv, data, size, head, outp); | ||
1022 | default: | ||
1023 | break; | ||
1024 | } | ||
1025 | |||
1026 | return -EINVAL; | ||
841 | } | 1027 | } |
842 | 1028 | ||
843 | static int | 1029 | int |
844 | nv50_disp_base_ctor(struct nouveau_object *parent, | 1030 | nv50_disp_base_ctor(struct nouveau_object *parent, |
845 | struct nouveau_object *engine, | 1031 | struct nouveau_object *engine, |
846 | struct nouveau_oclass *oclass, void *data, u32 size, | 1032 | struct nouveau_oclass *oclass, void *data, u32 size, |
@@ -856,14 +1042,11 @@ nv50_disp_base_ctor(struct nouveau_object *parent, | |||
856 | if (ret) | 1042 | if (ret) |
857 | return ret; | 1043 | return ret; |
858 | 1044 | ||
859 | priv->base.vblank->priv = priv; | ||
860 | priv->base.vblank->enable = nv50_disp_base_vblank_enable; | ||
861 | priv->base.vblank->disable = nv50_disp_base_vblank_disable; | ||
862 | return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0, | 1045 | return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0, |
863 | &base->ramht); | 1046 | &base->ramht); |
864 | } | 1047 | } |
865 | 1048 | ||
866 | static void | 1049 | void |
867 | nv50_disp_base_dtor(struct nouveau_object *object) | 1050 | nv50_disp_base_dtor(struct nouveau_object *object) |
868 | { | 1051 | { |
869 | struct nv50_disp_base *base = (void *)object; | 1052 | struct nv50_disp_base *base = (void *)object; |
@@ -958,34 +1141,23 @@ nv50_disp_base_ofuncs = { | |||
958 | .dtor = nv50_disp_base_dtor, | 1141 | .dtor = nv50_disp_base_dtor, |
959 | .init = nv50_disp_base_init, | 1142 | .init = nv50_disp_base_init, |
960 | .fini = nv50_disp_base_fini, | 1143 | .fini = nv50_disp_base_fini, |
961 | }; | 1144 | .mthd = nv50_disp_base_mthd, |
962 | 1145 | .ntfy = nouveau_disp_ntfy, | |
963 | static struct nouveau_omthds | ||
964 | nv50_disp_base_omthds[] = { | ||
965 | { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos }, | ||
966 | { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, | ||
967 | { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, | ||
968 | { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, | ||
969 | { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, | ||
970 | { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd }, | ||
971 | { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd }, | ||
972 | { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd }, | ||
973 | {}, | ||
974 | }; | 1146 | }; |
975 | 1147 | ||
976 | static struct nouveau_oclass | 1148 | static struct nouveau_oclass |
977 | nv50_disp_base_oclass[] = { | 1149 | nv50_disp_base_oclass[] = { |
978 | { NV50_DISP_CLASS, &nv50_disp_base_ofuncs, nv50_disp_base_omthds }, | 1150 | { NV50_DISP, &nv50_disp_base_ofuncs }, |
979 | {} | 1151 | {} |
980 | }; | 1152 | }; |
981 | 1153 | ||
982 | static struct nouveau_oclass | 1154 | static struct nouveau_oclass |
983 | nv50_disp_sclass[] = { | 1155 | nv50_disp_sclass[] = { |
984 | { NV50_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, | 1156 | { NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base }, |
985 | { NV50_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, | 1157 | { NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base }, |
986 | { NV50_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, | 1158 | { NV50_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base }, |
987 | { NV50_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, | 1159 | { NV50_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base }, |
988 | { NV50_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, | 1160 | { NV50_DISP_CURSOR, &nv50_disp_curs_ofuncs.base }, |
989 | {} | 1161 | {} |
990 | }; | 1162 | }; |
991 | 1163 | ||
@@ -1005,7 +1177,7 @@ nv50_disp_data_ctor(struct nouveau_object *parent, | |||
1005 | int ret = -EBUSY; | 1177 | int ret = -EBUSY; |
1006 | 1178 | ||
1007 | /* no context needed for channel objects... */ | 1179 | /* no context needed for channel objects... */ |
1008 | if (nv_mclass(parent) != NV_DEVICE_CLASS) { | 1180 | if (nv_mclass(parent) != NV_DEVICE) { |
1009 | atomic_inc(&parent->refcount); | 1181 | atomic_inc(&parent->refcount); |
1010 | *pobject = parent; | 1182 | *pobject = parent; |
1011 | return 1; | 1183 | return 1; |
@@ -1040,6 +1212,27 @@ nv50_disp_cclass = { | |||
1040 | * Display engine implementation | 1212 | * Display engine implementation |
1041 | ******************************************************************************/ | 1213 | ******************************************************************************/ |
1042 | 1214 | ||
1215 | static void | ||
1216 | nv50_disp_vblank_fini(struct nvkm_event *event, int type, int head) | ||
1217 | { | ||
1218 | struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank); | ||
1219 | nv_mask(disp, 0x61002c, (4 << head), 0); | ||
1220 | } | ||
1221 | |||
1222 | static void | ||
1223 | nv50_disp_vblank_init(struct nvkm_event *event, int type, int head) | ||
1224 | { | ||
1225 | struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank); | ||
1226 | nv_mask(disp, 0x61002c, (4 << head), (4 << head)); | ||
1227 | } | ||
1228 | |||
1229 | const struct nvkm_event_func | ||
1230 | nv50_disp_vblank_func = { | ||
1231 | .ctor = nouveau_disp_vblank_ctor, | ||
1232 | .init = nv50_disp_vblank_init, | ||
1233 | .fini = nv50_disp_vblank_fini, | ||
1234 | }; | ||
1235 | |||
1043 | static const struct nouveau_enum | 1236 | static const struct nouveau_enum |
1044 | nv50_disp_intr_error_type[] = { | 1237 | nv50_disp_intr_error_type[] = { |
1045 | { 3, "ILLEGAL_MTHD" }, | 1238 | { 3, "ILLEGAL_MTHD" }, |
@@ -1381,7 +1574,7 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, | |||
1381 | int TU, VTUi, VTUf, VTUa; | 1574 | int TU, VTUi, VTUf, VTUa; |
1382 | u64 link_data_rate, link_ratio, unk; | 1575 | u64 link_data_rate, link_ratio, unk; |
1383 | u32 best_diff = 64 * symbol; | 1576 | u32 best_diff = 64 * symbol; |
1384 | u32 link_nr, link_bw, bits, r; | 1577 | u32 link_nr, link_bw, bits; |
1385 | 1578 | ||
1386 | /* calculate packed data rate for each lane */ | 1579 | /* calculate packed data rate for each lane */ |
1387 | if (dpctrl > 0x00030000) link_nr = 4; | 1580 | if (dpctrl > 0x00030000) link_nr = 4; |
@@ -1401,7 +1594,7 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, | |||
1401 | 1594 | ||
1402 | /* calculate ratio of packed data rate to link symbol rate */ | 1595 | /* calculate ratio of packed data rate to link symbol rate */ |
1403 | link_ratio = link_data_rate * symbol; | 1596 | link_ratio = link_data_rate * symbol; |
1404 | r = do_div(link_ratio, link_bw); | 1597 | do_div(link_ratio, link_bw); |
1405 | 1598 | ||
1406 | for (TU = 64; TU >= 32; TU--) { | 1599 | for (TU = 64; TU >= 32; TU--) { |
1407 | /* calculate average number of valid symbols in each TU */ | 1600 | /* calculate average number of valid symbols in each TU */ |
@@ -1462,8 +1655,8 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, | |||
1462 | /* XXX close to vbios numbers, but not right */ | 1655 | /* XXX close to vbios numbers, but not right */ |
1463 | unk = (symbol - link_ratio) * bestTU; | 1656 | unk = (symbol - link_ratio) * bestTU; |
1464 | unk *= link_ratio; | 1657 | unk *= link_ratio; |
1465 | r = do_div(unk, symbol); | 1658 | do_div(unk, symbol); |
1466 | r = do_div(unk, symbol); | 1659 | do_div(unk, symbol); |
1467 | unk += 6; | 1660 | unk += 6; |
1468 | 1661 | ||
1469 | nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2); | 1662 | nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2); |
@@ -1654,13 +1847,13 @@ nv50_disp_intr(struct nouveau_subdev *subdev) | |||
1654 | } | 1847 | } |
1655 | 1848 | ||
1656 | if (intr1 & 0x00000004) { | 1849 | if (intr1 & 0x00000004) { |
1657 | nouveau_event_trigger(priv->base.vblank, 1, 0); | 1850 | nouveau_disp_vblank(&priv->base, 0); |
1658 | nv_wr32(priv, 0x610024, 0x00000004); | 1851 | nv_wr32(priv, 0x610024, 0x00000004); |
1659 | intr1 &= ~0x00000004; | 1852 | intr1 &= ~0x00000004; |
1660 | } | 1853 | } |
1661 | 1854 | ||
1662 | if (intr1 & 0x00000008) { | 1855 | if (intr1 & 0x00000008) { |
1663 | nouveau_event_trigger(priv->base.vblank, 1, 1); | 1856 | nouveau_disp_vblank(&priv->base, 1); |
1664 | nv_wr32(priv, 0x610024, 0x00000008); | 1857 | nv_wr32(priv, 0x610024, 0x00000008); |
1665 | intr1 &= ~0x00000008; | 1858 | intr1 &= ~0x00000008; |
1666 | } | 1859 | } |
@@ -1718,9 +1911,11 @@ nv50_disp_oclass = &(struct nv50_disp_impl) { | |||
1718 | .init = _nouveau_disp_init, | 1911 | .init = _nouveau_disp_init, |
1719 | .fini = _nouveau_disp_fini, | 1912 | .fini = _nouveau_disp_fini, |
1720 | }, | 1913 | }, |
1914 | .base.vblank = &nv50_disp_vblank_func, | ||
1721 | .base.outp = nv50_disp_outp_sclass, | 1915 | .base.outp = nv50_disp_outp_sclass, |
1722 | .mthd.core = &nv50_disp_mast_mthd_chan, | 1916 | .mthd.core = &nv50_disp_mast_mthd_chan, |
1723 | .mthd.base = &nv50_disp_sync_mthd_chan, | 1917 | .mthd.base = &nv50_disp_sync_mthd_chan, |
1724 | .mthd.ovly = &nv50_disp_ovly_mthd_chan, | 1918 | .mthd.ovly = &nv50_disp_ovly_mthd_chan, |
1725 | .mthd.prev = 0x000004, | 1919 | .mthd.prev = 0x000004, |
1920 | .head.scanoutpos = nv50_disp_base_scanoutpos, | ||
1726 | }.base.base; | 1921 | }.base.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h index 1a886472b6f5..8ab14461f70c 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h | |||
@@ -14,15 +14,10 @@ | |||
14 | #include "outp.h" | 14 | #include "outp.h" |
15 | #include "outpdp.h" | 15 | #include "outpdp.h" |
16 | 16 | ||
17 | struct nv50_disp_impl { | 17 | #define NV50_DISP_MTHD_ struct nouveau_object *object, \ |
18 | struct nouveau_disp_impl base; | 18 | struct nv50_disp_priv *priv, void *data, u32 size |
19 | struct { | 19 | #define NV50_DISP_MTHD_V0 NV50_DISP_MTHD_, int head |
20 | const struct nv50_disp_mthd_chan *core; | 20 | #define NV50_DISP_MTHD_V1 NV50_DISP_MTHD_, int head, struct nvkm_output *outp |
21 | const struct nv50_disp_mthd_chan *base; | ||
22 | const struct nv50_disp_mthd_chan *ovly; | ||
23 | int prev; | ||
24 | } mthd; | ||
25 | }; | ||
26 | 21 | ||
27 | struct nv50_disp_priv { | 22 | struct nv50_disp_priv { |
28 | struct nouveau_disp base; | 23 | struct nouveau_disp base; |
@@ -36,44 +31,52 @@ struct nv50_disp_priv { | |||
36 | } head; | 31 | } head; |
37 | struct { | 32 | struct { |
38 | int nr; | 33 | int nr; |
39 | int (*power)(struct nv50_disp_priv *, int dac, u32 data); | 34 | int (*power)(NV50_DISP_MTHD_V1); |
40 | int (*sense)(struct nv50_disp_priv *, int dac, u32 load); | 35 | int (*sense)(NV50_DISP_MTHD_V1); |
41 | } dac; | 36 | } dac; |
42 | struct { | 37 | struct { |
43 | int nr; | 38 | int nr; |
44 | int (*power)(struct nv50_disp_priv *, int sor, u32 data); | 39 | int (*power)(NV50_DISP_MTHD_V1); |
45 | int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32); | 40 | int (*hda_eld)(NV50_DISP_MTHD_V1); |
46 | int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32); | 41 | int (*hdmi)(NV50_DISP_MTHD_V1); |
47 | u32 lvdsconf; | 42 | u32 lvdsconf; |
48 | } sor; | 43 | } sor; |
49 | struct { | 44 | struct { |
50 | int nr; | 45 | int nr; |
51 | int (*power)(struct nv50_disp_priv *, int ext, u32 data); | 46 | int (*power)(NV50_DISP_MTHD_V1); |
52 | u8 type[3]; | 47 | u8 type[3]; |
53 | } pior; | 48 | } pior; |
54 | }; | 49 | }; |
55 | 50 | ||
56 | #define HEAD_MTHD(n) (n), (n) + 0x03 | 51 | struct nv50_disp_impl { |
57 | 52 | struct nouveau_disp_impl base; | |
58 | int nv50_disp_base_scanoutpos(struct nouveau_object *, u32, void *, u32); | 53 | struct { |
54 | const struct nv50_disp_mthd_chan *core; | ||
55 | const struct nv50_disp_mthd_chan *base; | ||
56 | const struct nv50_disp_mthd_chan *ovly; | ||
57 | int prev; | ||
58 | } mthd; | ||
59 | struct { | ||
60 | int (*scanoutpos)(NV50_DISP_MTHD_V0); | ||
61 | } head; | ||
62 | }; | ||
59 | 63 | ||
60 | #define DAC_MTHD(n) (n), (n) + 0x03 | 64 | int nv50_disp_base_scanoutpos(NV50_DISP_MTHD_V0); |
65 | int nv50_disp_base_mthd(struct nouveau_object *, u32, void *, u32); | ||
61 | 66 | ||
62 | int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32); | 67 | int nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0); |
63 | int nv50_dac_power(struct nv50_disp_priv *, int, u32); | ||
64 | int nv50_dac_sense(struct nv50_disp_priv *, int, u32); | ||
65 | 68 | ||
66 | #define SOR_MTHD(n) (n), (n) + 0x3f | 69 | int nv50_dac_power(NV50_DISP_MTHD_V1); |
70 | int nv50_dac_sense(NV50_DISP_MTHD_V1); | ||
67 | 71 | ||
68 | int nva3_hda_eld(struct nv50_disp_priv *, int, u8 *, u32); | 72 | int nva3_hda_eld(NV50_DISP_MTHD_V1); |
69 | int nvd0_hda_eld(struct nv50_disp_priv *, int, u8 *, u32); | 73 | int nvd0_hda_eld(NV50_DISP_MTHD_V1); |
70 | 74 | ||
71 | int nv84_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32); | 75 | int nv84_hdmi_ctrl(NV50_DISP_MTHD_V1); |
72 | int nva3_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32); | 76 | int nva3_hdmi_ctrl(NV50_DISP_MTHD_V1); |
73 | int nvd0_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32); | 77 | int nvd0_hdmi_ctrl(NV50_DISP_MTHD_V1); |
74 | 78 | ||
75 | int nv50_sor_mthd(struct nouveau_object *, u32, void *, u32); | 79 | int nv50_sor_power(NV50_DISP_MTHD_V1); |
76 | int nv50_sor_power(struct nv50_disp_priv *, int, u32); | ||
77 | 80 | ||
78 | int nv94_sor_dp_train_init(struct nv50_disp_priv *, int, int, int, u16, u16, | 81 | int nv94_sor_dp_train_init(struct nv50_disp_priv *, int, int, int, u16, u16, |
79 | u32, struct dcb_output *); | 82 | u32, struct dcb_output *); |
@@ -93,10 +96,7 @@ int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32, | |||
93 | int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32, | 96 | int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32, |
94 | struct dcb_output *); | 97 | struct dcb_output *); |
95 | 98 | ||
96 | #define PIOR_MTHD(n) (n), (n) + 0x03 | 99 | int nv50_pior_power(NV50_DISP_MTHD_V1); |
97 | |||
98 | int nv50_pior_mthd(struct nouveau_object *, u32, void *, u32); | ||
99 | int nv50_pior_power(struct nv50_disp_priv *, int, u32); | ||
100 | 100 | ||
101 | struct nv50_disp_base { | 101 | struct nv50_disp_base { |
102 | struct nouveau_parent base; | 102 | struct nouveau_parent base; |
@@ -104,14 +104,19 @@ struct nv50_disp_base { | |||
104 | u32 chan; | 104 | u32 chan; |
105 | }; | 105 | }; |
106 | 106 | ||
107 | struct nv50_disp_chan_impl { | ||
108 | struct nouveau_ofuncs base; | ||
109 | int chid; | ||
110 | int (*attach)(struct nouveau_object *, struct nouveau_object *, u32); | ||
111 | void (*detach)(struct nouveau_object *, int); | ||
112 | }; | ||
113 | |||
107 | struct nv50_disp_chan { | 114 | struct nv50_disp_chan { |
108 | struct nouveau_namedb base; | 115 | struct nouveau_namedb base; |
109 | int chid; | 116 | int chid; |
110 | }; | 117 | }; |
111 | 118 | ||
112 | int nv50_disp_chan_create_(struct nouveau_object *, struct nouveau_object *, | 119 | int nv50_disp_chan_map(struct nouveau_object *, u64 *, u32 *); |
113 | struct nouveau_oclass *, int, int, void **); | ||
114 | void nv50_disp_chan_destroy(struct nv50_disp_chan *); | ||
115 | u32 nv50_disp_chan_rd32(struct nouveau_object *, u64); | 120 | u32 nv50_disp_chan_rd32(struct nouveau_object *, u64); |
116 | void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32); | 121 | void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32); |
117 | 122 | ||
@@ -120,20 +125,20 @@ void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32); | |||
120 | #define nv50_disp_chan_fini(a,b) \ | 125 | #define nv50_disp_chan_fini(a,b) \ |
121 | nouveau_namedb_fini(&(a)->base, (b)) | 126 | nouveau_namedb_fini(&(a)->base, (b)) |
122 | 127 | ||
123 | int nv50_disp_dmac_create_(struct nouveau_object *, struct nouveau_object *, | ||
124 | struct nouveau_oclass *, u32, int, int, void **); | ||
125 | void nv50_disp_dmac_dtor(struct nouveau_object *); | ||
126 | |||
127 | struct nv50_disp_dmac { | 128 | struct nv50_disp_dmac { |
128 | struct nv50_disp_chan base; | 129 | struct nv50_disp_chan base; |
129 | struct nouveau_dmaobj *pushdma; | 130 | struct nouveau_dmaobj *pushdma; |
130 | u32 push; | 131 | u32 push; |
131 | }; | 132 | }; |
132 | 133 | ||
134 | void nv50_disp_dmac_dtor(struct nouveau_object *); | ||
135 | |||
133 | struct nv50_disp_pioc { | 136 | struct nv50_disp_pioc { |
134 | struct nv50_disp_chan base; | 137 | struct nv50_disp_chan base; |
135 | }; | 138 | }; |
136 | 139 | ||
140 | void nv50_disp_pioc_dtor(struct nouveau_object *); | ||
141 | |||
137 | struct nv50_disp_mthd_list { | 142 | struct nv50_disp_mthd_list { |
138 | u32 mthd; | 143 | u32 mthd; |
139 | u32 addr; | 144 | u32 addr; |
@@ -154,47 +159,67 @@ struct nv50_disp_mthd_chan { | |||
154 | } data[]; | 159 | } data[]; |
155 | }; | 160 | }; |
156 | 161 | ||
157 | extern struct nouveau_ofuncs nv50_disp_mast_ofuncs; | 162 | extern struct nv50_disp_chan_impl nv50_disp_mast_ofuncs; |
163 | int nv50_disp_mast_ctor(struct nouveau_object *, struct nouveau_object *, | ||
164 | struct nouveau_oclass *, void *, u32, | ||
165 | struct nouveau_object **); | ||
158 | extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_base; | 166 | extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_base; |
159 | extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_sor; | 167 | extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_sor; |
160 | extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_pior; | 168 | extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_pior; |
161 | extern struct nouveau_ofuncs nv50_disp_sync_ofuncs; | 169 | extern struct nv50_disp_chan_impl nv50_disp_sync_ofuncs; |
170 | int nv50_disp_sync_ctor(struct nouveau_object *, struct nouveau_object *, | ||
171 | struct nouveau_oclass *, void *, u32, | ||
172 | struct nouveau_object **); | ||
162 | extern const struct nv50_disp_mthd_list nv50_disp_sync_mthd_image; | 173 | extern const struct nv50_disp_mthd_list nv50_disp_sync_mthd_image; |
163 | extern struct nouveau_ofuncs nv50_disp_ovly_ofuncs; | 174 | extern struct nv50_disp_chan_impl nv50_disp_ovly_ofuncs; |
175 | int nv50_disp_ovly_ctor(struct nouveau_object *, struct nouveau_object *, | ||
176 | struct nouveau_oclass *, void *, u32, | ||
177 | struct nouveau_object **); | ||
164 | extern const struct nv50_disp_mthd_list nv50_disp_ovly_mthd_base; | 178 | extern const struct nv50_disp_mthd_list nv50_disp_ovly_mthd_base; |
165 | extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs; | 179 | extern struct nv50_disp_chan_impl nv50_disp_oimm_ofuncs; |
166 | extern struct nouveau_ofuncs nv50_disp_curs_ofuncs; | 180 | int nv50_disp_oimm_ctor(struct nouveau_object *, struct nouveau_object *, |
181 | struct nouveau_oclass *, void *, u32, | ||
182 | struct nouveau_object **); | ||
183 | extern struct nv50_disp_chan_impl nv50_disp_curs_ofuncs; | ||
184 | int nv50_disp_curs_ctor(struct nouveau_object *, struct nouveau_object *, | ||
185 | struct nouveau_oclass *, void *, u32, | ||
186 | struct nouveau_object **); | ||
167 | extern struct nouveau_ofuncs nv50_disp_base_ofuncs; | 187 | extern struct nouveau_ofuncs nv50_disp_base_ofuncs; |
188 | int nv50_disp_base_ctor(struct nouveau_object *, struct nouveau_object *, | ||
189 | struct nouveau_oclass *, void *, u32, | ||
190 | struct nouveau_object **); | ||
191 | void nv50_disp_base_dtor(struct nouveau_object *); | ||
192 | extern struct nouveau_omthds nv50_disp_base_omthds[]; | ||
168 | extern struct nouveau_oclass nv50_disp_cclass; | 193 | extern struct nouveau_oclass nv50_disp_cclass; |
169 | void nv50_disp_mthd_chan(struct nv50_disp_priv *, int debug, int head, | 194 | void nv50_disp_mthd_chan(struct nv50_disp_priv *, int debug, int head, |
170 | const struct nv50_disp_mthd_chan *); | 195 | const struct nv50_disp_mthd_chan *); |
171 | void nv50_disp_intr_supervisor(struct work_struct *); | 196 | void nv50_disp_intr_supervisor(struct work_struct *); |
172 | void nv50_disp_intr(struct nouveau_subdev *); | 197 | void nv50_disp_intr(struct nouveau_subdev *); |
198 | extern const struct nvkm_event_func nv50_disp_vblank_func; | ||
173 | 199 | ||
174 | extern const struct nv50_disp_mthd_chan nv84_disp_mast_mthd_chan; | 200 | extern const struct nv50_disp_mthd_chan nv84_disp_mast_mthd_chan; |
175 | extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_dac; | 201 | extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_dac; |
176 | extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_head; | 202 | extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_head; |
177 | extern const struct nv50_disp_mthd_chan nv84_disp_sync_mthd_chan; | 203 | extern const struct nv50_disp_mthd_chan nv84_disp_sync_mthd_chan; |
178 | extern const struct nv50_disp_mthd_chan nv84_disp_ovly_mthd_chan; | 204 | extern const struct nv50_disp_mthd_chan nv84_disp_ovly_mthd_chan; |
179 | extern struct nouveau_omthds nv84_disp_base_omthds[]; | ||
180 | 205 | ||
181 | extern const struct nv50_disp_mthd_chan nv94_disp_mast_mthd_chan; | 206 | extern const struct nv50_disp_mthd_chan nv94_disp_mast_mthd_chan; |
182 | 207 | ||
183 | extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs; | 208 | extern struct nv50_disp_chan_impl nvd0_disp_mast_ofuncs; |
184 | extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_base; | 209 | extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_base; |
185 | extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_dac; | 210 | extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_dac; |
186 | extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_sor; | 211 | extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_sor; |
187 | extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_pior; | 212 | extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_pior; |
188 | extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs; | 213 | extern struct nv50_disp_chan_impl nvd0_disp_sync_ofuncs; |
189 | extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs; | 214 | extern struct nv50_disp_chan_impl nvd0_disp_ovly_ofuncs; |
190 | extern const struct nv50_disp_mthd_chan nvd0_disp_sync_mthd_chan; | 215 | extern const struct nv50_disp_mthd_chan nvd0_disp_sync_mthd_chan; |
191 | extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs; | 216 | extern struct nv50_disp_chan_impl nvd0_disp_oimm_ofuncs; |
192 | extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs; | 217 | extern struct nv50_disp_chan_impl nvd0_disp_curs_ofuncs; |
193 | extern struct nouveau_omthds nvd0_disp_base_omthds[]; | ||
194 | extern struct nouveau_ofuncs nvd0_disp_base_ofuncs; | 218 | extern struct nouveau_ofuncs nvd0_disp_base_ofuncs; |
195 | extern struct nouveau_oclass nvd0_disp_cclass; | 219 | extern struct nouveau_oclass nvd0_disp_cclass; |
196 | void nvd0_disp_intr_supervisor(struct work_struct *); | 220 | void nvd0_disp_intr_supervisor(struct work_struct *); |
197 | void nvd0_disp_intr(struct nouveau_subdev *); | 221 | void nvd0_disp_intr(struct nouveau_subdev *); |
222 | extern const struct nvkm_event_func nvd0_disp_vblank_func; | ||
198 | 223 | ||
199 | extern const struct nv50_disp_mthd_chan nve0_disp_mast_mthd_chan; | 224 | extern const struct nv50_disp_mthd_chan nve0_disp_mast_mthd_chan; |
200 | extern const struct nv50_disp_mthd_chan nve0_disp_ovly_mthd_chan; | 225 | extern const struct nv50_disp_mthd_chan nve0_disp_ovly_mthd_chan; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c index 1cc62e434683..788ced1b6182 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <engine/software.h> | 25 | #include <engine/software.h> |
26 | #include <engine/disp.h> | 26 | #include <engine/disp.h> |
27 | 27 | ||
28 | #include <core/class.h> | 28 | #include <nvif/class.h> |
29 | 29 | ||
30 | #include "nv50.h" | 30 | #include "nv50.h" |
31 | 31 | ||
@@ -204,31 +204,17 @@ nv84_disp_ovly_mthd_chan = { | |||
204 | 204 | ||
205 | static struct nouveau_oclass | 205 | static struct nouveau_oclass |
206 | nv84_disp_sclass[] = { | 206 | nv84_disp_sclass[] = { |
207 | { NV84_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, | 207 | { G82_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base }, |
208 | { NV84_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, | 208 | { G82_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base }, |
209 | { NV84_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, | 209 | { G82_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base }, |
210 | { NV84_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, | 210 | { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base }, |
211 | { NV84_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, | 211 | { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base }, |
212 | {} | 212 | {} |
213 | }; | 213 | }; |
214 | 214 | ||
215 | struct nouveau_omthds | ||
216 | nv84_disp_base_omthds[] = { | ||
217 | { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos }, | ||
218 | { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, | ||
219 | { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, | ||
220 | { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, | ||
221 | { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, | ||
222 | { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, | ||
223 | { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd }, | ||
224 | { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd }, | ||
225 | { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd }, | ||
226 | {}, | ||
227 | }; | ||
228 | |||
229 | static struct nouveau_oclass | 215 | static struct nouveau_oclass |
230 | nv84_disp_base_oclass[] = { | 216 | nv84_disp_base_oclass[] = { |
231 | { NV84_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds }, | 217 | { G82_DISP, &nv50_disp_base_ofuncs }, |
232 | {} | 218 | {} |
233 | }; | 219 | }; |
234 | 220 | ||
@@ -276,9 +262,11 @@ nv84_disp_oclass = &(struct nv50_disp_impl) { | |||
276 | .init = _nouveau_disp_init, | 262 | .init = _nouveau_disp_init, |
277 | .fini = _nouveau_disp_fini, | 263 | .fini = _nouveau_disp_fini, |
278 | }, | 264 | }, |
265 | .base.vblank = &nv50_disp_vblank_func, | ||
279 | .base.outp = nv50_disp_outp_sclass, | 266 | .base.outp = nv50_disp_outp_sclass, |
280 | .mthd.core = &nv84_disp_mast_mthd_chan, | 267 | .mthd.core = &nv84_disp_mast_mthd_chan, |
281 | .mthd.base = &nv84_disp_sync_mthd_chan, | 268 | .mthd.base = &nv84_disp_sync_mthd_chan, |
282 | .mthd.ovly = &nv84_disp_ovly_mthd_chan, | 269 | .mthd.ovly = &nv84_disp_ovly_mthd_chan, |
283 | .mthd.prev = 0x000004, | 270 | .mthd.prev = 0x000004, |
271 | .head.scanoutpos = nv50_disp_base_scanoutpos, | ||
284 | }.base.base; | 272 | }.base.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c index 4f718a9f5aef..fa79de906eae 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <engine/software.h> | 25 | #include <engine/software.h> |
26 | #include <engine/disp.h> | 26 | #include <engine/disp.h> |
27 | 27 | ||
28 | #include <core/class.h> | 28 | #include <nvif/class.h> |
29 | 29 | ||
30 | #include "nv50.h" | 30 | #include "nv50.h" |
31 | 31 | ||
@@ -63,32 +63,17 @@ nv94_disp_mast_mthd_chan = { | |||
63 | 63 | ||
64 | static struct nouveau_oclass | 64 | static struct nouveau_oclass |
65 | nv94_disp_sclass[] = { | 65 | nv94_disp_sclass[] = { |
66 | { NV94_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, | 66 | { GT206_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base }, |
67 | { NV94_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, | 67 | { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base }, |
68 | { NV94_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, | 68 | { GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base }, |
69 | { NV94_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, | 69 | { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base }, |
70 | { NV94_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, | 70 | { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base }, |
71 | {} | 71 | {} |
72 | }; | 72 | }; |
73 | 73 | ||
74 | static struct nouveau_omthds | ||
75 | nv94_disp_base_omthds[] = { | ||
76 | { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos }, | ||
77 | { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, | ||
78 | { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, | ||
79 | { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, | ||
80 | { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd }, | ||
81 | { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, | ||
82 | { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, | ||
83 | { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd }, | ||
84 | { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd }, | ||
85 | { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd }, | ||
86 | {}, | ||
87 | }; | ||
88 | |||
89 | static struct nouveau_oclass | 74 | static struct nouveau_oclass |
90 | nv94_disp_base_oclass[] = { | 75 | nv94_disp_base_oclass[] = { |
91 | { NV94_DISP_CLASS, &nv50_disp_base_ofuncs, nv94_disp_base_omthds }, | 76 | { GT206_DISP, &nv50_disp_base_ofuncs }, |
92 | {} | 77 | {} |
93 | }; | 78 | }; |
94 | 79 | ||
@@ -143,9 +128,11 @@ nv94_disp_oclass = &(struct nv50_disp_impl) { | |||
143 | .init = _nouveau_disp_init, | 128 | .init = _nouveau_disp_init, |
144 | .fini = _nouveau_disp_fini, | 129 | .fini = _nouveau_disp_fini, |
145 | }, | 130 | }, |
131 | .base.vblank = &nv50_disp_vblank_func, | ||
146 | .base.outp = nv94_disp_outp_sclass, | 132 | .base.outp = nv94_disp_outp_sclass, |
147 | .mthd.core = &nv94_disp_mast_mthd_chan, | 133 | .mthd.core = &nv94_disp_mast_mthd_chan, |
148 | .mthd.base = &nv84_disp_sync_mthd_chan, | 134 | .mthd.base = &nv84_disp_sync_mthd_chan, |
149 | .mthd.ovly = &nv84_disp_ovly_mthd_chan, | 135 | .mthd.ovly = &nv84_disp_ovly_mthd_chan, |
150 | .mthd.prev = 0x000004, | 136 | .mthd.prev = 0x000004, |
137 | .head.scanoutpos = nv50_disp_base_scanoutpos, | ||
151 | }.base.base; | 138 | }.base.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c index 6237a9a36f70..7af15f5d48dc 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <engine/software.h> | 25 | #include <engine/software.h> |
26 | #include <engine/disp.h> | 26 | #include <engine/disp.h> |
27 | 27 | ||
28 | #include <core/class.h> | 28 | #include <nvif/class.h> |
29 | 29 | ||
30 | #include "nv50.h" | 30 | #include "nv50.h" |
31 | 31 | ||
@@ -80,17 +80,17 @@ nva0_disp_ovly_mthd_chan = { | |||
80 | 80 | ||
81 | static struct nouveau_oclass | 81 | static struct nouveau_oclass |
82 | nva0_disp_sclass[] = { | 82 | nva0_disp_sclass[] = { |
83 | { NVA0_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, | 83 | { GT200_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base }, |
84 | { NVA0_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, | 84 | { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base }, |
85 | { NVA0_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, | 85 | { GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base }, |
86 | { NVA0_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, | 86 | { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base }, |
87 | { NVA0_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, | 87 | { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base }, |
88 | {} | 88 | {} |
89 | }; | 89 | }; |
90 | 90 | ||
91 | static struct nouveau_oclass | 91 | static struct nouveau_oclass |
92 | nva0_disp_base_oclass[] = { | 92 | nva0_disp_base_oclass[] = { |
93 | { NVA0_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds }, | 93 | { GT200_DISP, &nv50_disp_base_ofuncs }, |
94 | {} | 94 | {} |
95 | }; | 95 | }; |
96 | 96 | ||
@@ -138,9 +138,11 @@ nva0_disp_oclass = &(struct nv50_disp_impl) { | |||
138 | .init = _nouveau_disp_init, | 138 | .init = _nouveau_disp_init, |
139 | .fini = _nouveau_disp_fini, | 139 | .fini = _nouveau_disp_fini, |
140 | }, | 140 | }, |
141 | .base.vblank = &nv50_disp_vblank_func, | ||
141 | .base.outp = nv50_disp_outp_sclass, | 142 | .base.outp = nv50_disp_outp_sclass, |
142 | .mthd.core = &nv84_disp_mast_mthd_chan, | 143 | .mthd.core = &nv84_disp_mast_mthd_chan, |
143 | .mthd.base = &nv84_disp_sync_mthd_chan, | 144 | .mthd.base = &nv84_disp_sync_mthd_chan, |
144 | .mthd.ovly = &nva0_disp_ovly_mthd_chan, | 145 | .mthd.ovly = &nva0_disp_ovly_mthd_chan, |
145 | .mthd.prev = 0x000004, | 146 | .mthd.prev = 0x000004, |
147 | .head.scanoutpos = nv50_disp_base_scanoutpos, | ||
146 | }.base.base; | 148 | }.base.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c index 019124d4782b..6bd39448f8da 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <engine/software.h> | 25 | #include <engine/software.h> |
26 | #include <engine/disp.h> | 26 | #include <engine/disp.h> |
27 | 27 | ||
28 | #include <core/class.h> | 28 | #include <nvif/class.h> |
29 | 29 | ||
30 | #include "nv50.h" | 30 | #include "nv50.h" |
31 | 31 | ||
@@ -35,33 +35,17 @@ | |||
35 | 35 | ||
36 | static struct nouveau_oclass | 36 | static struct nouveau_oclass |
37 | nva3_disp_sclass[] = { | 37 | nva3_disp_sclass[] = { |
38 | { NVA3_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, | 38 | { GT214_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base }, |
39 | { NVA3_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, | 39 | { GT214_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base }, |
40 | { NVA3_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, | 40 | { GT214_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base }, |
41 | { NVA3_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, | 41 | { GT214_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base }, |
42 | { NVA3_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, | 42 | { GT214_DISP_CURSOR, &nv50_disp_curs_ofuncs.base }, |
43 | {} | 43 | {} |
44 | }; | 44 | }; |
45 | 45 | ||
46 | static struct nouveau_omthds | ||
47 | nva3_disp_base_omthds[] = { | ||
48 | { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos }, | ||
49 | { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, | ||
50 | { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd }, | ||
51 | { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, | ||
52 | { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, | ||
53 | { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd }, | ||
54 | { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, | ||
55 | { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, | ||
56 | { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd }, | ||
57 | { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd }, | ||
58 | { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd }, | ||
59 | {}, | ||
60 | }; | ||
61 | |||
62 | static struct nouveau_oclass | 46 | static struct nouveau_oclass |
63 | nva3_disp_base_oclass[] = { | 47 | nva3_disp_base_oclass[] = { |
64 | { NVA3_DISP_CLASS, &nv50_disp_base_ofuncs, nva3_disp_base_omthds }, | 48 | { GT214_DISP, &nv50_disp_base_ofuncs }, |
65 | {} | 49 | {} |
66 | }; | 50 | }; |
67 | 51 | ||
@@ -110,9 +94,11 @@ nva3_disp_oclass = &(struct nv50_disp_impl) { | |||
110 | .init = _nouveau_disp_init, | 94 | .init = _nouveau_disp_init, |
111 | .fini = _nouveau_disp_fini, | 95 | .fini = _nouveau_disp_fini, |
112 | }, | 96 | }, |
97 | .base.vblank = &nv50_disp_vblank_func, | ||
113 | .base.outp = nv94_disp_outp_sclass, | 98 | .base.outp = nv94_disp_outp_sclass, |
114 | .mthd.core = &nv94_disp_mast_mthd_chan, | 99 | .mthd.core = &nv94_disp_mast_mthd_chan, |
115 | .mthd.base = &nv84_disp_sync_mthd_chan, | 100 | .mthd.base = &nv84_disp_sync_mthd_chan, |
116 | .mthd.ovly = &nv84_disp_ovly_mthd_chan, | 101 | .mthd.ovly = &nv84_disp_ovly_mthd_chan, |
117 | .mthd.prev = 0x000004, | 102 | .mthd.prev = 0x000004, |
103 | .head.scanoutpos = nv50_disp_base_scanoutpos, | ||
118 | }.base.base; | 104 | }.base.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c index fa30d8196f35..a4bb3c774ee1 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | |||
@@ -23,9 +23,11 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/object.h> | 25 | #include <core/object.h> |
26 | #include <core/client.h> | ||
26 | #include <core/parent.h> | 27 | #include <core/parent.h> |
27 | #include <core/handle.h> | 28 | #include <core/handle.h> |
28 | #include <core/class.h> | 29 | #include <nvif/unpack.h> |
30 | #include <nvif/class.h> | ||
29 | 31 | ||
30 | #include <engine/disp.h> | 32 | #include <engine/disp.h> |
31 | 33 | ||
@@ -265,30 +267,6 @@ nvd0_disp_mast_mthd_chan = { | |||
265 | }; | 267 | }; |
266 | 268 | ||
267 | static int | 269 | static int |
268 | nvd0_disp_mast_ctor(struct nouveau_object *parent, | ||
269 | struct nouveau_object *engine, | ||
270 | struct nouveau_oclass *oclass, void *data, u32 size, | ||
271 | struct nouveau_object **pobject) | ||
272 | { | ||
273 | struct nv50_display_mast_class *args = data; | ||
274 | struct nv50_disp_dmac *mast; | ||
275 | int ret; | ||
276 | |||
277 | if (size < sizeof(*args)) | ||
278 | return -EINVAL; | ||
279 | |||
280 | ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, | ||
281 | 0, sizeof(*mast), (void **)&mast); | ||
282 | *pobject = nv_object(mast); | ||
283 | if (ret) | ||
284 | return ret; | ||
285 | |||
286 | nv_parent(mast)->object_attach = nvd0_disp_dmac_object_attach; | ||
287 | nv_parent(mast)->object_detach = nvd0_disp_dmac_object_detach; | ||
288 | return 0; | ||
289 | } | ||
290 | |||
291 | static int | ||
292 | nvd0_disp_mast_init(struct nouveau_object *object) | 270 | nvd0_disp_mast_init(struct nouveau_object *object) |
293 | { | 271 | { |
294 | struct nv50_disp_priv *priv = (void *)object->engine; | 272 | struct nv50_disp_priv *priv = (void *)object->engine; |
@@ -342,14 +320,18 @@ nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend) | |||
342 | return nv50_disp_chan_fini(&mast->base, suspend); | 320 | return nv50_disp_chan_fini(&mast->base, suspend); |
343 | } | 321 | } |
344 | 322 | ||
345 | struct nouveau_ofuncs | 323 | struct nv50_disp_chan_impl |
346 | nvd0_disp_mast_ofuncs = { | 324 | nvd0_disp_mast_ofuncs = { |
347 | .ctor = nvd0_disp_mast_ctor, | 325 | .base.ctor = nv50_disp_mast_ctor, |
348 | .dtor = nv50_disp_dmac_dtor, | 326 | .base.dtor = nv50_disp_dmac_dtor, |
349 | .init = nvd0_disp_mast_init, | 327 | .base.init = nvd0_disp_mast_init, |
350 | .fini = nvd0_disp_mast_fini, | 328 | .base.fini = nvd0_disp_mast_fini, |
351 | .rd32 = nv50_disp_chan_rd32, | 329 | .base.map = nv50_disp_chan_map, |
352 | .wr32 = nv50_disp_chan_wr32, | 330 | .base.rd32 = nv50_disp_chan_rd32, |
331 | .base.wr32 = nv50_disp_chan_wr32, | ||
332 | .chid = 0, | ||
333 | .attach = nvd0_disp_dmac_object_attach, | ||
334 | .detach = nvd0_disp_dmac_object_detach, | ||
353 | }; | 335 | }; |
354 | 336 | ||
355 | /******************************************************************************* | 337 | /******************************************************************************* |
@@ -431,40 +413,18 @@ nvd0_disp_sync_mthd_chan = { | |||
431 | } | 413 | } |
432 | }; | 414 | }; |
433 | 415 | ||
434 | static int | 416 | struct nv50_disp_chan_impl |
435 | nvd0_disp_sync_ctor(struct nouveau_object *parent, | ||
436 | struct nouveau_object *engine, | ||
437 | struct nouveau_oclass *oclass, void *data, u32 size, | ||
438 | struct nouveau_object **pobject) | ||
439 | { | ||
440 | struct nv50_display_sync_class *args = data; | ||
441 | struct nv50_disp_priv *priv = (void *)engine; | ||
442 | struct nv50_disp_dmac *dmac; | ||
443 | int ret; | ||
444 | |||
445 | if (size < sizeof(*args) || args->head >= priv->head.nr) | ||
446 | return -EINVAL; | ||
447 | |||
448 | ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, | ||
449 | 1 + args->head, sizeof(*dmac), | ||
450 | (void **)&dmac); | ||
451 | *pobject = nv_object(dmac); | ||
452 | if (ret) | ||
453 | return ret; | ||
454 | |||
455 | nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach; | ||
456 | nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach; | ||
457 | return 0; | ||
458 | } | ||
459 | |||
460 | struct nouveau_ofuncs | ||
461 | nvd0_disp_sync_ofuncs = { | 417 | nvd0_disp_sync_ofuncs = { |
462 | .ctor = nvd0_disp_sync_ctor, | 418 | .base.ctor = nv50_disp_sync_ctor, |
463 | .dtor = nv50_disp_dmac_dtor, | 419 | .base.dtor = nv50_disp_dmac_dtor, |
464 | .init = nvd0_disp_dmac_init, | 420 | .base.init = nvd0_disp_dmac_init, |
465 | .fini = nvd0_disp_dmac_fini, | 421 | .base.fini = nvd0_disp_dmac_fini, |
466 | .rd32 = nv50_disp_chan_rd32, | 422 | .base.map = nv50_disp_chan_map, |
467 | .wr32 = nv50_disp_chan_wr32, | 423 | .base.rd32 = nv50_disp_chan_rd32, |
424 | .base.wr32 = nv50_disp_chan_wr32, | ||
425 | .chid = 1, | ||
426 | .attach = nvd0_disp_dmac_object_attach, | ||
427 | .detach = nvd0_disp_dmac_object_detach, | ||
468 | }; | 428 | }; |
469 | 429 | ||
470 | /******************************************************************************* | 430 | /******************************************************************************* |
@@ -533,40 +493,18 @@ nvd0_disp_ovly_mthd_chan = { | |||
533 | } | 493 | } |
534 | }; | 494 | }; |
535 | 495 | ||
536 | static int | 496 | struct nv50_disp_chan_impl |
537 | nvd0_disp_ovly_ctor(struct nouveau_object *parent, | ||
538 | struct nouveau_object *engine, | ||
539 | struct nouveau_oclass *oclass, void *data, u32 size, | ||
540 | struct nouveau_object **pobject) | ||
541 | { | ||
542 | struct nv50_display_ovly_class *args = data; | ||
543 | struct nv50_disp_priv *priv = (void *)engine; | ||
544 | struct nv50_disp_dmac *dmac; | ||
545 | int ret; | ||
546 | |||
547 | if (size < sizeof(*args) || args->head >= priv->head.nr) | ||
548 | return -EINVAL; | ||
549 | |||
550 | ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, | ||
551 | 5 + args->head, sizeof(*dmac), | ||
552 | (void **)&dmac); | ||
553 | *pobject = nv_object(dmac); | ||
554 | if (ret) | ||
555 | return ret; | ||
556 | |||
557 | nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach; | ||
558 | nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach; | ||
559 | return 0; | ||
560 | } | ||
561 | |||
562 | struct nouveau_ofuncs | ||
563 | nvd0_disp_ovly_ofuncs = { | 497 | nvd0_disp_ovly_ofuncs = { |
564 | .ctor = nvd0_disp_ovly_ctor, | 498 | .base.ctor = nv50_disp_ovly_ctor, |
565 | .dtor = nv50_disp_dmac_dtor, | 499 | .base.dtor = nv50_disp_dmac_dtor, |
566 | .init = nvd0_disp_dmac_init, | 500 | .base.init = nvd0_disp_dmac_init, |
567 | .fini = nvd0_disp_dmac_fini, | 501 | .base.fini = nvd0_disp_dmac_fini, |
568 | .rd32 = nv50_disp_chan_rd32, | 502 | .base.map = nv50_disp_chan_map, |
569 | .wr32 = nv50_disp_chan_wr32, | 503 | .base.rd32 = nv50_disp_chan_rd32, |
504 | .base.wr32 = nv50_disp_chan_wr32, | ||
505 | .chid = 5, | ||
506 | .attach = nvd0_disp_dmac_object_attach, | ||
507 | .detach = nvd0_disp_dmac_object_detach, | ||
570 | }; | 508 | }; |
571 | 509 | ||
572 | /******************************************************************************* | 510 | /******************************************************************************* |
@@ -574,23 +512,6 @@ nvd0_disp_ovly_ofuncs = { | |||
574 | ******************************************************************************/ | 512 | ******************************************************************************/ |
575 | 513 | ||
576 | static int | 514 | static int |
577 | nvd0_disp_pioc_create_(struct nouveau_object *parent, | ||
578 | struct nouveau_object *engine, | ||
579 | struct nouveau_oclass *oclass, int chid, | ||
580 | int length, void **pobject) | ||
581 | { | ||
582 | return nv50_disp_chan_create_(parent, engine, oclass, chid, | ||
583 | length, pobject); | ||
584 | } | ||
585 | |||
586 | static void | ||
587 | nvd0_disp_pioc_dtor(struct nouveau_object *object) | ||
588 | { | ||
589 | struct nv50_disp_pioc *pioc = (void *)object; | ||
590 | nv50_disp_chan_destroy(&pioc->base); | ||
591 | } | ||
592 | |||
593 | static int | ||
594 | nvd0_disp_pioc_init(struct nouveau_object *object) | 515 | nvd0_disp_pioc_init(struct nouveau_object *object) |
595 | { | 516 | { |
596 | struct nv50_disp_priv *priv = (void *)object->engine; | 517 | struct nv50_disp_priv *priv = (void *)object->engine; |
@@ -643,152 +564,68 @@ nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend) | |||
643 | * EVO immediate overlay channel objects | 564 | * EVO immediate overlay channel objects |
644 | ******************************************************************************/ | 565 | ******************************************************************************/ |
645 | 566 | ||
646 | static int | 567 | struct nv50_disp_chan_impl |
647 | nvd0_disp_oimm_ctor(struct nouveau_object *parent, | ||
648 | struct nouveau_object *engine, | ||
649 | struct nouveau_oclass *oclass, void *data, u32 size, | ||
650 | struct nouveau_object **pobject) | ||
651 | { | ||
652 | struct nv50_display_oimm_class *args = data; | ||
653 | struct nv50_disp_priv *priv = (void *)engine; | ||
654 | struct nv50_disp_pioc *pioc; | ||
655 | int ret; | ||
656 | |||
657 | if (size < sizeof(*args) || args->head >= priv->head.nr) | ||
658 | return -EINVAL; | ||
659 | |||
660 | ret = nvd0_disp_pioc_create_(parent, engine, oclass, 9 + args->head, | ||
661 | sizeof(*pioc), (void **)&pioc); | ||
662 | *pobject = nv_object(pioc); | ||
663 | if (ret) | ||
664 | return ret; | ||
665 | |||
666 | return 0; | ||
667 | } | ||
668 | |||
669 | struct nouveau_ofuncs | ||
670 | nvd0_disp_oimm_ofuncs = { | 568 | nvd0_disp_oimm_ofuncs = { |
671 | .ctor = nvd0_disp_oimm_ctor, | 569 | .base.ctor = nv50_disp_oimm_ctor, |
672 | .dtor = nvd0_disp_pioc_dtor, | 570 | .base.dtor = nv50_disp_pioc_dtor, |
673 | .init = nvd0_disp_pioc_init, | 571 | .base.init = nvd0_disp_pioc_init, |
674 | .fini = nvd0_disp_pioc_fini, | 572 | .base.fini = nvd0_disp_pioc_fini, |
675 | .rd32 = nv50_disp_chan_rd32, | 573 | .base.map = nv50_disp_chan_map, |
676 | .wr32 = nv50_disp_chan_wr32, | 574 | .base.rd32 = nv50_disp_chan_rd32, |
575 | .base.wr32 = nv50_disp_chan_wr32, | ||
576 | .chid = 9, | ||
677 | }; | 577 | }; |
678 | 578 | ||
679 | /******************************************************************************* | 579 | /******************************************************************************* |
680 | * EVO cursor channel objects | 580 | * EVO cursor channel objects |
681 | ******************************************************************************/ | 581 | ******************************************************************************/ |
682 | 582 | ||
683 | static int | 583 | struct nv50_disp_chan_impl |
684 | nvd0_disp_curs_ctor(struct nouveau_object *parent, | ||
685 | struct nouveau_object *engine, | ||
686 | struct nouveau_oclass *oclass, void *data, u32 size, | ||
687 | struct nouveau_object **pobject) | ||
688 | { | ||
689 | struct nv50_display_curs_class *args = data; | ||
690 | struct nv50_disp_priv *priv = (void *)engine; | ||
691 | struct nv50_disp_pioc *pioc; | ||
692 | int ret; | ||
693 | |||
694 | if (size < sizeof(*args) || args->head >= priv->head.nr) | ||
695 | return -EINVAL; | ||
696 | |||
697 | ret = nvd0_disp_pioc_create_(parent, engine, oclass, 13 + args->head, | ||
698 | sizeof(*pioc), (void **)&pioc); | ||
699 | *pobject = nv_object(pioc); | ||
700 | if (ret) | ||
701 | return ret; | ||
702 | |||
703 | return 0; | ||
704 | } | ||
705 | |||
706 | struct nouveau_ofuncs | ||
707 | nvd0_disp_curs_ofuncs = { | 584 | nvd0_disp_curs_ofuncs = { |
708 | .ctor = nvd0_disp_curs_ctor, | 585 | .base.ctor = nv50_disp_curs_ctor, |
709 | .dtor = nvd0_disp_pioc_dtor, | 586 | .base.dtor = nv50_disp_pioc_dtor, |
710 | .init = nvd0_disp_pioc_init, | 587 | .base.init = nvd0_disp_pioc_init, |
711 | .fini = nvd0_disp_pioc_fini, | 588 | .base.fini = nvd0_disp_pioc_fini, |
712 | .rd32 = nv50_disp_chan_rd32, | 589 | .base.map = nv50_disp_chan_map, |
713 | .wr32 = nv50_disp_chan_wr32, | 590 | .base.rd32 = nv50_disp_chan_rd32, |
591 | .base.wr32 = nv50_disp_chan_wr32, | ||
592 | .chid = 13, | ||
714 | }; | 593 | }; |
715 | 594 | ||
716 | /******************************************************************************* | 595 | /******************************************************************************* |
717 | * Base display object | 596 | * Base display object |
718 | ******************************************************************************/ | 597 | ******************************************************************************/ |
719 | 598 | ||
720 | static int | 599 | int |
721 | nvd0_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd, | 600 | nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0) |
722 | void *data, u32 size) | ||
723 | { | ||
724 | struct nv50_disp_priv *priv = (void *)object->engine; | ||
725 | struct nv04_display_scanoutpos *args = data; | ||
726 | const int head = (mthd & NV50_DISP_MTHD_HEAD); | ||
727 | u32 blanke, blanks, total; | ||
728 | |||
729 | if (size < sizeof(*args) || head >= priv->head.nr) | ||
730 | return -EINVAL; | ||
731 | |||
732 | total = nv_rd32(priv, 0x640414 + (head * 0x300)); | ||
733 | blanke = nv_rd32(priv, 0x64041c + (head * 0x300)); | ||
734 | blanks = nv_rd32(priv, 0x640420 + (head * 0x300)); | ||
735 | |||
736 | args->vblanke = (blanke & 0xffff0000) >> 16; | ||
737 | args->hblanke = (blanke & 0x0000ffff); | ||
738 | args->vblanks = (blanks & 0xffff0000) >> 16; | ||
739 | args->hblanks = (blanks & 0x0000ffff); | ||
740 | args->vtotal = ( total & 0xffff0000) >> 16; | ||
741 | args->htotal = ( total & 0x0000ffff); | ||
742 | |||
743 | args->time[0] = ktime_to_ns(ktime_get()); | ||
744 | args->vline = nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff; | ||
745 | args->time[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */ | ||
746 | args->hline = nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff; | ||
747 | return 0; | ||
748 | } | ||
749 | |||
750 | static void | ||
751 | nvd0_disp_base_vblank_enable(struct nouveau_event *event, int type, int head) | ||
752 | { | ||
753 | nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001); | ||
754 | } | ||
755 | |||
756 | static void | ||
757 | nvd0_disp_base_vblank_disable(struct nouveau_event *event, int type, int head) | ||
758 | { | 601 | { |
759 | nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000); | 602 | const u32 total = nv_rd32(priv, 0x640414 + (head * 0x300)); |
760 | } | 603 | const u32 blanke = nv_rd32(priv, 0x64041c + (head * 0x300)); |
761 | 604 | const u32 blanks = nv_rd32(priv, 0x640420 + (head * 0x300)); | |
762 | static int | 605 | union { |
763 | nvd0_disp_base_ctor(struct nouveau_object *parent, | 606 | struct nv04_disp_scanoutpos_v0 v0; |
764 | struct nouveau_object *engine, | 607 | } *args = data; |
765 | struct nouveau_oclass *oclass, void *data, u32 size, | ||
766 | struct nouveau_object **pobject) | ||
767 | { | ||
768 | struct nv50_disp_priv *priv = (void *)engine; | ||
769 | struct nv50_disp_base *base; | ||
770 | int ret; | 608 | int ret; |
771 | 609 | ||
772 | ret = nouveau_parent_create(parent, engine, oclass, 0, | 610 | nv_ioctl(object, "disp scanoutpos size %d\n", size); |
773 | priv->sclass, 0, &base); | 611 | if (nvif_unpack(args->v0, 0, 0, false)) { |
774 | *pobject = nv_object(base); | 612 | nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version); |
775 | if (ret) | 613 | args->v0.vblanke = (blanke & 0xffff0000) >> 16; |
614 | args->v0.hblanke = (blanke & 0x0000ffff); | ||
615 | args->v0.vblanks = (blanks & 0xffff0000) >> 16; | ||
616 | args->v0.hblanks = (blanks & 0x0000ffff); | ||
617 | args->v0.vtotal = ( total & 0xffff0000) >> 16; | ||
618 | args->v0.htotal = ( total & 0x0000ffff); | ||
619 | args->v0.time[0] = ktime_to_ns(ktime_get()); | ||
620 | args->v0.vline = /* vline read locks hline */ | ||
621 | nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff; | ||
622 | args->v0.time[1] = ktime_to_ns(ktime_get()); | ||
623 | args->v0.hline = | ||
624 | nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff; | ||
625 | } else | ||
776 | return ret; | 626 | return ret; |
777 | 627 | ||
778 | priv->base.vblank->priv = priv; | 628 | return 0; |
779 | priv->base.vblank->enable = nvd0_disp_base_vblank_enable; | ||
780 | priv->base.vblank->disable = nvd0_disp_base_vblank_disable; | ||
781 | |||
782 | return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0, | ||
783 | &base->ramht); | ||
784 | } | ||
785 | |||
786 | static void | ||
787 | nvd0_disp_base_dtor(struct nouveau_object *object) | ||
788 | { | ||
789 | struct nv50_disp_base *base = (void *)object; | ||
790 | nouveau_ramht_ref(NULL, &base->ramht); | ||
791 | nouveau_parent_destroy(&base->base); | ||
792 | } | 629 | } |
793 | 630 | ||
794 | static int | 631 | static int |
@@ -874,41 +711,27 @@ nvd0_disp_base_fini(struct nouveau_object *object, bool suspend) | |||
874 | 711 | ||
875 | struct nouveau_ofuncs | 712 | struct nouveau_ofuncs |
876 | nvd0_disp_base_ofuncs = { | 713 | nvd0_disp_base_ofuncs = { |
877 | .ctor = nvd0_disp_base_ctor, | 714 | .ctor = nv50_disp_base_ctor, |
878 | .dtor = nvd0_disp_base_dtor, | 715 | .dtor = nv50_disp_base_dtor, |
879 | .init = nvd0_disp_base_init, | 716 | .init = nvd0_disp_base_init, |
880 | .fini = nvd0_disp_base_fini, | 717 | .fini = nvd0_disp_base_fini, |
881 | }; | 718 | .mthd = nv50_disp_base_mthd, |
882 | 719 | .ntfy = nouveau_disp_ntfy, | |
883 | struct nouveau_omthds | ||
884 | nvd0_disp_base_omthds[] = { | ||
885 | { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nvd0_disp_base_scanoutpos }, | ||
886 | { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, | ||
887 | { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd }, | ||
888 | { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, | ||
889 | { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, | ||
890 | { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd }, | ||
891 | { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, | ||
892 | { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, | ||
893 | { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd }, | ||
894 | { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd }, | ||
895 | { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd }, | ||
896 | {}, | ||
897 | }; | 720 | }; |
898 | 721 | ||
899 | static struct nouveau_oclass | 722 | static struct nouveau_oclass |
900 | nvd0_disp_base_oclass[] = { | 723 | nvd0_disp_base_oclass[] = { |
901 | { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds }, | 724 | { GF110_DISP, &nvd0_disp_base_ofuncs }, |
902 | {} | 725 | {} |
903 | }; | 726 | }; |
904 | 727 | ||
905 | static struct nouveau_oclass | 728 | static struct nouveau_oclass |
906 | nvd0_disp_sclass[] = { | 729 | nvd0_disp_sclass[] = { |
907 | { NVD0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs }, | 730 | { GF110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base }, |
908 | { NVD0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs }, | 731 | { GF110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base }, |
909 | { NVD0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs }, | 732 | { GF110_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base }, |
910 | { NVD0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs }, | 733 | { GF110_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base }, |
911 | { NVD0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs }, | 734 | { GF110_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base }, |
912 | {} | 735 | {} |
913 | }; | 736 | }; |
914 | 737 | ||
@@ -916,6 +739,27 @@ nvd0_disp_sclass[] = { | |||
916 | * Display engine implementation | 739 | * Display engine implementation |
917 | ******************************************************************************/ | 740 | ******************************************************************************/ |
918 | 741 | ||
742 | static void | ||
743 | nvd0_disp_vblank_init(struct nvkm_event *event, int type, int head) | ||
744 | { | ||
745 | struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank); | ||
746 | nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001); | ||
747 | } | ||
748 | |||
749 | static void | ||
750 | nvd0_disp_vblank_fini(struct nvkm_event *event, int type, int head) | ||
751 | { | ||
752 | struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank); | ||
753 | nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000); | ||
754 | } | ||
755 | |||
756 | const struct nvkm_event_func | ||
757 | nvd0_disp_vblank_func = { | ||
758 | .ctor = nouveau_disp_vblank_ctor, | ||
759 | .init = nvd0_disp_vblank_init, | ||
760 | .fini = nvd0_disp_vblank_fini, | ||
761 | }; | ||
762 | |||
919 | static struct nvkm_output * | 763 | static struct nvkm_output * |
920 | exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl, | 764 | exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl, |
921 | u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, | 765 | u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, |
@@ -1343,7 +1187,7 @@ nvd0_disp_intr(struct nouveau_subdev *subdev) | |||
1343 | if (mask & intr) { | 1187 | if (mask & intr) { |
1344 | u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800)); | 1188 | u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800)); |
1345 | if (stat & 0x00000001) | 1189 | if (stat & 0x00000001) |
1346 | nouveau_event_trigger(priv->base.vblank, 1, i); | 1190 | nouveau_disp_vblank(&priv->base, i); |
1347 | nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0); | 1191 | nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0); |
1348 | nv_rd32(priv, 0x6100c0 + (i * 0x800)); | 1192 | nv_rd32(priv, 0x6100c0 + (i * 0x800)); |
1349 | } | 1193 | } |
@@ -1396,9 +1240,11 @@ nvd0_disp_oclass = &(struct nv50_disp_impl) { | |||
1396 | .init = _nouveau_disp_init, | 1240 | .init = _nouveau_disp_init, |
1397 | .fini = _nouveau_disp_fini, | 1241 | .fini = _nouveau_disp_fini, |
1398 | }, | 1242 | }, |
1243 | .base.vblank = &nvd0_disp_vblank_func, | ||
1399 | .base.outp = nvd0_disp_outp_sclass, | 1244 | .base.outp = nvd0_disp_outp_sclass, |
1400 | .mthd.core = &nvd0_disp_mast_mthd_chan, | 1245 | .mthd.core = &nvd0_disp_mast_mthd_chan, |
1401 | .mthd.base = &nvd0_disp_sync_mthd_chan, | 1246 | .mthd.base = &nvd0_disp_sync_mthd_chan, |
1402 | .mthd.ovly = &nvd0_disp_ovly_mthd_chan, | 1247 | .mthd.ovly = &nvd0_disp_ovly_mthd_chan, |
1403 | .mthd.prev = -0x020000, | 1248 | .mthd.prev = -0x020000, |
1249 | .head.scanoutpos = nvd0_disp_base_scanoutpos, | ||
1404 | }.base.base; | 1250 | }.base.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c index 11328e3f5df1..47fef1e398c4 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <engine/software.h> | 25 | #include <engine/software.h> |
26 | #include <engine/disp.h> | 26 | #include <engine/disp.h> |
27 | 27 | ||
28 | #include <core/class.h> | 28 | #include <nvif/class.h> |
29 | 29 | ||
30 | #include "nv50.h" | 30 | #include "nv50.h" |
31 | 31 | ||
@@ -200,17 +200,17 @@ nve0_disp_ovly_mthd_chan = { | |||
200 | 200 | ||
201 | static struct nouveau_oclass | 201 | static struct nouveau_oclass |
202 | nve0_disp_sclass[] = { | 202 | nve0_disp_sclass[] = { |
203 | { NVE0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs }, | 203 | { GK104_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base }, |
204 | { NVE0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs }, | 204 | { GK104_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base }, |
205 | { NVE0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs }, | 205 | { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base }, |
206 | { NVE0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs }, | 206 | { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base }, |
207 | { NVE0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs }, | 207 | { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base }, |
208 | {} | 208 | {} |
209 | }; | 209 | }; |
210 | 210 | ||
211 | static struct nouveau_oclass | 211 | static struct nouveau_oclass |
212 | nve0_disp_base_oclass[] = { | 212 | nve0_disp_base_oclass[] = { |
213 | { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds }, | 213 | { GK104_DISP, &nvd0_disp_base_ofuncs }, |
214 | {} | 214 | {} |
215 | }; | 215 | }; |
216 | 216 | ||
@@ -258,9 +258,11 @@ nve0_disp_oclass = &(struct nv50_disp_impl) { | |||
258 | .init = _nouveau_disp_init, | 258 | .init = _nouveau_disp_init, |
259 | .fini = _nouveau_disp_fini, | 259 | .fini = _nouveau_disp_fini, |
260 | }, | 260 | }, |
261 | .base.vblank = &nvd0_disp_vblank_func, | ||
261 | .base.outp = nvd0_disp_outp_sclass, | 262 | .base.outp = nvd0_disp_outp_sclass, |
262 | .mthd.core = &nve0_disp_mast_mthd_chan, | 263 | .mthd.core = &nve0_disp_mast_mthd_chan, |
263 | .mthd.base = &nvd0_disp_sync_mthd_chan, | 264 | .mthd.base = &nvd0_disp_sync_mthd_chan, |
264 | .mthd.ovly = &nve0_disp_ovly_mthd_chan, | 265 | .mthd.ovly = &nve0_disp_ovly_mthd_chan, |
265 | .mthd.prev = -0x020000, | 266 | .mthd.prev = -0x020000, |
267 | .head.scanoutpos = nvd0_disp_base_scanoutpos, | ||
266 | }.base.base; | 268 | }.base.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c index 104388081d73..04bda4ac4ed3 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <engine/software.h> | 25 | #include <engine/software.h> |
26 | #include <engine/disp.h> | 26 | #include <engine/disp.h> |
27 | 27 | ||
28 | #include <core/class.h> | 28 | #include <nvif/class.h> |
29 | 29 | ||
30 | #include "nv50.h" | 30 | #include "nv50.h" |
31 | 31 | ||
@@ -35,17 +35,17 @@ | |||
35 | 35 | ||
36 | static struct nouveau_oclass | 36 | static struct nouveau_oclass |
37 | nvf0_disp_sclass[] = { | 37 | nvf0_disp_sclass[] = { |
38 | { NVF0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs }, | 38 | { GK110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base }, |
39 | { NVF0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs }, | 39 | { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base }, |
40 | { NVF0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs }, | 40 | { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base }, |
41 | { NVF0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs }, | 41 | { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base }, |
42 | { NVF0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs }, | 42 | { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base }, |
43 | {} | 43 | {} |
44 | }; | 44 | }; |
45 | 45 | ||
46 | static struct nouveau_oclass | 46 | static struct nouveau_oclass |
47 | nvf0_disp_base_oclass[] = { | 47 | nvf0_disp_base_oclass[] = { |
48 | { NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds }, | 48 | { GK110_DISP, &nvd0_disp_base_ofuncs }, |
49 | {} | 49 | {} |
50 | }; | 50 | }; |
51 | 51 | ||
@@ -93,9 +93,11 @@ nvf0_disp_oclass = &(struct nv50_disp_impl) { | |||
93 | .init = _nouveau_disp_init, | 93 | .init = _nouveau_disp_init, |
94 | .fini = _nouveau_disp_fini, | 94 | .fini = _nouveau_disp_fini, |
95 | }, | 95 | }, |
96 | .base.vblank = &nvd0_disp_vblank_func, | ||
96 | .base.outp = nvd0_disp_outp_sclass, | 97 | .base.outp = nvd0_disp_outp_sclass, |
97 | .mthd.core = &nve0_disp_mast_mthd_chan, | 98 | .mthd.core = &nve0_disp_mast_mthd_chan, |
98 | .mthd.base = &nvd0_disp_sync_mthd_chan, | 99 | .mthd.base = &nvd0_disp_sync_mthd_chan, |
99 | .mthd.ovly = &nve0_disp_ovly_mthd_chan, | 100 | .mthd.ovly = &nve0_disp_ovly_mthd_chan, |
100 | .mthd.prev = -0x020000, | 101 | .mthd.prev = -0x020000, |
102 | .head.scanoutpos = nvd0_disp_base_scanoutpos, | ||
101 | }.base.base; | 103 | }.base.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outp.c index ad9ba7ccec7f..a5ff00a9cedc 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/outp.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/outp.c | |||
@@ -78,6 +78,7 @@ nvkm_output_create_(struct nouveau_object *parent, | |||
78 | 78 | ||
79 | outp->info = *dcbE; | 79 | outp->info = *dcbE; |
80 | outp->index = index; | 80 | outp->index = index; |
81 | outp->or = ffs(outp->info.or) - 1; | ||
81 | 82 | ||
82 | DBG("type %02x loc %d or %d link %d con %x edid %x bus %d head %x\n", | 83 | DBG("type %02x loc %d or %d link %d con %x edid %x bus %d head %x\n", |
83 | dcbE->type, dcbE->location, dcbE->or, dcbE->type >= 2 ? | 84 | dcbE->type, dcbE->location, dcbE->or, dcbE->type >= 2 ? |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outp.h b/drivers/gpu/drm/nouveau/core/engine/disp/outp.h index bc76fbf85710..187f435ad0e2 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/outp.h +++ b/drivers/gpu/drm/nouveau/core/engine/disp/outp.h | |||
@@ -9,6 +9,7 @@ struct nvkm_output { | |||
9 | 9 | ||
10 | struct dcb_output info; | 10 | struct dcb_output info; |
11 | int index; | 11 | int index; |
12 | int or; | ||
12 | 13 | ||
13 | struct nouveau_i2c_port *port; | 14 | struct nouveau_i2c_port *port; |
14 | struct nouveau_i2c_port *edid; | 15 | struct nouveau_i2c_port *edid; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c index eb2d7789555d..6f6e2a898270 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c | |||
@@ -22,6 +22,9 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | ||
26 | #include <nvif/event.h> | ||
27 | |||
25 | #include <subdev/i2c.h> | 28 | #include <subdev/i2c.h> |
26 | 29 | ||
27 | #include "outpdp.h" | 30 | #include "outpdp.h" |
@@ -86,7 +89,7 @@ done: | |||
86 | atomic_set(&outp->lt.done, 0); | 89 | atomic_set(&outp->lt.done, 0); |
87 | schedule_work(&outp->lt.work); | 90 | schedule_work(&outp->lt.work); |
88 | } else { | 91 | } else { |
89 | nouveau_event_get(outp->irq); | 92 | nvkm_notify_get(&outp->irq); |
90 | } | 93 | } |
91 | 94 | ||
92 | if (wait) { | 95 | if (wait) { |
@@ -133,46 +136,59 @@ nvkm_output_dp_detect(struct nvkm_output_dp *outp) | |||
133 | } | 136 | } |
134 | } | 137 | } |
135 | 138 | ||
136 | static void | 139 | static int |
137 | nvkm_output_dp_service_work(struct work_struct *work) | 140 | nvkm_output_dp_hpd(struct nvkm_notify *notify) |
138 | { | 141 | { |
139 | struct nvkm_output_dp *outp = container_of(work, typeof(*outp), work); | 142 | struct nvkm_connector *conn = container_of(notify, typeof(*conn), hpd); |
140 | struct nouveau_disp *disp = nouveau_disp(outp); | 143 | struct nvkm_output_dp *outp; |
141 | int type = atomic_xchg(&outp->pending, 0); | 144 | struct nouveau_disp *disp = nouveau_disp(conn); |
142 | u32 send = 0; | 145 | const struct nvkm_i2c_ntfy_rep *line = notify->data; |
143 | 146 | struct nvif_notify_conn_rep_v0 rep = {}; | |
144 | if (type & (NVKM_I2C_PLUG | NVKM_I2C_UNPLUG)) { | 147 | |
145 | nvkm_output_dp_detect(outp); | 148 | list_for_each_entry(outp, &disp->outp, base.head) { |
146 | if (type & NVKM_I2C_UNPLUG) | 149 | if (outp->base.conn == conn && |
147 | send |= NVKM_HPD_UNPLUG; | 150 | outp->info.type == DCB_OUTPUT_DP) { |
148 | if (type & NVKM_I2C_PLUG) | 151 | DBG("HPD: %d\n", line->mask); |
149 | send |= NVKM_HPD_PLUG; | 152 | nvkm_output_dp_detect(outp); |
150 | nouveau_event_get(outp->base.conn->hpd.event); | 153 | |
151 | } | 154 | if (line->mask & NVKM_I2C_UNPLUG) |
152 | 155 | rep.mask |= NVIF_NOTIFY_CONN_V0_UNPLUG; | |
153 | if (type & NVKM_I2C_IRQ) { | 156 | if (line->mask & NVKM_I2C_PLUG) |
154 | nvkm_output_dp_train(&outp->base, 0, true); | 157 | rep.mask |= NVIF_NOTIFY_CONN_V0_PLUG; |
155 | send |= NVKM_HPD_IRQ; | 158 | |
159 | nvkm_event_send(&disp->hpd, rep.mask, conn->index, | ||
160 | &rep, sizeof(rep)); | ||
161 | return NVKM_NOTIFY_KEEP; | ||
162 | } | ||
156 | } | 163 | } |
157 | 164 | ||
158 | nouveau_event_trigger(disp->hpd, send, outp->base.info.connector); | 165 | WARN_ON(1); |
166 | return NVKM_NOTIFY_DROP; | ||
159 | } | 167 | } |
160 | 168 | ||
161 | static int | 169 | static int |
162 | nvkm_output_dp_service(void *data, u32 type, int index) | 170 | nvkm_output_dp_irq(struct nvkm_notify *notify) |
163 | { | 171 | { |
164 | struct nvkm_output_dp *outp = data; | 172 | struct nvkm_output_dp *outp = container_of(notify, typeof(*outp), irq); |
165 | DBG("HPD: %d\n", type); | 173 | struct nouveau_disp *disp = nouveau_disp(outp); |
166 | atomic_or(type, &outp->pending); | 174 | const struct nvkm_i2c_ntfy_rep *line = notify->data; |
167 | schedule_work(&outp->work); | 175 | struct nvif_notify_conn_rep_v0 rep = { |
168 | return NVKM_EVENT_DROP; | 176 | .mask = NVIF_NOTIFY_CONN_V0_IRQ, |
177 | }; | ||
178 | int index = outp->base.info.connector; | ||
179 | |||
180 | DBG("IRQ: %d\n", line->mask); | ||
181 | nvkm_output_dp_train(&outp->base, 0, true); | ||
182 | |||
183 | nvkm_event_send(&disp->hpd, rep.mask, index, &rep, sizeof(rep)); | ||
184 | return NVKM_NOTIFY_DROP; | ||
169 | } | 185 | } |
170 | 186 | ||
171 | int | 187 | int |
172 | _nvkm_output_dp_fini(struct nouveau_object *object, bool suspend) | 188 | _nvkm_output_dp_fini(struct nouveau_object *object, bool suspend) |
173 | { | 189 | { |
174 | struct nvkm_output_dp *outp = (void *)object; | 190 | struct nvkm_output_dp *outp = (void *)object; |
175 | nouveau_event_put(outp->irq); | 191 | nvkm_notify_put(&outp->irq); |
176 | nvkm_output_dp_enable(outp, false); | 192 | nvkm_output_dp_enable(outp, false); |
177 | return nvkm_output_fini(&outp->base, suspend); | 193 | return nvkm_output_fini(&outp->base, suspend); |
178 | } | 194 | } |
@@ -189,7 +205,7 @@ void | |||
189 | _nvkm_output_dp_dtor(struct nouveau_object *object) | 205 | _nvkm_output_dp_dtor(struct nouveau_object *object) |
190 | { | 206 | { |
191 | struct nvkm_output_dp *outp = (void *)object; | 207 | struct nvkm_output_dp *outp = (void *)object; |
192 | nouveau_event_ref(NULL, &outp->irq); | 208 | nvkm_notify_fini(&outp->irq); |
193 | nvkm_output_destroy(&outp->base); | 209 | nvkm_output_destroy(&outp->base); |
194 | } | 210 | } |
195 | 211 | ||
@@ -213,7 +229,7 @@ nvkm_output_dp_create_(struct nouveau_object *parent, | |||
213 | if (ret) | 229 | if (ret) |
214 | return ret; | 230 | return ret; |
215 | 231 | ||
216 | nouveau_event_ref(NULL, &outp->base.conn->hpd.event); | 232 | nvkm_notify_fini(&outp->base.conn->hpd); |
217 | 233 | ||
218 | /* access to the aux channel is not optional... */ | 234 | /* access to the aux channel is not optional... */ |
219 | if (!outp->base.edid) { | 235 | if (!outp->base.edid) { |
@@ -238,20 +254,28 @@ nvkm_output_dp_create_(struct nouveau_object *parent, | |||
238 | atomic_set(&outp->lt.done, 0); | 254 | atomic_set(&outp->lt.done, 0); |
239 | 255 | ||
240 | /* link maintenance */ | 256 | /* link maintenance */ |
241 | ret = nouveau_event_new(i2c->ntfy, NVKM_I2C_IRQ, outp->base.edid->index, | 257 | ret = nvkm_notify_init(&i2c->event, nvkm_output_dp_irq, true, |
242 | nvkm_output_dp_service, outp, &outp->irq); | 258 | &(struct nvkm_i2c_ntfy_req) { |
259 | .mask = NVKM_I2C_IRQ, | ||
260 | .port = outp->base.edid->index, | ||
261 | }, | ||
262 | sizeof(struct nvkm_i2c_ntfy_req), | ||
263 | sizeof(struct nvkm_i2c_ntfy_rep), | ||
264 | &outp->irq); | ||
243 | if (ret) { | 265 | if (ret) { |
244 | ERR("error monitoring aux irq event: %d\n", ret); | 266 | ERR("error monitoring aux irq event: %d\n", ret); |
245 | return ret; | 267 | return ret; |
246 | } | 268 | } |
247 | 269 | ||
248 | INIT_WORK(&outp->work, nvkm_output_dp_service_work); | ||
249 | |||
250 | /* hotplug detect, replaces gpio-based mechanism with aux events */ | 270 | /* hotplug detect, replaces gpio-based mechanism with aux events */ |
251 | ret = nouveau_event_new(i2c->ntfy, NVKM_I2C_PLUG | NVKM_I2C_UNPLUG, | 271 | ret = nvkm_notify_init(&i2c->event, nvkm_output_dp_hpd, true, |
252 | outp->base.edid->index, | 272 | &(struct nvkm_i2c_ntfy_req) { |
253 | nvkm_output_dp_service, outp, | 273 | .mask = NVKM_I2C_PLUG | NVKM_I2C_UNPLUG, |
254 | &outp->base.conn->hpd.event); | 274 | .port = outp->base.edid->index, |
275 | }, | ||
276 | sizeof(struct nvkm_i2c_ntfy_req), | ||
277 | sizeof(struct nvkm_i2c_ntfy_rep), | ||
278 | &outp->base.conn->hpd); | ||
255 | if (ret) { | 279 | if (ret) { |
256 | ERR("error monitoring aux hpd events: %d\n", ret); | 280 | ERR("error monitoring aux hpd events: %d\n", ret); |
257 | return ret; | 281 | return ret; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h index ff33ba12cb67..1fac367cc867 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h +++ b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h | |||
@@ -12,10 +12,7 @@ struct nvkm_output_dp { | |||
12 | struct nvbios_dpout info; | 12 | struct nvbios_dpout info; |
13 | u8 version; | 13 | u8 version; |
14 | 14 | ||
15 | struct nouveau_eventh *irq; | 15 | struct nvkm_notify irq; |
16 | struct nouveau_eventh *hpd; | ||
17 | struct work_struct work; | ||
18 | atomic_t pending; | ||
19 | bool present; | 16 | bool present; |
20 | u8 dpcd[16]; | 17 | u8 dpcd[16]; |
21 | 18 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c index fe0f256f11bf..d00f89a468a7 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c | |||
@@ -22,8 +22,9 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/client.h> |
26 | #include <core/class.h> | 26 | #include <nvif/unpack.h> |
27 | #include <nvif/class.h> | ||
27 | 28 | ||
28 | #include <subdev/bios.h> | 29 | #include <subdev/bios.h> |
29 | #include <subdev/bios/dcb.h> | 30 | #include <subdev/bios/dcb.h> |
@@ -143,38 +144,29 @@ nv50_pior_dp_impl = { | |||
143 | *****************************************************************************/ | 144 | *****************************************************************************/ |
144 | 145 | ||
145 | int | 146 | int |
146 | nv50_pior_power(struct nv50_disp_priv *priv, int or, u32 data) | 147 | nv50_pior_power(NV50_DISP_MTHD_V1) |
147 | { | 148 | { |
148 | const u32 stat = data & NV50_DISP_PIOR_PWR_STATE; | 149 | const u32 soff = outp->or * 0x800; |
149 | const u32 soff = (or * 0x800); | 150 | union { |
151 | struct nv50_disp_pior_pwr_v0 v0; | ||
152 | } *args = data; | ||
153 | u32 ctrl, type; | ||
154 | int ret; | ||
155 | |||
156 | nv_ioctl(object, "disp pior pwr size %d\n", size); | ||
157 | if (nvif_unpack(args->v0, 0, 0, false)) { | ||
158 | nv_ioctl(object, "disp pior pwr vers %d state %d type %x\n", | ||
159 | args->v0.version, args->v0.state, args->v0.type); | ||
160 | if (args->v0.type > 0x0f) | ||
161 | return -EINVAL; | ||
162 | ctrl = !!args->v0.state; | ||
163 | type = args->v0.type; | ||
164 | } else | ||
165 | return ret; | ||
166 | |||
150 | nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000); | 167 | nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000); |
151 | nv_mask(priv, 0x61e004 + soff, 0x80000101, 0x80000000 | stat); | 168 | nv_mask(priv, 0x61e004 + soff, 0x80000101, 0x80000000 | ctrl); |
152 | nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000); | 169 | nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000); |
170 | priv->pior.type[outp->or] = type; | ||
153 | return 0; | 171 | return 0; |
154 | } | 172 | } |
155 | |||
156 | int | ||
157 | nv50_pior_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) | ||
158 | { | ||
159 | struct nv50_disp_priv *priv = (void *)object->engine; | ||
160 | const u8 type = (mthd & NV50_DISP_PIOR_MTHD_TYPE) >> 12; | ||
161 | const u8 or = (mthd & NV50_DISP_PIOR_MTHD_OR); | ||
162 | u32 *data = args; | ||
163 | int ret; | ||
164 | |||
165 | if (size < sizeof(u32)) | ||
166 | return -EINVAL; | ||
167 | |||
168 | mthd &= ~NV50_DISP_PIOR_MTHD_TYPE; | ||
169 | mthd &= ~NV50_DISP_PIOR_MTHD_OR; | ||
170 | switch (mthd) { | ||
171 | case NV50_DISP_PIOR_PWR: | ||
172 | ret = priv->pior.power(priv, or, data[0]); | ||
173 | priv->pior.type[or] = type; | ||
174 | break; | ||
175 | default: | ||
176 | return -EINVAL; | ||
177 | } | ||
178 | |||
179 | return ret; | ||
180 | } | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/priv.h b/drivers/gpu/drm/nouveau/core/engine/disp/priv.h index 26e9a42569c7..dbd43ae9df81 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/priv.h +++ b/drivers/gpu/drm/nouveau/core/engine/disp/priv.h | |||
@@ -11,6 +11,7 @@ struct nouveau_disp_impl { | |||
11 | struct nouveau_oclass base; | 11 | struct nouveau_oclass base; |
12 | struct nouveau_oclass **outp; | 12 | struct nouveau_oclass **outp; |
13 | struct nouveau_oclass **conn; | 13 | struct nouveau_oclass **conn; |
14 | const struct nvkm_event_func *vblank; | ||
14 | }; | 15 | }; |
15 | 16 | ||
16 | #define nouveau_disp_create(p,e,c,h,i,x,d) \ | 17 | #define nouveau_disp_create(p,e,c,h,i,x,d) \ |
@@ -39,4 +40,8 @@ int _nouveau_disp_fini(struct nouveau_object *, bool); | |||
39 | extern struct nouveau_oclass *nvkm_output_oclass; | 40 | extern struct nouveau_oclass *nvkm_output_oclass; |
40 | extern struct nouveau_oclass *nvkm_connector_oclass; | 41 | extern struct nouveau_oclass *nvkm_connector_oclass; |
41 | 42 | ||
43 | int nouveau_disp_vblank_ctor(void *data, u32 size, struct nvkm_notify *); | ||
44 | void nouveau_disp_vblank(struct nouveau_disp *, int head); | ||
45 | int nouveau_disp_ntfy(struct nouveau_object *, u32, struct nvkm_event **); | ||
46 | |||
42 | #endif | 47 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c index 7a1ebdfa9e1b..ddf1760c4400 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c | |||
@@ -22,8 +22,9 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/client.h> |
26 | #include <core/class.h> | 26 | #include <nvif/unpack.h> |
27 | #include <nvif/class.h> | ||
27 | 28 | ||
28 | #include <subdev/bios.h> | 29 | #include <subdev/bios.h> |
29 | #include <subdev/bios/dcb.h> | 30 | #include <subdev/bios/dcb.h> |
@@ -32,77 +33,26 @@ | |||
32 | #include "nv50.h" | 33 | #include "nv50.h" |
33 | 34 | ||
34 | int | 35 | int |
35 | nv50_sor_power(struct nv50_disp_priv *priv, int or, u32 data) | 36 | nv50_sor_power(NV50_DISP_MTHD_V1) |
36 | { | 37 | { |
37 | const u32 stat = data & NV50_DISP_SOR_PWR_STATE; | 38 | union { |
38 | const u32 soff = (or * 0x800); | 39 | struct nv50_disp_sor_pwr_v0 v0; |
40 | } *args = data; | ||
41 | const u32 soff = outp->or * 0x800; | ||
42 | u32 stat; | ||
43 | int ret; | ||
44 | |||
45 | nv_ioctl(object, "disp sor pwr size %d\n", size); | ||
46 | if (nvif_unpack(args->v0, 0, 0, false)) { | ||
47 | nv_ioctl(object, "disp sor pwr vers %d state %d\n", | ||
48 | args->v0.version, args->v0.state); | ||
49 | stat = !!args->v0.state; | ||
50 | } else | ||
51 | return ret; | ||
52 | |||
39 | nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000); | 53 | nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000); |
40 | nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat); | 54 | nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat); |
41 | nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000); | 55 | nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000); |
42 | nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000); | 56 | nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000); |
43 | return 0; | 57 | return 0; |
44 | } | 58 | } |
45 | |||
46 | int | ||
47 | nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) | ||
48 | { | ||
49 | struct nv50_disp_priv *priv = (void *)object->engine; | ||
50 | const u8 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12; | ||
51 | const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3; | ||
52 | const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2; | ||
53 | const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR); | ||
54 | const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or); | ||
55 | struct nvkm_output *outp = NULL, *temp; | ||
56 | u32 data; | ||
57 | int ret = -EINVAL; | ||
58 | |||
59 | if (size < sizeof(u32)) | ||
60 | return -EINVAL; | ||
61 | data = *(u32 *)args; | ||
62 | |||
63 | list_for_each_entry(temp, &priv->base.outp, head) { | ||
64 | if ((temp->info.hasht & 0xff) == type && | ||
65 | (temp->info.hashm & mask) == mask) { | ||
66 | outp = temp; | ||
67 | break; | ||
68 | } | ||
69 | } | ||
70 | |||
71 | switch (mthd & ~0x3f) { | ||
72 | case NV50_DISP_SOR_PWR: | ||
73 | ret = priv->sor.power(priv, or, data); | ||
74 | break; | ||
75 | case NVA3_DISP_SOR_HDA_ELD: | ||
76 | ret = priv->sor.hda_eld(priv, or, args, size); | ||
77 | break; | ||
78 | case NV84_DISP_SOR_HDMI_PWR: | ||
79 | ret = priv->sor.hdmi(priv, head, or, data); | ||
80 | break; | ||
81 | case NV50_DISP_SOR_LVDS_SCRIPT: | ||
82 | priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID; | ||
83 | ret = 0; | ||
84 | break; | ||
85 | case NV94_DISP_SOR_DP_PWR: | ||
86 | if (outp) { | ||
87 | struct nvkm_output_dp *outpdp = (void *)outp; | ||
88 | switch (data) { | ||
89 | case NV94_DISP_SOR_DP_PWR_STATE_OFF: | ||
90 | nouveau_event_put(outpdp->irq); | ||
91 | ((struct nvkm_output_dp_impl *)nv_oclass(outp)) | ||
92 | ->lnk_pwr(outpdp, 0); | ||
93 | atomic_set(&outpdp->lt.done, 0); | ||
94 | break; | ||
95 | case NV94_DISP_SOR_DP_PWR_STATE_ON: | ||
96 | nvkm_output_dp_train(&outpdp->base, 0, true); | ||
97 | break; | ||
98 | default: | ||
99 | return -EINVAL; | ||
100 | } | ||
101 | } | ||
102 | break; | ||
103 | default: | ||
104 | BUG_ON(1); | ||
105 | } | ||
106 | |||
107 | return ret; | ||
108 | } | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c index 05487cda84a8..39f85d627336 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c | |||
@@ -23,7 +23,6 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/os.h> |
26 | #include <core/class.h> | ||
27 | 26 | ||
28 | #include <subdev/bios.h> | 27 | #include <subdev/bios.h> |
29 | #include <subdev/bios/dcb.h> | 28 | #include <subdev/bios/dcb.h> |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c index 97f0e9cd3d40..7b7bbc3e459e 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c | |||
@@ -23,7 +23,6 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/os.h> |
26 | #include <core/class.h> | ||
27 | 26 | ||
28 | #include <subdev/bios.h> | 27 | #include <subdev/bios.h> |
29 | #include <subdev/bios/dcb.h> | 28 | #include <subdev/bios/dcb.h> |
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c index 5103e88d1877..e1500f77a56a 100644 --- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c +++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c | |||
@@ -23,98 +23,143 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/object.h> | 25 | #include <core/object.h> |
26 | #include <core/class.h> | 26 | #include <core/client.h> |
27 | #include <nvif/unpack.h> | ||
28 | #include <nvif/class.h> | ||
27 | 29 | ||
28 | #include <subdev/fb.h> | 30 | #include <subdev/fb.h> |
29 | #include <engine/dmaobj.h> | 31 | #include <subdev/instmem.h> |
32 | |||
33 | #include "priv.h" | ||
30 | 34 | ||
31 | static int | 35 | static int |
32 | nouveau_dmaobj_ctor(struct nouveau_object *parent, | 36 | nvkm_dmaobj_bind(struct nouveau_dmaobj *dmaobj, struct nouveau_object *parent, |
37 | struct nouveau_gpuobj **pgpuobj) | ||
38 | { | ||
39 | const struct nvkm_dmaeng_impl *impl = (void *) | ||
40 | nv_oclass(nv_object(dmaobj)->engine); | ||
41 | int ret = 0; | ||
42 | |||
43 | if (nv_object(dmaobj) == parent) { /* ctor bind */ | ||
44 | if (nv_mclass(parent->parent) == NV_DEVICE) { | ||
45 | /* delayed, or no, binding */ | ||
46 | return 0; | ||
47 | } | ||
48 | ret = impl->bind(dmaobj, parent, pgpuobj); | ||
49 | if (ret == 0) | ||
50 | nouveau_object_ref(NULL, &parent); | ||
51 | return ret; | ||
52 | } | ||
53 | |||
54 | return impl->bind(dmaobj, parent, pgpuobj); | ||
55 | } | ||
56 | |||
57 | int | ||
58 | nvkm_dmaobj_create_(struct nouveau_object *parent, | ||
33 | struct nouveau_object *engine, | 59 | struct nouveau_object *engine, |
34 | struct nouveau_oclass *oclass, void *data, u32 size, | 60 | struct nouveau_oclass *oclass, void **pdata, u32 *psize, |
35 | struct nouveau_object **pobject) | 61 | int length, void **pobject) |
36 | { | 62 | { |
37 | struct nouveau_dmaeng *dmaeng = (void *)engine; | 63 | union { |
64 | struct nv_dma_v0 v0; | ||
65 | } *args = *pdata; | ||
66 | struct nouveau_instmem *instmem = nouveau_instmem(parent); | ||
67 | struct nouveau_client *client = nouveau_client(parent); | ||
68 | struct nouveau_device *device = nv_device(parent); | ||
69 | struct nouveau_fb *pfb = nouveau_fb(parent); | ||
38 | struct nouveau_dmaobj *dmaobj; | 70 | struct nouveau_dmaobj *dmaobj; |
39 | struct nouveau_gpuobj *gpuobj; | 71 | void *data = *pdata; |
40 | struct nv_dma_class *args = data; | 72 | u32 size = *psize; |
41 | int ret; | 73 | int ret; |
42 | 74 | ||
43 | if (size < sizeof(*args)) | 75 | ret = nouveau_object_create_(parent, engine, oclass, 0, length, pobject); |
44 | return -EINVAL; | 76 | dmaobj = *pobject; |
45 | |||
46 | ret = nouveau_object_create(parent, engine, oclass, 0, &dmaobj); | ||
47 | *pobject = nv_object(dmaobj); | ||
48 | if (ret) | 77 | if (ret) |
49 | return ret; | 78 | return ret; |
50 | 79 | ||
51 | switch (args->flags & NV_DMA_TARGET_MASK) { | 80 | nv_ioctl(parent, "create dma size %d\n", *psize); |
52 | case NV_DMA_TARGET_VM: | 81 | if (nvif_unpack(args->v0, 0, 0, true)) { |
82 | nv_ioctl(parent, "create dma vers %d target %d access %d " | ||
83 | "start %016llx limit %016llx\n", | ||
84 | args->v0.version, args->v0.target, args->v0.access, | ||
85 | args->v0.start, args->v0.limit); | ||
86 | dmaobj->target = args->v0.target; | ||
87 | dmaobj->access = args->v0.access; | ||
88 | dmaobj->start = args->v0.start; | ||
89 | dmaobj->limit = args->v0.limit; | ||
90 | } else | ||
91 | return ret; | ||
92 | |||
93 | *pdata = data; | ||
94 | *psize = size; | ||
95 | |||
96 | if (dmaobj->start > dmaobj->limit) | ||
97 | return -EINVAL; | ||
98 | |||
99 | switch (dmaobj->target) { | ||
100 | case NV_DMA_V0_TARGET_VM: | ||
53 | dmaobj->target = NV_MEM_TARGET_VM; | 101 | dmaobj->target = NV_MEM_TARGET_VM; |
54 | break; | 102 | break; |
55 | case NV_DMA_TARGET_VRAM: | 103 | case NV_DMA_V0_TARGET_VRAM: |
104 | if (!client->super) { | ||
105 | if (dmaobj->limit >= pfb->ram->size - instmem->reserved) | ||
106 | return -EACCES; | ||
107 | if (device->card_type >= NV_50) | ||
108 | return -EACCES; | ||
109 | } | ||
56 | dmaobj->target = NV_MEM_TARGET_VRAM; | 110 | dmaobj->target = NV_MEM_TARGET_VRAM; |
57 | break; | 111 | break; |
58 | case NV_DMA_TARGET_PCI: | 112 | case NV_DMA_V0_TARGET_PCI: |
113 | if (!client->super) | ||
114 | return -EACCES; | ||
59 | dmaobj->target = NV_MEM_TARGET_PCI; | 115 | dmaobj->target = NV_MEM_TARGET_PCI; |
60 | break; | 116 | break; |
61 | case NV_DMA_TARGET_PCI_US: | 117 | case NV_DMA_V0_TARGET_PCI_US: |
62 | case NV_DMA_TARGET_AGP: | 118 | case NV_DMA_V0_TARGET_AGP: |
119 | if (!client->super) | ||
120 | return -EACCES; | ||
63 | dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP; | 121 | dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP; |
64 | break; | 122 | break; |
65 | default: | 123 | default: |
66 | return -EINVAL; | 124 | return -EINVAL; |
67 | } | 125 | } |
68 | 126 | ||
69 | switch (args->flags & NV_DMA_ACCESS_MASK) { | 127 | switch (dmaobj->access) { |
70 | case NV_DMA_ACCESS_VM: | 128 | case NV_DMA_V0_ACCESS_VM: |
71 | dmaobj->access = NV_MEM_ACCESS_VM; | 129 | dmaobj->access = NV_MEM_ACCESS_VM; |
72 | break; | 130 | break; |
73 | case NV_DMA_ACCESS_RD: | 131 | case NV_DMA_V0_ACCESS_RD: |
74 | dmaobj->access = NV_MEM_ACCESS_RO; | 132 | dmaobj->access = NV_MEM_ACCESS_RO; |
75 | break; | 133 | break; |
76 | case NV_DMA_ACCESS_WR: | 134 | case NV_DMA_V0_ACCESS_WR: |
77 | dmaobj->access = NV_MEM_ACCESS_WO; | 135 | dmaobj->access = NV_MEM_ACCESS_WO; |
78 | break; | 136 | break; |
79 | case NV_DMA_ACCESS_RDWR: | 137 | case NV_DMA_V0_ACCESS_RDWR: |
80 | dmaobj->access = NV_MEM_ACCESS_RW; | 138 | dmaobj->access = NV_MEM_ACCESS_RW; |
81 | break; | 139 | break; |
82 | default: | 140 | default: |
83 | return -EINVAL; | 141 | return -EINVAL; |
84 | } | 142 | } |
85 | 143 | ||
86 | dmaobj->start = args->start; | ||
87 | dmaobj->limit = args->limit; | ||
88 | dmaobj->conf0 = args->conf0; | ||
89 | |||
90 | switch (nv_mclass(parent)) { | ||
91 | case NV_DEVICE_CLASS: | ||
92 | /* delayed, or no, binding */ | ||
93 | break; | ||
94 | default: | ||
95 | ret = dmaeng->bind(dmaeng, *pobject, dmaobj, &gpuobj); | ||
96 | if (ret == 0) { | ||
97 | nouveau_object_ref(NULL, pobject); | ||
98 | *pobject = nv_object(gpuobj); | ||
99 | } | ||
100 | break; | ||
101 | } | ||
102 | |||
103 | return ret; | 144 | return ret; |
104 | } | 145 | } |
105 | 146 | ||
106 | static struct nouveau_ofuncs | 147 | int |
107 | nouveau_dmaobj_ofuncs = { | 148 | _nvkm_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, |
108 | .ctor = nouveau_dmaobj_ctor, | 149 | struct nouveau_oclass *oclass, void *data, u32 size, |
109 | .dtor = nouveau_object_destroy, | 150 | struct nouveau_object **pobject) |
110 | .init = nouveau_object_init, | 151 | { |
111 | .fini = nouveau_object_fini, | 152 | const struct nvkm_dmaeng_impl *impl = (void *)oclass; |
112 | }; | 153 | struct nouveau_dmaeng *dmaeng; |
113 | 154 | int ret; | |
114 | struct nouveau_oclass | 155 | |
115 | nouveau_dmaobj_sclass[] = { | 156 | ret = nouveau_engine_create(parent, engine, oclass, true, "DMAOBJ", |
116 | { NV_DMA_FROM_MEMORY_CLASS, &nouveau_dmaobj_ofuncs }, | 157 | "dmaobj", &dmaeng); |
117 | { NV_DMA_TO_MEMORY_CLASS, &nouveau_dmaobj_ofuncs }, | 158 | *pobject = nv_object(dmaeng); |
118 | { NV_DMA_IN_MEMORY_CLASS, &nouveau_dmaobj_ofuncs }, | 159 | if (ret) |
119 | {} | 160 | return ret; |
120 | }; | 161 | |
162 | nv_engine(dmaeng)->sclass = impl->sclass; | ||
163 | dmaeng->bind = nvkm_dmaobj_bind; | ||
164 | return 0; | ||
165 | } | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c index 027d8217c0fa..20c9dbfe3b2e 100644 --- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c +++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c | |||
@@ -23,121 +23,143 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/gpuobj.h> | 25 | #include <core/gpuobj.h> |
26 | #include <core/class.h> | 26 | #include <nvif/class.h> |
27 | 27 | ||
28 | #include <subdev/fb.h> | 28 | #include <subdev/fb.h> |
29 | #include <subdev/vm/nv04.h> | 29 | #include <subdev/vm/nv04.h> |
30 | 30 | ||
31 | #include <engine/dmaobj.h> | 31 | #include "priv.h" |
32 | 32 | ||
33 | struct nv04_dmaeng_priv { | 33 | struct nv04_dmaobj_priv { |
34 | struct nouveau_dmaeng base; | 34 | struct nouveau_dmaobj base; |
35 | bool clone; | ||
36 | u32 flags0; | ||
37 | u32 flags2; | ||
35 | }; | 38 | }; |
36 | 39 | ||
37 | static int | 40 | static int |
38 | nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng, | 41 | nv04_dmaobj_bind(struct nouveau_dmaobj *dmaobj, |
39 | struct nouveau_object *parent, | 42 | struct nouveau_object *parent, |
40 | struct nouveau_dmaobj *dmaobj, | ||
41 | struct nouveau_gpuobj **pgpuobj) | 43 | struct nouveau_gpuobj **pgpuobj) |
42 | { | 44 | { |
43 | struct nv04_vmmgr_priv *vmm = nv04_vmmgr(dmaeng); | 45 | struct nv04_dmaobj_priv *priv = (void *)dmaobj; |
44 | struct nouveau_gpuobj *gpuobj; | 46 | struct nouveau_gpuobj *gpuobj; |
45 | u32 flags0 = nv_mclass(dmaobj); | 47 | u64 offset = priv->base.start & 0xfffff000; |
46 | u32 flags2 = 0x00000000; | 48 | u64 adjust = priv->base.start & 0x00000fff; |
47 | u64 offset = dmaobj->start & 0xfffff000; | 49 | u32 length = priv->base.limit - priv->base.start; |
48 | u64 adjust = dmaobj->start & 0x00000fff; | ||
49 | u32 length = dmaobj->limit - dmaobj->start; | ||
50 | int ret; | 50 | int ret; |
51 | 51 | ||
52 | if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { | 52 | if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { |
53 | switch (nv_mclass(parent->parent)) { | 53 | switch (nv_mclass(parent->parent)) { |
54 | case NV03_CHANNEL_DMA_CLASS: | 54 | case NV03_CHANNEL_DMA: |
55 | case NV10_CHANNEL_DMA_CLASS: | 55 | case NV10_CHANNEL_DMA: |
56 | case NV17_CHANNEL_DMA_CLASS: | 56 | case NV17_CHANNEL_DMA: |
57 | case NV40_CHANNEL_DMA_CLASS: | 57 | case NV40_CHANNEL_DMA: |
58 | break; | 58 | break; |
59 | default: | 59 | default: |
60 | return -EINVAL; | 60 | return -EINVAL; |
61 | } | 61 | } |
62 | } | 62 | } |
63 | 63 | ||
64 | if (dmaobj->target == NV_MEM_TARGET_VM) { | 64 | if (priv->clone) { |
65 | if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) { | 65 | struct nv04_vmmgr_priv *vmm = nv04_vmmgr(dmaobj); |
66 | struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0]; | 66 | struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0]; |
67 | if (!dmaobj->start) | 67 | if (!dmaobj->start) |
68 | return nouveau_gpuobj_dup(parent, pgt, pgpuobj); | 68 | return nouveau_gpuobj_dup(parent, pgt, pgpuobj); |
69 | offset = nv_ro32(pgt, 8 + (offset >> 10)); | 69 | offset = nv_ro32(pgt, 8 + (offset >> 10)); |
70 | offset &= 0xfffff000; | 70 | offset &= 0xfffff000; |
71 | } | 71 | } |
72 | |||
73 | ret = nouveau_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj); | ||
74 | *pgpuobj = gpuobj; | ||
75 | if (ret == 0) { | ||
76 | nv_wo32(*pgpuobj, 0x00, priv->flags0 | (adjust << 20)); | ||
77 | nv_wo32(*pgpuobj, 0x04, length); | ||
78 | nv_wo32(*pgpuobj, 0x08, priv->flags2 | offset); | ||
79 | nv_wo32(*pgpuobj, 0x0c, priv->flags2 | offset); | ||
80 | } | ||
81 | |||
82 | return ret; | ||
83 | } | ||
84 | |||
85 | static int | ||
86 | nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | ||
87 | struct nouveau_oclass *oclass, void *data, u32 size, | ||
88 | struct nouveau_object **pobject) | ||
89 | { | ||
90 | struct nouveau_dmaeng *dmaeng = (void *)engine; | ||
91 | struct nv04_vmmgr_priv *vmm = nv04_vmmgr(engine); | ||
92 | struct nv04_dmaobj_priv *priv; | ||
93 | int ret; | ||
94 | |||
95 | ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv); | ||
96 | *pobject = nv_object(priv); | ||
97 | if (ret || (ret = -ENOSYS, size)) | ||
98 | return ret; | ||
72 | 99 | ||
73 | dmaobj->target = NV_MEM_TARGET_PCI; | 100 | if (priv->base.target == NV_MEM_TARGET_VM) { |
74 | dmaobj->access = NV_MEM_ACCESS_RW; | 101 | if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) |
102 | priv->clone = true; | ||
103 | priv->base.target = NV_MEM_TARGET_PCI; | ||
104 | priv->base.access = NV_MEM_ACCESS_RW; | ||
75 | } | 105 | } |
76 | 106 | ||
77 | switch (dmaobj->target) { | 107 | priv->flags0 = nv_mclass(priv); |
108 | switch (priv->base.target) { | ||
78 | case NV_MEM_TARGET_VRAM: | 109 | case NV_MEM_TARGET_VRAM: |
79 | flags0 |= 0x00003000; | 110 | priv->flags0 |= 0x00003000; |
80 | break; | 111 | break; |
81 | case NV_MEM_TARGET_PCI: | 112 | case NV_MEM_TARGET_PCI: |
82 | flags0 |= 0x00023000; | 113 | priv->flags0 |= 0x00023000; |
83 | break; | 114 | break; |
84 | case NV_MEM_TARGET_PCI_NOSNOOP: | 115 | case NV_MEM_TARGET_PCI_NOSNOOP: |
85 | flags0 |= 0x00033000; | 116 | priv->flags0 |= 0x00033000; |
86 | break; | 117 | break; |
87 | default: | 118 | default: |
88 | return -EINVAL; | 119 | return -EINVAL; |
89 | } | 120 | } |
90 | 121 | ||
91 | switch (dmaobj->access) { | 122 | switch (priv->base.access) { |
92 | case NV_MEM_ACCESS_RO: | 123 | case NV_MEM_ACCESS_RO: |
93 | flags0 |= 0x00004000; | 124 | priv->flags0 |= 0x00004000; |
94 | break; | 125 | break; |
95 | case NV_MEM_ACCESS_WO: | 126 | case NV_MEM_ACCESS_WO: |
96 | flags0 |= 0x00008000; | 127 | priv->flags0 |= 0x00008000; |
97 | case NV_MEM_ACCESS_RW: | 128 | case NV_MEM_ACCESS_RW: |
98 | flags2 |= 0x00000002; | 129 | priv->flags2 |= 0x00000002; |
99 | break; | 130 | break; |
100 | default: | 131 | default: |
101 | return -EINVAL; | 132 | return -EINVAL; |
102 | } | 133 | } |
103 | 134 | ||
104 | ret = nouveau_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj); | 135 | return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject); |
105 | *pgpuobj = gpuobj; | ||
106 | if (ret == 0) { | ||
107 | nv_wo32(*pgpuobj, 0x00, flags0 | (adjust << 20)); | ||
108 | nv_wo32(*pgpuobj, 0x04, length); | ||
109 | nv_wo32(*pgpuobj, 0x08, flags2 | offset); | ||
110 | nv_wo32(*pgpuobj, 0x0c, flags2 | offset); | ||
111 | } | ||
112 | |||
113 | return ret; | ||
114 | } | 136 | } |
115 | 137 | ||
116 | static int | 138 | static struct nouveau_ofuncs |
117 | nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | 139 | nv04_dmaobj_ofuncs = { |
118 | struct nouveau_oclass *oclass, void *data, u32 size, | 140 | .ctor = nv04_dmaobj_ctor, |
119 | struct nouveau_object **pobject) | 141 | .dtor = _nvkm_dmaobj_dtor, |
120 | { | 142 | .init = _nvkm_dmaobj_init, |
121 | struct nv04_dmaeng_priv *priv; | 143 | .fini = _nvkm_dmaobj_fini, |
122 | int ret; | 144 | }; |
123 | |||
124 | ret = nouveau_dmaeng_create(parent, engine, oclass, &priv); | ||
125 | *pobject = nv_object(priv); | ||
126 | if (ret) | ||
127 | return ret; | ||
128 | 145 | ||
129 | nv_engine(priv)->sclass = nouveau_dmaobj_sclass; | 146 | static struct nouveau_oclass |
130 | priv->base.bind = nv04_dmaobj_bind; | 147 | nv04_dmaeng_sclass[] = { |
131 | return 0; | 148 | { NV_DMA_FROM_MEMORY, &nv04_dmaobj_ofuncs }, |
132 | } | 149 | { NV_DMA_TO_MEMORY, &nv04_dmaobj_ofuncs }, |
150 | { NV_DMA_IN_MEMORY, &nv04_dmaobj_ofuncs }, | ||
151 | {} | ||
152 | }; | ||
133 | 153 | ||
134 | struct nouveau_oclass | 154 | struct nouveau_oclass * |
135 | nv04_dmaeng_oclass = { | 155 | nv04_dmaeng_oclass = &(struct nvkm_dmaeng_impl) { |
136 | .handle = NV_ENGINE(DMAOBJ, 0x04), | 156 | .base.handle = NV_ENGINE(DMAOBJ, 0x04), |
137 | .ofuncs = &(struct nouveau_ofuncs) { | 157 | .base.ofuncs = &(struct nouveau_ofuncs) { |
138 | .ctor = nv04_dmaeng_ctor, | 158 | .ctor = _nvkm_dmaeng_ctor, |
139 | .dtor = _nouveau_dmaeng_dtor, | 159 | .dtor = _nvkm_dmaeng_dtor, |
140 | .init = _nouveau_dmaeng_init, | 160 | .init = _nvkm_dmaeng_init, |
141 | .fini = _nouveau_dmaeng_fini, | 161 | .fini = _nvkm_dmaeng_fini, |
142 | }, | 162 | }, |
143 | }; | 163 | .sclass = nv04_dmaeng_sclass, |
164 | .bind = nv04_dmaobj_bind, | ||
165 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c index 750183f7c057..a740ddba2ee2 100644 --- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c | |||
@@ -22,140 +22,176 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/client.h> | ||
25 | #include <core/gpuobj.h> | 26 | #include <core/gpuobj.h> |
26 | #include <core/class.h> | 27 | #include <nvif/unpack.h> |
28 | #include <nvif/class.h> | ||
27 | 29 | ||
28 | #include <subdev/fb.h> | 30 | #include <subdev/fb.h> |
29 | #include <engine/dmaobj.h> | ||
30 | 31 | ||
31 | struct nv50_dmaeng_priv { | 32 | #include "priv.h" |
32 | struct nouveau_dmaeng base; | 33 | |
34 | struct nv50_dmaobj_priv { | ||
35 | struct nouveau_dmaobj base; | ||
36 | u32 flags0; | ||
37 | u32 flags5; | ||
33 | }; | 38 | }; |
34 | 39 | ||
35 | static int | 40 | static int |
36 | nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng, | 41 | nv50_dmaobj_bind(struct nouveau_dmaobj *dmaobj, |
37 | struct nouveau_object *parent, | 42 | struct nouveau_object *parent, |
38 | struct nouveau_dmaobj *dmaobj, | ||
39 | struct nouveau_gpuobj **pgpuobj) | 43 | struct nouveau_gpuobj **pgpuobj) |
40 | { | 44 | { |
41 | u32 flags0 = nv_mclass(dmaobj); | 45 | struct nv50_dmaobj_priv *priv = (void *)dmaobj; |
42 | u32 flags5 = 0x00000000; | ||
43 | int ret; | 46 | int ret; |
44 | 47 | ||
45 | if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { | 48 | if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { |
46 | switch (nv_mclass(parent->parent)) { | 49 | switch (nv_mclass(parent->parent)) { |
47 | case NV50_CHANNEL_DMA_CLASS: | 50 | case NV40_CHANNEL_DMA: |
48 | case NV84_CHANNEL_DMA_CLASS: | 51 | case NV50_CHANNEL_GPFIFO: |
49 | case NV50_CHANNEL_IND_CLASS: | 52 | case G82_CHANNEL_GPFIFO: |
50 | case NV84_CHANNEL_IND_CLASS: | 53 | case NV50_DISP_CORE_CHANNEL_DMA: |
51 | case NV50_DISP_MAST_CLASS: | 54 | case G82_DISP_CORE_CHANNEL_DMA: |
52 | case NV84_DISP_MAST_CLASS: | 55 | case GT206_DISP_CORE_CHANNEL_DMA: |
53 | case NV94_DISP_MAST_CLASS: | 56 | case GT200_DISP_CORE_CHANNEL_DMA: |
54 | case NVA0_DISP_MAST_CLASS: | 57 | case GT214_DISP_CORE_CHANNEL_DMA: |
55 | case NVA3_DISP_MAST_CLASS: | 58 | case NV50_DISP_BASE_CHANNEL_DMA: |
56 | case NV50_DISP_SYNC_CLASS: | 59 | case G82_DISP_BASE_CHANNEL_DMA: |
57 | case NV84_DISP_SYNC_CLASS: | 60 | case GT200_DISP_BASE_CHANNEL_DMA: |
58 | case NV94_DISP_SYNC_CLASS: | 61 | case GT214_DISP_BASE_CHANNEL_DMA: |
59 | case NVA0_DISP_SYNC_CLASS: | 62 | case NV50_DISP_OVERLAY_CHANNEL_DMA: |
60 | case NVA3_DISP_SYNC_CLASS: | 63 | case G82_DISP_OVERLAY_CHANNEL_DMA: |
61 | case NV50_DISP_OVLY_CLASS: | 64 | case GT200_DISP_OVERLAY_CHANNEL_DMA: |
62 | case NV84_DISP_OVLY_CLASS: | 65 | case GT214_DISP_OVERLAY_CHANNEL_DMA: |
63 | case NV94_DISP_OVLY_CLASS: | ||
64 | case NVA0_DISP_OVLY_CLASS: | ||
65 | case NVA3_DISP_OVLY_CLASS: | ||
66 | break; | 66 | break; |
67 | default: | 67 | default: |
68 | return -EINVAL; | 68 | return -EINVAL; |
69 | } | 69 | } |
70 | } | 70 | } |
71 | 71 | ||
72 | if (!(dmaobj->conf0 & NV50_DMA_CONF0_ENABLE)) { | 72 | ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); |
73 | if (dmaobj->target == NV_MEM_TARGET_VM) { | 73 | if (ret == 0) { |
74 | dmaobj->conf0 = NV50_DMA_CONF0_PRIV_VM; | 74 | nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj)); |
75 | dmaobj->conf0 |= NV50_DMA_CONF0_PART_VM; | 75 | nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit)); |
76 | dmaobj->conf0 |= NV50_DMA_CONF0_COMP_VM; | 76 | nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start)); |
77 | dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_VM; | 77 | nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 | |
78 | upper_32_bits(priv->base.start)); | ||
79 | nv_wo32(*pgpuobj, 0x10, 0x00000000); | ||
80 | nv_wo32(*pgpuobj, 0x14, priv->flags5); | ||
81 | } | ||
82 | |||
83 | return ret; | ||
84 | } | ||
85 | |||
86 | static int | ||
87 | nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | ||
88 | struct nouveau_oclass *oclass, void *data, u32 size, | ||
89 | struct nouveau_object **pobject) | ||
90 | { | ||
91 | struct nouveau_dmaeng *dmaeng = (void *)engine; | ||
92 | union { | ||
93 | struct nv50_dma_v0 v0; | ||
94 | } *args; | ||
95 | struct nv50_dmaobj_priv *priv; | ||
96 | u32 user, part, comp, kind; | ||
97 | int ret; | ||
98 | |||
99 | ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv); | ||
100 | *pobject = nv_object(priv); | ||
101 | if (ret) | ||
102 | return ret; | ||
103 | args = data; | ||
104 | |||
105 | nv_ioctl(parent, "create nv50 dma size %d\n", size); | ||
106 | if (nvif_unpack(args->v0, 0, 0, false)) { | ||
107 | nv_ioctl(parent, "create nv50 dma vers %d priv %d part %d " | ||
108 | "comp %d kind %02x\n", args->v0.version, | ||
109 | args->v0.priv, args->v0.part, args->v0.comp, | ||
110 | args->v0.kind); | ||
111 | user = args->v0.priv; | ||
112 | part = args->v0.part; | ||
113 | comp = args->v0.comp; | ||
114 | kind = args->v0.kind; | ||
115 | } else | ||
116 | if (size == 0) { | ||
117 | if (priv->base.target != NV_MEM_TARGET_VM) { | ||
118 | user = NV50_DMA_V0_PRIV_US; | ||
119 | part = NV50_DMA_V0_PART_256; | ||
120 | comp = NV50_DMA_V0_COMP_NONE; | ||
121 | kind = NV50_DMA_V0_KIND_PITCH; | ||
78 | } else { | 122 | } else { |
79 | dmaobj->conf0 = NV50_DMA_CONF0_PRIV_US; | 123 | user = NV50_DMA_V0_PRIV_VM; |
80 | dmaobj->conf0 |= NV50_DMA_CONF0_PART_256; | 124 | part = NV50_DMA_V0_PART_VM; |
81 | dmaobj->conf0 |= NV50_DMA_CONF0_COMP_NONE; | 125 | comp = NV50_DMA_V0_COMP_VM; |
82 | dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_LINEAR; | 126 | kind = NV50_DMA_V0_KIND_VM; |
83 | } | 127 | } |
84 | } | 128 | } else |
129 | return ret; | ||
85 | 130 | ||
86 | flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_COMP) << 22; | 131 | if (user > 2 || part > 2 || comp > 3 || kind > 0x7f) |
87 | flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_TYPE) << 22; | 132 | return -EINVAL; |
88 | flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_PRIV); | 133 | priv->flags0 = (comp << 29) | (kind << 22) | (user << 20); |
89 | flags5 |= (dmaobj->conf0 & NV50_DMA_CONF0_PART); | 134 | priv->flags5 = (part << 16); |
90 | 135 | ||
91 | switch (dmaobj->target) { | 136 | switch (priv->base.target) { |
92 | case NV_MEM_TARGET_VM: | 137 | case NV_MEM_TARGET_VM: |
93 | flags0 |= 0x00000000; | 138 | priv->flags0 |= 0x00000000; |
94 | break; | 139 | break; |
95 | case NV_MEM_TARGET_VRAM: | 140 | case NV_MEM_TARGET_VRAM: |
96 | flags0 |= 0x00010000; | 141 | priv->flags0 |= 0x00010000; |
97 | break; | 142 | break; |
98 | case NV_MEM_TARGET_PCI: | 143 | case NV_MEM_TARGET_PCI: |
99 | flags0 |= 0x00020000; | 144 | priv->flags0 |= 0x00020000; |
100 | break; | 145 | break; |
101 | case NV_MEM_TARGET_PCI_NOSNOOP: | 146 | case NV_MEM_TARGET_PCI_NOSNOOP: |
102 | flags0 |= 0x00030000; | 147 | priv->flags0 |= 0x00030000; |
103 | break; | 148 | break; |
104 | default: | 149 | default: |
105 | return -EINVAL; | 150 | return -EINVAL; |
106 | } | 151 | } |
107 | 152 | ||
108 | switch (dmaobj->access) { | 153 | switch (priv->base.access) { |
109 | case NV_MEM_ACCESS_VM: | 154 | case NV_MEM_ACCESS_VM: |
110 | break; | 155 | break; |
111 | case NV_MEM_ACCESS_RO: | 156 | case NV_MEM_ACCESS_RO: |
112 | flags0 |= 0x00040000; | 157 | priv->flags0 |= 0x00040000; |
113 | break; | 158 | break; |
114 | case NV_MEM_ACCESS_WO: | 159 | case NV_MEM_ACCESS_WO: |
115 | case NV_MEM_ACCESS_RW: | 160 | case NV_MEM_ACCESS_RW: |
116 | flags0 |= 0x00080000; | 161 | priv->flags0 |= 0x00080000; |
117 | break; | 162 | break; |
163 | default: | ||
164 | return -EINVAL; | ||
118 | } | 165 | } |
119 | 166 | ||
120 | ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); | 167 | return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject); |
121 | if (ret == 0) { | ||
122 | nv_wo32(*pgpuobj, 0x00, flags0); | ||
123 | nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit)); | ||
124 | nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start)); | ||
125 | nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 | | ||
126 | upper_32_bits(dmaobj->start)); | ||
127 | nv_wo32(*pgpuobj, 0x10, 0x00000000); | ||
128 | nv_wo32(*pgpuobj, 0x14, flags5); | ||
129 | } | ||
130 | |||
131 | return ret; | ||
132 | } | 168 | } |
133 | 169 | ||
134 | static int | 170 | static struct nouveau_ofuncs |
135 | nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | 171 | nv50_dmaobj_ofuncs = { |
136 | struct nouveau_oclass *oclass, void *data, u32 size, | 172 | .ctor = nv50_dmaobj_ctor, |
137 | struct nouveau_object **pobject) | 173 | .dtor = _nvkm_dmaobj_dtor, |
138 | { | 174 | .init = _nvkm_dmaobj_init, |
139 | struct nv50_dmaeng_priv *priv; | 175 | .fini = _nvkm_dmaobj_fini, |
140 | int ret; | 176 | }; |
141 | |||
142 | ret = nouveau_dmaeng_create(parent, engine, oclass, &priv); | ||
143 | *pobject = nv_object(priv); | ||
144 | if (ret) | ||
145 | return ret; | ||
146 | 177 | ||
147 | nv_engine(priv)->sclass = nouveau_dmaobj_sclass; | 178 | static struct nouveau_oclass |
148 | priv->base.bind = nv50_dmaobj_bind; | 179 | nv50_dmaeng_sclass[] = { |
149 | return 0; | 180 | { NV_DMA_FROM_MEMORY, &nv50_dmaobj_ofuncs }, |
150 | } | 181 | { NV_DMA_TO_MEMORY, &nv50_dmaobj_ofuncs }, |
182 | { NV_DMA_IN_MEMORY, &nv50_dmaobj_ofuncs }, | ||
183 | {} | ||
184 | }; | ||
151 | 185 | ||
152 | struct nouveau_oclass | 186 | struct nouveau_oclass * |
153 | nv50_dmaeng_oclass = { | 187 | nv50_dmaeng_oclass = &(struct nvkm_dmaeng_impl) { |
154 | .handle = NV_ENGINE(DMAOBJ, 0x50), | 188 | .base.handle = NV_ENGINE(DMAOBJ, 0x50), |
155 | .ofuncs = &(struct nouveau_ofuncs) { | 189 | .base.ofuncs = &(struct nouveau_ofuncs) { |
156 | .ctor = nv50_dmaeng_ctor, | 190 | .ctor = _nvkm_dmaeng_ctor, |
157 | .dtor = _nouveau_dmaeng_dtor, | 191 | .dtor = _nvkm_dmaeng_dtor, |
158 | .init = _nouveau_dmaeng_init, | 192 | .init = _nvkm_dmaeng_init, |
159 | .fini = _nouveau_dmaeng_fini, | 193 | .fini = _nvkm_dmaeng_fini, |
160 | }, | 194 | }, |
161 | }; | 195 | .sclass = nv50_dmaeng_sclass, |
196 | .bind = nv50_dmaobj_bind, | ||
197 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c index cd3970d03b80..88ec33b20048 100644 --- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c | |||
@@ -22,32 +22,35 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/client.h> | ||
25 | #include <core/device.h> | 26 | #include <core/device.h> |
26 | #include <core/gpuobj.h> | 27 | #include <core/gpuobj.h> |
27 | #include <core/class.h> | 28 | #include <nvif/unpack.h> |
29 | #include <nvif/class.h> | ||
28 | 30 | ||
29 | #include <subdev/fb.h> | 31 | #include <subdev/fb.h> |
30 | #include <engine/dmaobj.h> | ||
31 | 32 | ||
32 | struct nvc0_dmaeng_priv { | 33 | #include "priv.h" |
33 | struct nouveau_dmaeng base; | 34 | |
35 | struct nvc0_dmaobj_priv { | ||
36 | struct nouveau_dmaobj base; | ||
37 | u32 flags0; | ||
38 | u32 flags5; | ||
34 | }; | 39 | }; |
35 | 40 | ||
36 | static int | 41 | static int |
37 | nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng, | 42 | nvc0_dmaobj_bind(struct nouveau_dmaobj *dmaobj, |
38 | struct nouveau_object *parent, | 43 | struct nouveau_object *parent, |
39 | struct nouveau_dmaobj *dmaobj, | ||
40 | struct nouveau_gpuobj **pgpuobj) | 44 | struct nouveau_gpuobj **pgpuobj) |
41 | { | 45 | { |
42 | u32 flags0 = nv_mclass(dmaobj); | 46 | struct nvc0_dmaobj_priv *priv = (void *)dmaobj; |
43 | u32 flags5 = 0x00000000; | ||
44 | int ret; | 47 | int ret; |
45 | 48 | ||
46 | if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { | 49 | if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { |
47 | switch (nv_mclass(parent->parent)) { | 50 | switch (nv_mclass(parent->parent)) { |
48 | case NVA3_DISP_MAST_CLASS: | 51 | case GT214_DISP_CORE_CHANNEL_DMA: |
49 | case NVA3_DISP_SYNC_CLASS: | 52 | case GT214_DISP_BASE_CHANNEL_DMA: |
50 | case NVA3_DISP_OVLY_CLASS: | 53 | case GT214_DISP_OVERLAY_CHANNEL_DMA: |
51 | break; | 54 | break; |
52 | default: | 55 | default: |
53 | return -EINVAL; | 56 | return -EINVAL; |
@@ -55,89 +58,122 @@ nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng, | |||
55 | } else | 58 | } else |
56 | return 0; | 59 | return 0; |
57 | 60 | ||
58 | if (!(dmaobj->conf0 & NVC0_DMA_CONF0_ENABLE)) { | 61 | ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); |
59 | if (dmaobj->target == NV_MEM_TARGET_VM) { | 62 | if (ret == 0) { |
60 | dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_VM; | 63 | nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj)); |
61 | dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_VM; | 64 | nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit)); |
65 | nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start)); | ||
66 | nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 | | ||
67 | upper_32_bits(priv->base.start)); | ||
68 | nv_wo32(*pgpuobj, 0x10, 0x00000000); | ||
69 | nv_wo32(*pgpuobj, 0x14, priv->flags5); | ||
70 | } | ||
71 | |||
72 | return ret; | ||
73 | } | ||
74 | |||
75 | static int | ||
76 | nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | ||
77 | struct nouveau_oclass *oclass, void *data, u32 size, | ||
78 | struct nouveau_object **pobject) | ||
79 | { | ||
80 | struct nouveau_dmaeng *dmaeng = (void *)engine; | ||
81 | union { | ||
82 | struct gf100_dma_v0 v0; | ||
83 | } *args; | ||
84 | struct nvc0_dmaobj_priv *priv; | ||
85 | u32 kind, user, unkn; | ||
86 | int ret; | ||
87 | |||
88 | ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv); | ||
89 | *pobject = nv_object(priv); | ||
90 | if (ret) | ||
91 | return ret; | ||
92 | args = data; | ||
93 | |||
94 | nv_ioctl(parent, "create gf100 dma size %d\n", size); | ||
95 | if (nvif_unpack(args->v0, 0, 0, false)) { | ||
96 | nv_ioctl(parent, "create gf100 dma vers %d priv %d kind %02x\n", | ||
97 | args->v0.version, args->v0.priv, args->v0.kind); | ||
98 | kind = args->v0.kind; | ||
99 | user = args->v0.priv; | ||
100 | unkn = 0; | ||
101 | } else | ||
102 | if (size == 0) { | ||
103 | if (priv->base.target != NV_MEM_TARGET_VM) { | ||
104 | kind = GF100_DMA_V0_KIND_PITCH; | ||
105 | user = GF100_DMA_V0_PRIV_US; | ||
106 | unkn = 2; | ||
62 | } else { | 107 | } else { |
63 | dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_US; | 108 | kind = GF100_DMA_V0_KIND_VM; |
64 | dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_LINEAR; | 109 | user = GF100_DMA_V0_PRIV_VM; |
65 | dmaobj->conf0 |= 0x00020000; | 110 | unkn = 0; |
66 | } | 111 | } |
67 | } | 112 | } else |
113 | return ret; | ||
68 | 114 | ||
69 | flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_TYPE) << 22; | 115 | if (user > 2) |
70 | flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_PRIV); | 116 | return -EINVAL; |
71 | flags5 |= (dmaobj->conf0 & NVC0_DMA_CONF0_UNKN); | 117 | priv->flags0 |= (kind << 22) | (user << 20); |
118 | priv->flags5 |= (unkn << 16); | ||
72 | 119 | ||
73 | switch (dmaobj->target) { | 120 | switch (priv->base.target) { |
74 | case NV_MEM_TARGET_VM: | 121 | case NV_MEM_TARGET_VM: |
75 | flags0 |= 0x00000000; | 122 | priv->flags0 |= 0x00000000; |
76 | break; | 123 | break; |
77 | case NV_MEM_TARGET_VRAM: | 124 | case NV_MEM_TARGET_VRAM: |
78 | flags0 |= 0x00010000; | 125 | priv->flags0 |= 0x00010000; |
79 | break; | 126 | break; |
80 | case NV_MEM_TARGET_PCI: | 127 | case NV_MEM_TARGET_PCI: |
81 | flags0 |= 0x00020000; | 128 | priv->flags0 |= 0x00020000; |
82 | break; | 129 | break; |
83 | case NV_MEM_TARGET_PCI_NOSNOOP: | 130 | case NV_MEM_TARGET_PCI_NOSNOOP: |
84 | flags0 |= 0x00030000; | 131 | priv->flags0 |= 0x00030000; |
85 | break; | 132 | break; |
86 | default: | 133 | default: |
87 | return -EINVAL; | 134 | return -EINVAL; |
88 | } | 135 | } |
89 | 136 | ||
90 | switch (dmaobj->access) { | 137 | switch (priv->base.access) { |
91 | case NV_MEM_ACCESS_VM: | 138 | case NV_MEM_ACCESS_VM: |
92 | break; | 139 | break; |
93 | case NV_MEM_ACCESS_RO: | 140 | case NV_MEM_ACCESS_RO: |
94 | flags0 |= 0x00040000; | 141 | priv->flags0 |= 0x00040000; |
95 | break; | 142 | break; |
96 | case NV_MEM_ACCESS_WO: | 143 | case NV_MEM_ACCESS_WO: |
97 | case NV_MEM_ACCESS_RW: | 144 | case NV_MEM_ACCESS_RW: |
98 | flags0 |= 0x00080000; | 145 | priv->flags0 |= 0x00080000; |
99 | break; | 146 | break; |
100 | } | 147 | } |
101 | 148 | ||
102 | ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); | 149 | return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject); |
103 | if (ret == 0) { | ||
104 | nv_wo32(*pgpuobj, 0x00, flags0); | ||
105 | nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit)); | ||
106 | nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start)); | ||
107 | nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 | | ||
108 | upper_32_bits(dmaobj->start)); | ||
109 | nv_wo32(*pgpuobj, 0x10, 0x00000000); | ||
110 | nv_wo32(*pgpuobj, 0x14, flags5); | ||
111 | } | ||
112 | |||
113 | return ret; | ||
114 | } | 150 | } |
115 | 151 | ||
116 | static int | 152 | static struct nouveau_ofuncs |
117 | nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | 153 | nvc0_dmaobj_ofuncs = { |
118 | struct nouveau_oclass *oclass, void *data, u32 size, | 154 | .ctor = nvc0_dmaobj_ctor, |
119 | struct nouveau_object **pobject) | 155 | .dtor = _nvkm_dmaobj_dtor, |
120 | { | 156 | .init = _nvkm_dmaobj_init, |
121 | struct nvc0_dmaeng_priv *priv; | 157 | .fini = _nvkm_dmaobj_fini, |
122 | int ret; | 158 | }; |
123 | |||
124 | ret = nouveau_dmaeng_create(parent, engine, oclass, &priv); | ||
125 | *pobject = nv_object(priv); | ||
126 | if (ret) | ||
127 | return ret; | ||
128 | 159 | ||
129 | nv_engine(priv)->sclass = nouveau_dmaobj_sclass; | 160 | static struct nouveau_oclass |
130 | priv->base.bind = nvc0_dmaobj_bind; | 161 | nvc0_dmaeng_sclass[] = { |
131 | return 0; | 162 | { NV_DMA_FROM_MEMORY, &nvc0_dmaobj_ofuncs }, |
132 | } | 163 | { NV_DMA_TO_MEMORY, &nvc0_dmaobj_ofuncs }, |
164 | { NV_DMA_IN_MEMORY, &nvc0_dmaobj_ofuncs }, | ||
165 | {} | ||
166 | }; | ||
133 | 167 | ||
134 | struct nouveau_oclass | 168 | struct nouveau_oclass * |
135 | nvc0_dmaeng_oclass = { | 169 | nvc0_dmaeng_oclass = &(struct nvkm_dmaeng_impl) { |
136 | .handle = NV_ENGINE(DMAOBJ, 0xc0), | 170 | .base.handle = NV_ENGINE(DMAOBJ, 0xc0), |
137 | .ofuncs = &(struct nouveau_ofuncs) { | 171 | .base.ofuncs = &(struct nouveau_ofuncs) { |
138 | .ctor = nvc0_dmaeng_ctor, | 172 | .ctor = _nvkm_dmaeng_ctor, |
139 | .dtor = _nouveau_dmaeng_dtor, | 173 | .dtor = _nvkm_dmaeng_dtor, |
140 | .init = _nouveau_dmaeng_init, | 174 | .init = _nvkm_dmaeng_init, |
141 | .fini = _nouveau_dmaeng_fini, | 175 | .fini = _nvkm_dmaeng_fini, |
142 | }, | 176 | }, |
143 | }; | 177 | .sclass = nvc0_dmaeng_sclass, |
178 | .bind = nvc0_dmaobj_bind, | ||
179 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c index 1cfb3bb90131..3fc4f0b0eaca 100644 --- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c +++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c | |||
@@ -22,40 +22,40 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/client.h> | ||
25 | #include <core/device.h> | 26 | #include <core/device.h> |
26 | #include <core/gpuobj.h> | 27 | #include <core/gpuobj.h> |
27 | #include <core/class.h> | 28 | #include <nvif/unpack.h> |
29 | #include <nvif/class.h> | ||
28 | 30 | ||
29 | #include <subdev/fb.h> | 31 | #include <subdev/fb.h> |
30 | #include <engine/dmaobj.h> | ||
31 | 32 | ||
32 | struct nvd0_dmaeng_priv { | 33 | #include "priv.h" |
33 | struct nouveau_dmaeng base; | 34 | |
35 | struct nvd0_dmaobj_priv { | ||
36 | struct nouveau_dmaobj base; | ||
37 | u32 flags0; | ||
34 | }; | 38 | }; |
35 | 39 | ||
36 | static int | 40 | static int |
37 | nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng, | 41 | nvd0_dmaobj_bind(struct nouveau_dmaobj *dmaobj, |
38 | struct nouveau_object *parent, | 42 | struct nouveau_object *parent, |
39 | struct nouveau_dmaobj *dmaobj, | ||
40 | struct nouveau_gpuobj **pgpuobj) | 43 | struct nouveau_gpuobj **pgpuobj) |
41 | { | 44 | { |
42 | u32 flags0 = 0x00000000; | 45 | struct nvd0_dmaobj_priv *priv = (void *)dmaobj; |
43 | int ret; | 46 | int ret; |
44 | 47 | ||
45 | if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { | 48 | if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { |
46 | switch (nv_mclass(parent->parent)) { | 49 | switch (nv_mclass(parent->parent)) { |
47 | case NVD0_DISP_MAST_CLASS: | 50 | case GF110_DISP_CORE_CHANNEL_DMA: |
48 | case NVD0_DISP_SYNC_CLASS: | 51 | case GK104_DISP_CORE_CHANNEL_DMA: |
49 | case NVD0_DISP_OVLY_CLASS: | 52 | case GK110_DISP_CORE_CHANNEL_DMA: |
50 | case NVE0_DISP_MAST_CLASS: | 53 | case GM107_DISP_CORE_CHANNEL_DMA: |
51 | case NVE0_DISP_SYNC_CLASS: | 54 | case GF110_DISP_BASE_CHANNEL_DMA: |
52 | case NVE0_DISP_OVLY_CLASS: | 55 | case GK104_DISP_BASE_CHANNEL_DMA: |
53 | case NVF0_DISP_MAST_CLASS: | 56 | case GK110_DISP_BASE_CHANNEL_DMA: |
54 | case NVF0_DISP_SYNC_CLASS: | 57 | case GF110_DISP_OVERLAY_CONTROL_DMA: |
55 | case NVF0_DISP_OVLY_CLASS: | 58 | case GK104_DISP_OVERLAY_CONTROL_DMA: |
56 | case GM107_DISP_MAST_CLASS: | ||
57 | case GM107_DISP_SYNC_CLASS: | ||
58 | case GM107_DISP_OVLY_CLASS: | ||
59 | break; | 59 | break; |
60 | default: | 60 | default: |
61 | return -EINVAL; | 61 | return -EINVAL; |
@@ -63,33 +63,11 @@ nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng, | |||
63 | } else | 63 | } else |
64 | return 0; | 64 | return 0; |
65 | 65 | ||
66 | if (!(dmaobj->conf0 & NVD0_DMA_CONF0_ENABLE)) { | ||
67 | if (dmaobj->target == NV_MEM_TARGET_VM) { | ||
68 | dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_VM; | ||
69 | dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_LP; | ||
70 | } else { | ||
71 | dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_LINEAR; | ||
72 | dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_SP; | ||
73 | } | ||
74 | } | ||
75 | |||
76 | flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_TYPE) << 20; | ||
77 | flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_PAGE) >> 4; | ||
78 | |||
79 | switch (dmaobj->target) { | ||
80 | case NV_MEM_TARGET_VRAM: | ||
81 | flags0 |= 0x00000009; | ||
82 | break; | ||
83 | default: | ||
84 | return -EINVAL; | ||
85 | break; | ||
86 | } | ||
87 | |||
88 | ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); | 66 | ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); |
89 | if (ret == 0) { | 67 | if (ret == 0) { |
90 | nv_wo32(*pgpuobj, 0x00, flags0); | 68 | nv_wo32(*pgpuobj, 0x00, priv->flags0); |
91 | nv_wo32(*pgpuobj, 0x04, dmaobj->start >> 8); | 69 | nv_wo32(*pgpuobj, 0x04, priv->base.start >> 8); |
92 | nv_wo32(*pgpuobj, 0x08, dmaobj->limit >> 8); | 70 | nv_wo32(*pgpuobj, 0x08, priv->base.limit >> 8); |
93 | nv_wo32(*pgpuobj, 0x0c, 0x00000000); | 71 | nv_wo32(*pgpuobj, 0x0c, 0x00000000); |
94 | nv_wo32(*pgpuobj, 0x10, 0x00000000); | 72 | nv_wo32(*pgpuobj, 0x10, 0x00000000); |
95 | nv_wo32(*pgpuobj, 0x14, 0x00000000); | 73 | nv_wo32(*pgpuobj, 0x14, 0x00000000); |
@@ -99,30 +77,91 @@ nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng, | |||
99 | } | 77 | } |
100 | 78 | ||
101 | static int | 79 | static int |
102 | nvd0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | 80 | nvd0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, |
103 | struct nouveau_oclass *oclass, void *data, u32 size, | 81 | struct nouveau_oclass *oclass, void *data, u32 size, |
104 | struct nouveau_object **pobject) | 82 | struct nouveau_object **pobject) |
105 | { | 83 | { |
106 | struct nvd0_dmaeng_priv *priv; | 84 | struct nouveau_dmaeng *dmaeng = (void *)engine; |
85 | union { | ||
86 | struct gf110_dma_v0 v0; | ||
87 | } *args; | ||
88 | struct nvd0_dmaobj_priv *priv; | ||
89 | u32 kind, page; | ||
107 | int ret; | 90 | int ret; |
108 | 91 | ||
109 | ret = nouveau_dmaeng_create(parent, engine, oclass, &priv); | 92 | ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv); |
110 | *pobject = nv_object(priv); | 93 | *pobject = nv_object(priv); |
111 | if (ret) | 94 | if (ret) |
112 | return ret; | 95 | return ret; |
96 | args = data; | ||
113 | 97 | ||
114 | nv_engine(priv)->sclass = nouveau_dmaobj_sclass; | 98 | nv_ioctl(parent, "create gf110 dma size %d\n", size); |
115 | priv->base.bind = nvd0_dmaobj_bind; | 99 | if (nvif_unpack(args->v0, 0, 0, false)) { |
116 | return 0; | 100 | nv_ioctl(parent, "create gf100 dma vers %d page %d kind %02x\n", |
101 | args->v0.version, args->v0.page, args->v0.kind); | ||
102 | kind = args->v0.kind; | ||
103 | page = args->v0.page; | ||
104 | } else | ||
105 | if (size == 0) { | ||
106 | if (priv->base.target != NV_MEM_TARGET_VM) { | ||
107 | kind = GF110_DMA_V0_KIND_PITCH; | ||
108 | page = GF110_DMA_V0_PAGE_SP; | ||
109 | } else { | ||
110 | kind = GF110_DMA_V0_KIND_VM; | ||
111 | page = GF110_DMA_V0_PAGE_LP; | ||
112 | } | ||
113 | } else | ||
114 | return ret; | ||
115 | |||
116 | if (page > 1) | ||
117 | return -EINVAL; | ||
118 | priv->flags0 = (kind << 20) | (page << 6); | ||
119 | |||
120 | switch (priv->base.target) { | ||
121 | case NV_MEM_TARGET_VRAM: | ||
122 | priv->flags0 |= 0x00000009; | ||
123 | break; | ||
124 | case NV_MEM_TARGET_VM: | ||
125 | case NV_MEM_TARGET_PCI: | ||
126 | case NV_MEM_TARGET_PCI_NOSNOOP: | ||
127 | /* XXX: don't currently know how to construct a real one | ||
128 | * of these. we only use them to represent pushbufs | ||
129 | * on these chipsets, and the classes that use them | ||
130 | * deal with the target themselves. | ||
131 | */ | ||
132 | break; | ||
133 | default: | ||
134 | return -EINVAL; | ||
135 | } | ||
136 | |||
137 | return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject); | ||
117 | } | 138 | } |
118 | 139 | ||
119 | struct nouveau_oclass | 140 | static struct nouveau_ofuncs |
120 | nvd0_dmaeng_oclass = { | 141 | nvd0_dmaobj_ofuncs = { |
121 | .handle = NV_ENGINE(DMAOBJ, 0xd0), | 142 | .ctor = nvd0_dmaobj_ctor, |
122 | .ofuncs = &(struct nouveau_ofuncs) { | 143 | .dtor = _nvkm_dmaobj_dtor, |
123 | .ctor = nvd0_dmaeng_ctor, | 144 | .init = _nvkm_dmaobj_init, |
124 | .dtor = _nouveau_dmaeng_dtor, | 145 | .fini = _nvkm_dmaobj_fini, |
125 | .init = _nouveau_dmaeng_init, | ||
126 | .fini = _nouveau_dmaeng_fini, | ||
127 | }, | ||
128 | }; | 146 | }; |
147 | |||
148 | static struct nouveau_oclass | ||
149 | nvd0_dmaeng_sclass[] = { | ||
150 | { NV_DMA_FROM_MEMORY, &nvd0_dmaobj_ofuncs }, | ||
151 | { NV_DMA_TO_MEMORY, &nvd0_dmaobj_ofuncs }, | ||
152 | { NV_DMA_IN_MEMORY, &nvd0_dmaobj_ofuncs }, | ||
153 | {} | ||
154 | }; | ||
155 | |||
156 | struct nouveau_oclass * | ||
157 | nvd0_dmaeng_oclass = &(struct nvkm_dmaeng_impl) { | ||
158 | .base.handle = NV_ENGINE(DMAOBJ, 0xd0), | ||
159 | .base.ofuncs = &(struct nouveau_ofuncs) { | ||
160 | .ctor = _nvkm_dmaeng_ctor, | ||
161 | .dtor = _nvkm_dmaeng_dtor, | ||
162 | .init = _nvkm_dmaeng_init, | ||
163 | .fini = _nvkm_dmaeng_fini, | ||
164 | }, | ||
165 | .sclass = nvd0_dmaeng_sclass, | ||
166 | .bind = nvd0_dmaobj_bind, | ||
167 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/priv.h b/drivers/gpu/drm/nouveau/core/engine/dmaobj/priv.h new file mode 100644 index 000000000000..36f743866937 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/priv.h | |||
@@ -0,0 +1,30 @@ | |||
1 | #ifndef __NVKM_DMAOBJ_PRIV_H__ | ||
2 | #define __NVKM_DMAOBJ_PRIV_H__ | ||
3 | |||
4 | #include <engine/dmaobj.h> | ||
5 | |||
6 | #define nvkm_dmaobj_create(p,e,c,pa,sa,d) \ | ||
7 | nvkm_dmaobj_create_((p), (e), (c), (pa), (sa), sizeof(**d), (void **)d) | ||
8 | |||
9 | int nvkm_dmaobj_create_(struct nouveau_object *, struct nouveau_object *, | ||
10 | struct nouveau_oclass *, void **, u32 *, | ||
11 | int, void **); | ||
12 | #define _nvkm_dmaobj_dtor nouveau_object_destroy | ||
13 | #define _nvkm_dmaobj_init nouveau_object_init | ||
14 | #define _nvkm_dmaobj_fini nouveau_object_fini | ||
15 | |||
16 | int _nvkm_dmaeng_ctor(struct nouveau_object *, struct nouveau_object *, | ||
17 | struct nouveau_oclass *, void *, u32, | ||
18 | struct nouveau_object **); | ||
19 | #define _nvkm_dmaeng_dtor _nouveau_engine_dtor | ||
20 | #define _nvkm_dmaeng_init _nouveau_engine_init | ||
21 | #define _nvkm_dmaeng_fini _nouveau_engine_fini | ||
22 | |||
23 | struct nvkm_dmaeng_impl { | ||
24 | struct nouveau_oclass base; | ||
25 | struct nouveau_oclass *sclass; | ||
26 | int (*bind)(struct nouveau_dmaobj *, struct nouveau_object *, | ||
27 | struct nouveau_gpuobj **); | ||
28 | }; | ||
29 | |||
30 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c index 56ed3d73bf8e..0f999fc45ab9 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c | |||
@@ -26,11 +26,30 @@ | |||
26 | #include <core/object.h> | 26 | #include <core/object.h> |
27 | #include <core/handle.h> | 27 | #include <core/handle.h> |
28 | #include <core/event.h> | 28 | #include <core/event.h> |
29 | #include <core/class.h> | 29 | #include <nvif/unpack.h> |
30 | #include <nvif/class.h> | ||
31 | #include <nvif/event.h> | ||
30 | 32 | ||
31 | #include <engine/dmaobj.h> | 33 | #include <engine/dmaobj.h> |
32 | #include <engine/fifo.h> | 34 | #include <engine/fifo.h> |
33 | 35 | ||
36 | static int | ||
37 | nouveau_fifo_event_ctor(void *data, u32 size, struct nvkm_notify *notify) | ||
38 | { | ||
39 | if (size == 0) { | ||
40 | notify->size = 0; | ||
41 | notify->types = 1; | ||
42 | notify->index = 0; | ||
43 | return 0; | ||
44 | } | ||
45 | return -ENOSYS; | ||
46 | } | ||
47 | |||
48 | static const struct nvkm_event_func | ||
49 | nouveau_fifo_event_func = { | ||
50 | .ctor = nouveau_fifo_event_ctor, | ||
51 | }; | ||
52 | |||
34 | int | 53 | int |
35 | nouveau_fifo_channel_create_(struct nouveau_object *parent, | 54 | nouveau_fifo_channel_create_(struct nouveau_object *parent, |
36 | struct nouveau_object *engine, | 55 | struct nouveau_object *engine, |
@@ -59,14 +78,14 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent, | |||
59 | 78 | ||
60 | dmaeng = (void *)chan->pushdma->base.engine; | 79 | dmaeng = (void *)chan->pushdma->base.engine; |
61 | switch (chan->pushdma->base.oclass->handle) { | 80 | switch (chan->pushdma->base.oclass->handle) { |
62 | case NV_DMA_FROM_MEMORY_CLASS: | 81 | case NV_DMA_FROM_MEMORY: |
63 | case NV_DMA_IN_MEMORY_CLASS: | 82 | case NV_DMA_IN_MEMORY: |
64 | break; | 83 | break; |
65 | default: | 84 | default: |
66 | return -EINVAL; | 85 | return -EINVAL; |
67 | } | 86 | } |
68 | 87 | ||
69 | ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu); | 88 | ret = dmaeng->bind(chan->pushdma, parent, &chan->pushgpu); |
70 | if (ret) | 89 | if (ret) |
71 | return ret; | 90 | return ret; |
72 | 91 | ||
@@ -85,15 +104,10 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent, | |||
85 | return -ENOSPC; | 104 | return -ENOSPC; |
86 | } | 105 | } |
87 | 106 | ||
88 | /* map fifo control registers */ | 107 | chan->addr = nv_device_resource_start(device, bar) + |
89 | chan->user = ioremap(nv_device_resource_start(device, bar) + addr + | 108 | addr + size * chan->chid; |
90 | (chan->chid * size), size); | ||
91 | if (!chan->user) | ||
92 | return -EFAULT; | ||
93 | |||
94 | nouveau_event_trigger(priv->cevent, 1, 0); | ||
95 | |||
96 | chan->size = size; | 109 | chan->size = size; |
110 | nvkm_event_send(&priv->cevent, 1, 0, NULL, 0); | ||
97 | return 0; | 111 | return 0; |
98 | } | 112 | } |
99 | 113 | ||
@@ -103,7 +117,8 @@ nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *chan) | |||
103 | struct nouveau_fifo *priv = (void *)nv_object(chan)->engine; | 117 | struct nouveau_fifo *priv = (void *)nv_object(chan)->engine; |
104 | unsigned long flags; | 118 | unsigned long flags; |
105 | 119 | ||
106 | iounmap(chan->user); | 120 | if (chan->user) |
121 | iounmap(chan->user); | ||
107 | 122 | ||
108 | spin_lock_irqsave(&priv->lock, flags); | 123 | spin_lock_irqsave(&priv->lock, flags); |
109 | priv->channel[chan->chid] = NULL; | 124 | priv->channel[chan->chid] = NULL; |
@@ -121,10 +136,24 @@ _nouveau_fifo_channel_dtor(struct nouveau_object *object) | |||
121 | nouveau_fifo_channel_destroy(chan); | 136 | nouveau_fifo_channel_destroy(chan); |
122 | } | 137 | } |
123 | 138 | ||
139 | int | ||
140 | _nouveau_fifo_channel_map(struct nouveau_object *object, u64 *addr, u32 *size) | ||
141 | { | ||
142 | struct nouveau_fifo_chan *chan = (void *)object; | ||
143 | *addr = chan->addr; | ||
144 | *size = chan->size; | ||
145 | return 0; | ||
146 | } | ||
147 | |||
124 | u32 | 148 | u32 |
125 | _nouveau_fifo_channel_rd32(struct nouveau_object *object, u64 addr) | 149 | _nouveau_fifo_channel_rd32(struct nouveau_object *object, u64 addr) |
126 | { | 150 | { |
127 | struct nouveau_fifo_chan *chan = (void *)object; | 151 | struct nouveau_fifo_chan *chan = (void *)object; |
152 | if (unlikely(!chan->user)) { | ||
153 | chan->user = ioremap(chan->addr, chan->size); | ||
154 | if (WARN_ON_ONCE(chan->user == NULL)) | ||
155 | return 0; | ||
156 | } | ||
128 | return ioread32_native(chan->user + addr); | 157 | return ioread32_native(chan->user + addr); |
129 | } | 158 | } |
130 | 159 | ||
@@ -132,9 +161,57 @@ void | |||
132 | _nouveau_fifo_channel_wr32(struct nouveau_object *object, u64 addr, u32 data) | 161 | _nouveau_fifo_channel_wr32(struct nouveau_object *object, u64 addr, u32 data) |
133 | { | 162 | { |
134 | struct nouveau_fifo_chan *chan = (void *)object; | 163 | struct nouveau_fifo_chan *chan = (void *)object; |
164 | if (unlikely(!chan->user)) { | ||
165 | chan->user = ioremap(chan->addr, chan->size); | ||
166 | if (WARN_ON_ONCE(chan->user == NULL)) | ||
167 | return; | ||
168 | } | ||
135 | iowrite32_native(data, chan->user + addr); | 169 | iowrite32_native(data, chan->user + addr); |
136 | } | 170 | } |
137 | 171 | ||
172 | int | ||
173 | nouveau_fifo_uevent_ctor(void *data, u32 size, struct nvkm_notify *notify) | ||
174 | { | ||
175 | union { | ||
176 | struct nvif_notify_uevent_req none; | ||
177 | } *req = data; | ||
178 | int ret; | ||
179 | |||
180 | if (nvif_unvers(req->none)) { | ||
181 | notify->size = sizeof(struct nvif_notify_uevent_rep); | ||
182 | notify->types = 1; | ||
183 | notify->index = 0; | ||
184 | } | ||
185 | |||
186 | return ret; | ||
187 | } | ||
188 | |||
189 | void | ||
190 | nouveau_fifo_uevent(struct nouveau_fifo *fifo) | ||
191 | { | ||
192 | struct nvif_notify_uevent_rep rep = { | ||
193 | }; | ||
194 | nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep)); | ||
195 | } | ||
196 | |||
197 | int | ||
198 | _nouveau_fifo_channel_ntfy(struct nouveau_object *object, u32 type, | ||
199 | struct nvkm_event **event) | ||
200 | { | ||
201 | struct nouveau_fifo *fifo = (void *)object->engine; | ||
202 | switch (type) { | ||
203 | case G82_CHANNEL_DMA_V0_NTFY_UEVENT: | ||
204 | if (nv_mclass(object) >= G82_CHANNEL_DMA) { | ||
205 | *event = &fifo->uevent; | ||
206 | return 0; | ||
207 | } | ||
208 | break; | ||
209 | default: | ||
210 | break; | ||
211 | } | ||
212 | return -EINVAL; | ||
213 | } | ||
214 | |||
138 | static int | 215 | static int |
139 | nouveau_fifo_chid(struct nouveau_fifo *priv, struct nouveau_object *object) | 216 | nouveau_fifo_chid(struct nouveau_fifo *priv, struct nouveau_object *object) |
140 | { | 217 | { |
@@ -168,8 +245,8 @@ void | |||
168 | nouveau_fifo_destroy(struct nouveau_fifo *priv) | 245 | nouveau_fifo_destroy(struct nouveau_fifo *priv) |
169 | { | 246 | { |
170 | kfree(priv->channel); | 247 | kfree(priv->channel); |
171 | nouveau_event_destroy(&priv->uevent); | 248 | nvkm_event_fini(&priv->uevent); |
172 | nouveau_event_destroy(&priv->cevent); | 249 | nvkm_event_fini(&priv->cevent); |
173 | nouveau_engine_destroy(&priv->base); | 250 | nouveau_engine_destroy(&priv->base); |
174 | } | 251 | } |
175 | 252 | ||
@@ -194,11 +271,7 @@ nouveau_fifo_create_(struct nouveau_object *parent, | |||
194 | if (!priv->channel) | 271 | if (!priv->channel) |
195 | return -ENOMEM; | 272 | return -ENOMEM; |
196 | 273 | ||
197 | ret = nouveau_event_create(1, 1, &priv->cevent); | 274 | ret = nvkm_event_init(&nouveau_fifo_event_func, 1, 1, &priv->cevent); |
198 | if (ret) | ||
199 | return ret; | ||
200 | |||
201 | ret = nouveau_event_create(1, 1, &priv->uevent); | ||
202 | if (ret) | 275 | if (ret) |
203 | return ret; | 276 | return ret; |
204 | 277 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c index c61b16a63884..5ae6a43893b5 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c | |||
@@ -22,8 +22,9 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/client.h> |
26 | #include <core/class.h> | 26 | #include <nvif/unpack.h> |
27 | #include <nvif/class.h> | ||
27 | #include <core/engctx.h> | 28 | #include <core/engctx.h> |
28 | #include <core/namedb.h> | 29 | #include <core/namedb.h> |
29 | #include <core/handle.h> | 30 | #include <core/handle.h> |
@@ -117,16 +118,23 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent, | |||
117 | struct nouveau_oclass *oclass, void *data, u32 size, | 118 | struct nouveau_oclass *oclass, void *data, u32 size, |
118 | struct nouveau_object **pobject) | 119 | struct nouveau_object **pobject) |
119 | { | 120 | { |
121 | union { | ||
122 | struct nv03_channel_dma_v0 v0; | ||
123 | } *args = data; | ||
120 | struct nv04_fifo_priv *priv = (void *)engine; | 124 | struct nv04_fifo_priv *priv = (void *)engine; |
121 | struct nv04_fifo_chan *chan; | 125 | struct nv04_fifo_chan *chan; |
122 | struct nv03_channel_dma_class *args = data; | ||
123 | int ret; | 126 | int ret; |
124 | 127 | ||
125 | if (size < sizeof(*args)) | 128 | nv_ioctl(parent, "create channel dma size %d\n", size); |
126 | return -EINVAL; | 129 | if (nvif_unpack(args->v0, 0, 0, false)) { |
130 | nv_ioctl(parent, "create channel dma vers %d pushbuf %08x " | ||
131 | "offset %016llx\n", args->v0.version, | ||
132 | args->v0.pushbuf, args->v0.offset); | ||
133 | } else | ||
134 | return ret; | ||
127 | 135 | ||
128 | ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, | 136 | ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, |
129 | 0x10000, args->pushbuf, | 137 | 0x10000, args->v0.pushbuf, |
130 | (1ULL << NVDEV_ENGINE_DMAOBJ) | | 138 | (1ULL << NVDEV_ENGINE_DMAOBJ) | |
131 | (1ULL << NVDEV_ENGINE_SW) | | 139 | (1ULL << NVDEV_ENGINE_SW) | |
132 | (1ULL << NVDEV_ENGINE_GR), &chan); | 140 | (1ULL << NVDEV_ENGINE_GR), &chan); |
@@ -134,13 +142,15 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent, | |||
134 | if (ret) | 142 | if (ret) |
135 | return ret; | 143 | return ret; |
136 | 144 | ||
145 | args->v0.chid = chan->base.chid; | ||
146 | |||
137 | nv_parent(chan)->object_attach = nv04_fifo_object_attach; | 147 | nv_parent(chan)->object_attach = nv04_fifo_object_attach; |
138 | nv_parent(chan)->object_detach = nv04_fifo_object_detach; | 148 | nv_parent(chan)->object_detach = nv04_fifo_object_detach; |
139 | nv_parent(chan)->context_attach = nv04_fifo_context_attach; | 149 | nv_parent(chan)->context_attach = nv04_fifo_context_attach; |
140 | chan->ramfc = chan->base.chid * 32; | 150 | chan->ramfc = chan->base.chid * 32; |
141 | 151 | ||
142 | nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset); | 152 | nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset); |
143 | nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset); | 153 | nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset); |
144 | nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4); | 154 | nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4); |
145 | nv_wo32(priv->ramfc, chan->ramfc + 0x10, | 155 | nv_wo32(priv->ramfc, chan->ramfc + 0x10, |
146 | NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | | 156 | NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | |
@@ -242,13 +252,15 @@ nv04_fifo_ofuncs = { | |||
242 | .dtor = nv04_fifo_chan_dtor, | 252 | .dtor = nv04_fifo_chan_dtor, |
243 | .init = nv04_fifo_chan_init, | 253 | .init = nv04_fifo_chan_init, |
244 | .fini = nv04_fifo_chan_fini, | 254 | .fini = nv04_fifo_chan_fini, |
255 | .map = _nouveau_fifo_channel_map, | ||
245 | .rd32 = _nouveau_fifo_channel_rd32, | 256 | .rd32 = _nouveau_fifo_channel_rd32, |
246 | .wr32 = _nouveau_fifo_channel_wr32, | 257 | .wr32 = _nouveau_fifo_channel_wr32, |
258 | .ntfy = _nouveau_fifo_channel_ntfy | ||
247 | }; | 259 | }; |
248 | 260 | ||
249 | static struct nouveau_oclass | 261 | static struct nouveau_oclass |
250 | nv04_fifo_sclass[] = { | 262 | nv04_fifo_sclass[] = { |
251 | { NV03_CHANNEL_DMA_CLASS, &nv04_fifo_ofuncs }, | 263 | { NV03_CHANNEL_DMA, &nv04_fifo_ofuncs }, |
252 | {} | 264 | {} |
253 | }; | 265 | }; |
254 | 266 | ||
@@ -539,7 +551,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev) | |||
539 | } | 551 | } |
540 | 552 | ||
541 | if (status & 0x40000000) { | 553 | if (status & 0x40000000) { |
542 | nouveau_event_trigger(priv->base.uevent, 1, 0); | 554 | nouveau_fifo_uevent(&priv->base); |
543 | nv_wr32(priv, 0x002100, 0x40000000); | 555 | nv_wr32(priv, 0x002100, 0x40000000); |
544 | status &= ~0x40000000; | 556 | status &= ~0x40000000; |
545 | } | 557 | } |
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c index 571a22aa1ae5..2a32add51c81 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c | |||
@@ -22,8 +22,9 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/client.h> |
26 | #include <core/class.h> | 26 | #include <nvif/unpack.h> |
27 | #include <nvif/class.h> | ||
27 | #include <core/engctx.h> | 28 | #include <core/engctx.h> |
28 | #include <core/ramht.h> | 29 | #include <core/ramht.h> |
29 | 30 | ||
@@ -59,16 +60,23 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent, | |||
59 | struct nouveau_oclass *oclass, void *data, u32 size, | 60 | struct nouveau_oclass *oclass, void *data, u32 size, |
60 | struct nouveau_object **pobject) | 61 | struct nouveau_object **pobject) |
61 | { | 62 | { |
63 | union { | ||
64 | struct nv03_channel_dma_v0 v0; | ||
65 | } *args = data; | ||
62 | struct nv04_fifo_priv *priv = (void *)engine; | 66 | struct nv04_fifo_priv *priv = (void *)engine; |
63 | struct nv04_fifo_chan *chan; | 67 | struct nv04_fifo_chan *chan; |
64 | struct nv03_channel_dma_class *args = data; | ||
65 | int ret; | 68 | int ret; |
66 | 69 | ||
67 | if (size < sizeof(*args)) | 70 | nv_ioctl(parent, "create channel dma size %d\n", size); |
68 | return -EINVAL; | 71 | if (nvif_unpack(args->v0, 0, 0, false)) { |
72 | nv_ioctl(parent, "create channel dma vers %d pushbuf %08x " | ||
73 | "offset %016llx\n", args->v0.version, | ||
74 | args->v0.pushbuf, args->v0.offset); | ||
75 | } else | ||
76 | return ret; | ||
69 | 77 | ||
70 | ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, | 78 | ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, |
71 | 0x10000, args->pushbuf, | 79 | 0x10000, args->v0.pushbuf, |
72 | (1ULL << NVDEV_ENGINE_DMAOBJ) | | 80 | (1ULL << NVDEV_ENGINE_DMAOBJ) | |
73 | (1ULL << NVDEV_ENGINE_SW) | | 81 | (1ULL << NVDEV_ENGINE_SW) | |
74 | (1ULL << NVDEV_ENGINE_GR), &chan); | 82 | (1ULL << NVDEV_ENGINE_GR), &chan); |
@@ -76,13 +84,15 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent, | |||
76 | if (ret) | 84 | if (ret) |
77 | return ret; | 85 | return ret; |
78 | 86 | ||
87 | args->v0.chid = chan->base.chid; | ||
88 | |||
79 | nv_parent(chan)->object_attach = nv04_fifo_object_attach; | 89 | nv_parent(chan)->object_attach = nv04_fifo_object_attach; |
80 | nv_parent(chan)->object_detach = nv04_fifo_object_detach; | 90 | nv_parent(chan)->object_detach = nv04_fifo_object_detach; |
81 | nv_parent(chan)->context_attach = nv04_fifo_context_attach; | 91 | nv_parent(chan)->context_attach = nv04_fifo_context_attach; |
82 | chan->ramfc = chan->base.chid * 32; | 92 | chan->ramfc = chan->base.chid * 32; |
83 | 93 | ||
84 | nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset); | 94 | nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset); |
85 | nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset); | 95 | nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset); |
86 | nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); | 96 | nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); |
87 | nv_wo32(priv->ramfc, chan->ramfc + 0x14, | 97 | nv_wo32(priv->ramfc, chan->ramfc + 0x14, |
88 | NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | | 98 | NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | |
@@ -100,13 +110,15 @@ nv10_fifo_ofuncs = { | |||
100 | .dtor = nv04_fifo_chan_dtor, | 110 | .dtor = nv04_fifo_chan_dtor, |
101 | .init = nv04_fifo_chan_init, | 111 | .init = nv04_fifo_chan_init, |
102 | .fini = nv04_fifo_chan_fini, | 112 | .fini = nv04_fifo_chan_fini, |
113 | .map = _nouveau_fifo_channel_map, | ||
103 | .rd32 = _nouveau_fifo_channel_rd32, | 114 | .rd32 = _nouveau_fifo_channel_rd32, |
104 | .wr32 = _nouveau_fifo_channel_wr32, | 115 | .wr32 = _nouveau_fifo_channel_wr32, |
116 | .ntfy = _nouveau_fifo_channel_ntfy | ||
105 | }; | 117 | }; |
106 | 118 | ||
107 | static struct nouveau_oclass | 119 | static struct nouveau_oclass |
108 | nv10_fifo_sclass[] = { | 120 | nv10_fifo_sclass[] = { |
109 | { NV10_CHANNEL_DMA_CLASS, &nv10_fifo_ofuncs }, | 121 | { NV10_CHANNEL_DMA, &nv10_fifo_ofuncs }, |
110 | {} | 122 | {} |
111 | }; | 123 | }; |
112 | 124 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c index f25760209316..12d76c8adb23 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c | |||
@@ -22,8 +22,9 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/client.h> |
26 | #include <core/class.h> | 26 | #include <nvif/unpack.h> |
27 | #include <nvif/class.h> | ||
27 | #include <core/engctx.h> | 28 | #include <core/engctx.h> |
28 | #include <core/ramht.h> | 29 | #include <core/ramht.h> |
29 | 30 | ||
@@ -64,16 +65,23 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent, | |||
64 | struct nouveau_oclass *oclass, void *data, u32 size, | 65 | struct nouveau_oclass *oclass, void *data, u32 size, |
65 | struct nouveau_object **pobject) | 66 | struct nouveau_object **pobject) |
66 | { | 67 | { |
68 | union { | ||
69 | struct nv03_channel_dma_v0 v0; | ||
70 | } *args = data; | ||
67 | struct nv04_fifo_priv *priv = (void *)engine; | 71 | struct nv04_fifo_priv *priv = (void *)engine; |
68 | struct nv04_fifo_chan *chan; | 72 | struct nv04_fifo_chan *chan; |
69 | struct nv03_channel_dma_class *args = data; | ||
70 | int ret; | 73 | int ret; |
71 | 74 | ||
72 | if (size < sizeof(*args)) | 75 | nv_ioctl(parent, "create channel dma size %d\n", size); |
73 | return -EINVAL; | 76 | if (nvif_unpack(args->v0, 0, 0, false)) { |
77 | nv_ioctl(parent, "create channel dma vers %d pushbuf %08x " | ||
78 | "offset %016llx\n", args->v0.version, | ||
79 | args->v0.pushbuf, args->v0.offset); | ||
80 | } else | ||
81 | return ret; | ||
74 | 82 | ||
75 | ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, | 83 | ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, |
76 | 0x10000, args->pushbuf, | 84 | 0x10000, args->v0.pushbuf, |
77 | (1ULL << NVDEV_ENGINE_DMAOBJ) | | 85 | (1ULL << NVDEV_ENGINE_DMAOBJ) | |
78 | (1ULL << NVDEV_ENGINE_SW) | | 86 | (1ULL << NVDEV_ENGINE_SW) | |
79 | (1ULL << NVDEV_ENGINE_GR) | | 87 | (1ULL << NVDEV_ENGINE_GR) | |
@@ -83,13 +91,15 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent, | |||
83 | if (ret) | 91 | if (ret) |
84 | return ret; | 92 | return ret; |
85 | 93 | ||
94 | args->v0.chid = chan->base.chid; | ||
95 | |||
86 | nv_parent(chan)->object_attach = nv04_fifo_object_attach; | 96 | nv_parent(chan)->object_attach = nv04_fifo_object_attach; |
87 | nv_parent(chan)->object_detach = nv04_fifo_object_detach; | 97 | nv_parent(chan)->object_detach = nv04_fifo_object_detach; |
88 | nv_parent(chan)->context_attach = nv04_fifo_context_attach; | 98 | nv_parent(chan)->context_attach = nv04_fifo_context_attach; |
89 | chan->ramfc = chan->base.chid * 64; | 99 | chan->ramfc = chan->base.chid * 64; |
90 | 100 | ||
91 | nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset); | 101 | nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset); |
92 | nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset); | 102 | nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset); |
93 | nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); | 103 | nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); |
94 | nv_wo32(priv->ramfc, chan->ramfc + 0x14, | 104 | nv_wo32(priv->ramfc, chan->ramfc + 0x14, |
95 | NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | | 105 | NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | |
@@ -107,13 +117,15 @@ nv17_fifo_ofuncs = { | |||
107 | .dtor = nv04_fifo_chan_dtor, | 117 | .dtor = nv04_fifo_chan_dtor, |
108 | .init = nv04_fifo_chan_init, | 118 | .init = nv04_fifo_chan_init, |
109 | .fini = nv04_fifo_chan_fini, | 119 | .fini = nv04_fifo_chan_fini, |
120 | .map = _nouveau_fifo_channel_map, | ||
110 | .rd32 = _nouveau_fifo_channel_rd32, | 121 | .rd32 = _nouveau_fifo_channel_rd32, |
111 | .wr32 = _nouveau_fifo_channel_wr32, | 122 | .wr32 = _nouveau_fifo_channel_wr32, |
123 | .ntfy = _nouveau_fifo_channel_ntfy | ||
112 | }; | 124 | }; |
113 | 125 | ||
114 | static struct nouveau_oclass | 126 | static struct nouveau_oclass |
115 | nv17_fifo_sclass[] = { | 127 | nv17_fifo_sclass[] = { |
116 | { NV17_CHANNEL_DMA_CLASS, &nv17_fifo_ofuncs }, | 128 | { NV17_CHANNEL_DMA, &nv17_fifo_ofuncs }, |
117 | {} | 129 | {} |
118 | }; | 130 | }; |
119 | 131 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c index 343487ed2238..9f49c3a24dc6 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c | |||
@@ -22,8 +22,9 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/client.h> |
26 | #include <core/class.h> | 26 | #include <nvif/unpack.h> |
27 | #include <nvif/class.h> | ||
27 | #include <core/engctx.h> | 28 | #include <core/engctx.h> |
28 | #include <core/ramht.h> | 29 | #include <core/ramht.h> |
29 | 30 | ||
@@ -182,16 +183,23 @@ nv40_fifo_chan_ctor(struct nouveau_object *parent, | |||
182 | struct nouveau_oclass *oclass, void *data, u32 size, | 183 | struct nouveau_oclass *oclass, void *data, u32 size, |
183 | struct nouveau_object **pobject) | 184 | struct nouveau_object **pobject) |
184 | { | 185 | { |
186 | union { | ||
187 | struct nv03_channel_dma_v0 v0; | ||
188 | } *args = data; | ||
185 | struct nv04_fifo_priv *priv = (void *)engine; | 189 | struct nv04_fifo_priv *priv = (void *)engine; |
186 | struct nv04_fifo_chan *chan; | 190 | struct nv04_fifo_chan *chan; |
187 | struct nv03_channel_dma_class *args = data; | ||
188 | int ret; | 191 | int ret; |
189 | 192 | ||
190 | if (size < sizeof(*args)) | 193 | nv_ioctl(parent, "create channel dma size %d\n", size); |
191 | return -EINVAL; | 194 | if (nvif_unpack(args->v0, 0, 0, false)) { |
195 | nv_ioctl(parent, "create channel dma vers %d pushbuf %08x " | ||
196 | "offset %016llx\n", args->v0.version, | ||
197 | args->v0.pushbuf, args->v0.offset); | ||
198 | } else | ||
199 | return ret; | ||
192 | 200 | ||
193 | ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, | 201 | ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, |
194 | 0x1000, args->pushbuf, | 202 | 0x1000, args->v0.pushbuf, |
195 | (1ULL << NVDEV_ENGINE_DMAOBJ) | | 203 | (1ULL << NVDEV_ENGINE_DMAOBJ) | |
196 | (1ULL << NVDEV_ENGINE_SW) | | 204 | (1ULL << NVDEV_ENGINE_SW) | |
197 | (1ULL << NVDEV_ENGINE_GR) | | 205 | (1ULL << NVDEV_ENGINE_GR) | |
@@ -200,14 +208,16 @@ nv40_fifo_chan_ctor(struct nouveau_object *parent, | |||
200 | if (ret) | 208 | if (ret) |
201 | return ret; | 209 | return ret; |
202 | 210 | ||
211 | args->v0.chid = chan->base.chid; | ||
212 | |||
203 | nv_parent(chan)->context_attach = nv40_fifo_context_attach; | 213 | nv_parent(chan)->context_attach = nv40_fifo_context_attach; |
204 | nv_parent(chan)->context_detach = nv40_fifo_context_detach; | 214 | nv_parent(chan)->context_detach = nv40_fifo_context_detach; |
205 | nv_parent(chan)->object_attach = nv40_fifo_object_attach; | 215 | nv_parent(chan)->object_attach = nv40_fifo_object_attach; |
206 | nv_parent(chan)->object_detach = nv04_fifo_object_detach; | 216 | nv_parent(chan)->object_detach = nv04_fifo_object_detach; |
207 | chan->ramfc = chan->base.chid * 128; | 217 | chan->ramfc = chan->base.chid * 128; |
208 | 218 | ||
209 | nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset); | 219 | nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset); |
210 | nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset); | 220 | nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset); |
211 | nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); | 221 | nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); |
212 | nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 | | 222 | nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 | |
213 | NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | | 223 | NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | |
@@ -226,13 +236,15 @@ nv40_fifo_ofuncs = { | |||
226 | .dtor = nv04_fifo_chan_dtor, | 236 | .dtor = nv04_fifo_chan_dtor, |
227 | .init = nv04_fifo_chan_init, | 237 | .init = nv04_fifo_chan_init, |
228 | .fini = nv04_fifo_chan_fini, | 238 | .fini = nv04_fifo_chan_fini, |
239 | .map = _nouveau_fifo_channel_map, | ||
229 | .rd32 = _nouveau_fifo_channel_rd32, | 240 | .rd32 = _nouveau_fifo_channel_rd32, |
230 | .wr32 = _nouveau_fifo_channel_wr32, | 241 | .wr32 = _nouveau_fifo_channel_wr32, |
242 | .ntfy = _nouveau_fifo_channel_ntfy | ||
231 | }; | 243 | }; |
232 | 244 | ||
233 | static struct nouveau_oclass | 245 | static struct nouveau_oclass |
234 | nv40_fifo_sclass[] = { | 246 | nv40_fifo_sclass[] = { |
235 | { NV40_CHANNEL_DMA_CLASS, &nv40_fifo_ofuncs }, | 247 | { NV40_CHANNEL_DMA, &nv40_fifo_ofuncs }, |
236 | {} | 248 | {} |
237 | }; | 249 | }; |
238 | 250 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c index e6352bd5b4ff..5d1e86bc244c 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c | |||
@@ -25,7 +25,8 @@ | |||
25 | #include <core/client.h> | 25 | #include <core/client.h> |
26 | #include <core/engctx.h> | 26 | #include <core/engctx.h> |
27 | #include <core/ramht.h> | 27 | #include <core/ramht.h> |
28 | #include <core/class.h> | 28 | #include <nvif/unpack.h> |
29 | #include <nvif/class.h> | ||
29 | 30 | ||
30 | #include <subdev/timer.h> | 31 | #include <subdev/timer.h> |
31 | #include <subdev/bar.h> | 32 | #include <subdev/bar.h> |
@@ -194,17 +195,24 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent, | |||
194 | struct nouveau_oclass *oclass, void *data, u32 size, | 195 | struct nouveau_oclass *oclass, void *data, u32 size, |
195 | struct nouveau_object **pobject) | 196 | struct nouveau_object **pobject) |
196 | { | 197 | { |
198 | union { | ||
199 | struct nv03_channel_dma_v0 v0; | ||
200 | } *args = data; | ||
197 | struct nouveau_bar *bar = nouveau_bar(parent); | 201 | struct nouveau_bar *bar = nouveau_bar(parent); |
198 | struct nv50_fifo_base *base = (void *)parent; | 202 | struct nv50_fifo_base *base = (void *)parent; |
199 | struct nv50_fifo_chan *chan; | 203 | struct nv50_fifo_chan *chan; |
200 | struct nv03_channel_dma_class *args = data; | ||
201 | int ret; | 204 | int ret; |
202 | 205 | ||
203 | if (size < sizeof(*args)) | 206 | nv_ioctl(parent, "create channel dma size %d\n", size); |
204 | return -EINVAL; | 207 | if (nvif_unpack(args->v0, 0, 0, false)) { |
208 | nv_ioctl(parent, "create channel dma vers %d pushbuf %08x " | ||
209 | "offset %016llx\n", args->v0.version, | ||
210 | args->v0.pushbuf, args->v0.offset); | ||
211 | } else | ||
212 | return ret; | ||
205 | 213 | ||
206 | ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, | 214 | ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, |
207 | 0x2000, args->pushbuf, | 215 | 0x2000, args->v0.pushbuf, |
208 | (1ULL << NVDEV_ENGINE_DMAOBJ) | | 216 | (1ULL << NVDEV_ENGINE_DMAOBJ) | |
209 | (1ULL << NVDEV_ENGINE_SW) | | 217 | (1ULL << NVDEV_ENGINE_SW) | |
210 | (1ULL << NVDEV_ENGINE_GR) | | 218 | (1ULL << NVDEV_ENGINE_GR) | |
@@ -213,6 +221,8 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent, | |||
213 | if (ret) | 221 | if (ret) |
214 | return ret; | 222 | return ret; |
215 | 223 | ||
224 | args->v0.chid = chan->base.chid; | ||
225 | |||
216 | nv_parent(chan)->context_attach = nv50_fifo_context_attach; | 226 | nv_parent(chan)->context_attach = nv50_fifo_context_attach; |
217 | nv_parent(chan)->context_detach = nv50_fifo_context_detach; | 227 | nv_parent(chan)->context_detach = nv50_fifo_context_detach; |
218 | nv_parent(chan)->object_attach = nv50_fifo_object_attach; | 228 | nv_parent(chan)->object_attach = nv50_fifo_object_attach; |
@@ -223,10 +233,10 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent, | |||
223 | if (ret) | 233 | if (ret) |
224 | return ret; | 234 | return ret; |
225 | 235 | ||
226 | nv_wo32(base->ramfc, 0x08, lower_32_bits(args->offset)); | 236 | nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset)); |
227 | nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->offset)); | 237 | nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset)); |
228 | nv_wo32(base->ramfc, 0x10, lower_32_bits(args->offset)); | 238 | nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset)); |
229 | nv_wo32(base->ramfc, 0x14, upper_32_bits(args->offset)); | 239 | nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset)); |
230 | nv_wo32(base->ramfc, 0x3c, 0x003f6078); | 240 | nv_wo32(base->ramfc, 0x3c, 0x003f6078); |
231 | nv_wo32(base->ramfc, 0x44, 0x01003fff); | 241 | nv_wo32(base->ramfc, 0x44, 0x01003fff); |
232 | nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4); | 242 | nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4); |
@@ -247,18 +257,26 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent, | |||
247 | struct nouveau_oclass *oclass, void *data, u32 size, | 257 | struct nouveau_oclass *oclass, void *data, u32 size, |
248 | struct nouveau_object **pobject) | 258 | struct nouveau_object **pobject) |
249 | { | 259 | { |
250 | struct nv50_channel_ind_class *args = data; | 260 | union { |
261 | struct nv50_channel_gpfifo_v0 v0; | ||
262 | } *args = data; | ||
251 | struct nouveau_bar *bar = nouveau_bar(parent); | 263 | struct nouveau_bar *bar = nouveau_bar(parent); |
252 | struct nv50_fifo_base *base = (void *)parent; | 264 | struct nv50_fifo_base *base = (void *)parent; |
253 | struct nv50_fifo_chan *chan; | 265 | struct nv50_fifo_chan *chan; |
254 | u64 ioffset, ilength; | 266 | u64 ioffset, ilength; |
255 | int ret; | 267 | int ret; |
256 | 268 | ||
257 | if (size < sizeof(*args)) | 269 | nv_ioctl(parent, "create channel gpfifo size %d\n", size); |
258 | return -EINVAL; | 270 | if (nvif_unpack(args->v0, 0, 0, false)) { |
271 | nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x " | ||
272 | "ioffset %016llx ilength %08x\n", | ||
273 | args->v0.version, args->v0.pushbuf, args->v0.ioffset, | ||
274 | args->v0.ilength); | ||
275 | } else | ||
276 | return ret; | ||
259 | 277 | ||
260 | ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, | 278 | ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, |
261 | 0x2000, args->pushbuf, | 279 | 0x2000, args->v0.pushbuf, |
262 | (1ULL << NVDEV_ENGINE_DMAOBJ) | | 280 | (1ULL << NVDEV_ENGINE_DMAOBJ) | |
263 | (1ULL << NVDEV_ENGINE_SW) | | 281 | (1ULL << NVDEV_ENGINE_SW) | |
264 | (1ULL << NVDEV_ENGINE_GR) | | 282 | (1ULL << NVDEV_ENGINE_GR) | |
@@ -267,6 +285,8 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent, | |||
267 | if (ret) | 285 | if (ret) |
268 | return ret; | 286 | return ret; |
269 | 287 | ||
288 | args->v0.chid = chan->base.chid; | ||
289 | |||
270 | nv_parent(chan)->context_attach = nv50_fifo_context_attach; | 290 | nv_parent(chan)->context_attach = nv50_fifo_context_attach; |
271 | nv_parent(chan)->context_detach = nv50_fifo_context_detach; | 291 | nv_parent(chan)->context_detach = nv50_fifo_context_detach; |
272 | nv_parent(chan)->object_attach = nv50_fifo_object_attach; | 292 | nv_parent(chan)->object_attach = nv50_fifo_object_attach; |
@@ -277,8 +297,8 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent, | |||
277 | if (ret) | 297 | if (ret) |
278 | return ret; | 298 | return ret; |
279 | 299 | ||
280 | ioffset = args->ioffset; | 300 | ioffset = args->v0.ioffset; |
281 | ilength = order_base_2(args->ilength / 8); | 301 | ilength = order_base_2(args->v0.ilength / 8); |
282 | 302 | ||
283 | nv_wo32(base->ramfc, 0x3c, 0x403f6078); | 303 | nv_wo32(base->ramfc, 0x3c, 0x403f6078); |
284 | nv_wo32(base->ramfc, 0x44, 0x01003fff); | 304 | nv_wo32(base->ramfc, 0x44, 0x01003fff); |
@@ -343,8 +363,10 @@ nv50_fifo_ofuncs_dma = { | |||
343 | .dtor = nv50_fifo_chan_dtor, | 363 | .dtor = nv50_fifo_chan_dtor, |
344 | .init = nv50_fifo_chan_init, | 364 | .init = nv50_fifo_chan_init, |
345 | .fini = nv50_fifo_chan_fini, | 365 | .fini = nv50_fifo_chan_fini, |
366 | .map = _nouveau_fifo_channel_map, | ||
346 | .rd32 = _nouveau_fifo_channel_rd32, | 367 | .rd32 = _nouveau_fifo_channel_rd32, |
347 | .wr32 = _nouveau_fifo_channel_wr32, | 368 | .wr32 = _nouveau_fifo_channel_wr32, |
369 | .ntfy = _nouveau_fifo_channel_ntfy | ||
348 | }; | 370 | }; |
349 | 371 | ||
350 | static struct nouveau_ofuncs | 372 | static struct nouveau_ofuncs |
@@ -353,14 +375,16 @@ nv50_fifo_ofuncs_ind = { | |||
353 | .dtor = nv50_fifo_chan_dtor, | 375 | .dtor = nv50_fifo_chan_dtor, |
354 | .init = nv50_fifo_chan_init, | 376 | .init = nv50_fifo_chan_init, |
355 | .fini = nv50_fifo_chan_fini, | 377 | .fini = nv50_fifo_chan_fini, |
378 | .map = _nouveau_fifo_channel_map, | ||
356 | .rd32 = _nouveau_fifo_channel_rd32, | 379 | .rd32 = _nouveau_fifo_channel_rd32, |
357 | .wr32 = _nouveau_fifo_channel_wr32, | 380 | .wr32 = _nouveau_fifo_channel_wr32, |
381 | .ntfy = _nouveau_fifo_channel_ntfy | ||
358 | }; | 382 | }; |
359 | 383 | ||
360 | static struct nouveau_oclass | 384 | static struct nouveau_oclass |
361 | nv50_fifo_sclass[] = { | 385 | nv50_fifo_sclass[] = { |
362 | { NV50_CHANNEL_DMA_CLASS, &nv50_fifo_ofuncs_dma }, | 386 | { NV50_CHANNEL_DMA, &nv50_fifo_ofuncs_dma }, |
363 | { NV50_CHANNEL_IND_CLASS, &nv50_fifo_ofuncs_ind }, | 387 | { NV50_CHANNEL_GPFIFO, &nv50_fifo_ofuncs_ind }, |
364 | {} | 388 | {} |
365 | }; | 389 | }; |
366 | 390 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c index 6e5ac16e5460..1f42996b354a 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c | |||
@@ -27,7 +27,8 @@ | |||
27 | #include <core/engctx.h> | 27 | #include <core/engctx.h> |
28 | #include <core/ramht.h> | 28 | #include <core/ramht.h> |
29 | #include <core/event.h> | 29 | #include <core/event.h> |
30 | #include <core/class.h> | 30 | #include <nvif/unpack.h> |
31 | #include <nvif/class.h> | ||
31 | 32 | ||
32 | #include <subdev/timer.h> | 33 | #include <subdev/timer.h> |
33 | #include <subdev/bar.h> | 34 | #include <subdev/bar.h> |
@@ -160,17 +161,24 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent, | |||
160 | struct nouveau_oclass *oclass, void *data, u32 size, | 161 | struct nouveau_oclass *oclass, void *data, u32 size, |
161 | struct nouveau_object **pobject) | 162 | struct nouveau_object **pobject) |
162 | { | 163 | { |
164 | union { | ||
165 | struct nv03_channel_dma_v0 v0; | ||
166 | } *args = data; | ||
163 | struct nouveau_bar *bar = nouveau_bar(parent); | 167 | struct nouveau_bar *bar = nouveau_bar(parent); |
164 | struct nv50_fifo_base *base = (void *)parent; | 168 | struct nv50_fifo_base *base = (void *)parent; |
165 | struct nv50_fifo_chan *chan; | 169 | struct nv50_fifo_chan *chan; |
166 | struct nv03_channel_dma_class *args = data; | ||
167 | int ret; | 170 | int ret; |
168 | 171 | ||
169 | if (size < sizeof(*args)) | 172 | nv_ioctl(parent, "create channel dma size %d\n", size); |
170 | return -EINVAL; | 173 | if (nvif_unpack(args->v0, 0, 0, false)) { |
174 | nv_ioctl(parent, "create channel dma vers %d pushbuf %08x " | ||
175 | "offset %016llx\n", args->v0.version, | ||
176 | args->v0.pushbuf, args->v0.offset); | ||
177 | } else | ||
178 | return ret; | ||
171 | 179 | ||
172 | ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, | 180 | ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, |
173 | 0x2000, args->pushbuf, | 181 | 0x2000, args->v0.pushbuf, |
174 | (1ULL << NVDEV_ENGINE_DMAOBJ) | | 182 | (1ULL << NVDEV_ENGINE_DMAOBJ) | |
175 | (1ULL << NVDEV_ENGINE_SW) | | 183 | (1ULL << NVDEV_ENGINE_SW) | |
176 | (1ULL << NVDEV_ENGINE_GR) | | 184 | (1ULL << NVDEV_ENGINE_GR) | |
@@ -186,6 +194,8 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent, | |||
186 | if (ret) | 194 | if (ret) |
187 | return ret; | 195 | return ret; |
188 | 196 | ||
197 | args->v0.chid = chan->base.chid; | ||
198 | |||
189 | ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16, | 199 | ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16, |
190 | &chan->ramht); | 200 | &chan->ramht); |
191 | if (ret) | 201 | if (ret) |
@@ -196,10 +206,10 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent, | |||
196 | nv_parent(chan)->object_attach = nv84_fifo_object_attach; | 206 | nv_parent(chan)->object_attach = nv84_fifo_object_attach; |
197 | nv_parent(chan)->object_detach = nv50_fifo_object_detach; | 207 | nv_parent(chan)->object_detach = nv50_fifo_object_detach; |
198 | 208 | ||
199 | nv_wo32(base->ramfc, 0x08, lower_32_bits(args->offset)); | 209 | nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset)); |
200 | nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->offset)); | 210 | nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset)); |
201 | nv_wo32(base->ramfc, 0x10, lower_32_bits(args->offset)); | 211 | nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset)); |
202 | nv_wo32(base->ramfc, 0x14, upper_32_bits(args->offset)); | 212 | nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset)); |
203 | nv_wo32(base->ramfc, 0x3c, 0x003f6078); | 213 | nv_wo32(base->ramfc, 0x3c, 0x003f6078); |
204 | nv_wo32(base->ramfc, 0x44, 0x01003fff); | 214 | nv_wo32(base->ramfc, 0x44, 0x01003fff); |
205 | nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4); | 215 | nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4); |
@@ -222,18 +232,26 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent, | |||
222 | struct nouveau_oclass *oclass, void *data, u32 size, | 232 | struct nouveau_oclass *oclass, void *data, u32 size, |
223 | struct nouveau_object **pobject) | 233 | struct nouveau_object **pobject) |
224 | { | 234 | { |
235 | union { | ||
236 | struct nv50_channel_gpfifo_v0 v0; | ||
237 | } *args = data; | ||
225 | struct nouveau_bar *bar = nouveau_bar(parent); | 238 | struct nouveau_bar *bar = nouveau_bar(parent); |
226 | struct nv50_fifo_base *base = (void *)parent; | 239 | struct nv50_fifo_base *base = (void *)parent; |
227 | struct nv50_fifo_chan *chan; | 240 | struct nv50_fifo_chan *chan; |
228 | struct nv50_channel_ind_class *args = data; | ||
229 | u64 ioffset, ilength; | 241 | u64 ioffset, ilength; |
230 | int ret; | 242 | int ret; |
231 | 243 | ||
232 | if (size < sizeof(*args)) | 244 | nv_ioctl(parent, "create channel gpfifo size %d\n", size); |
233 | return -EINVAL; | 245 | if (nvif_unpack(args->v0, 0, 0, false)) { |
246 | nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x " | ||
247 | "ioffset %016llx ilength %08x\n", | ||
248 | args->v0.version, args->v0.pushbuf, args->v0.ioffset, | ||
249 | args->v0.ilength); | ||
250 | } else | ||
251 | return ret; | ||
234 | 252 | ||
235 | ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, | 253 | ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, |
236 | 0x2000, args->pushbuf, | 254 | 0x2000, args->v0.pushbuf, |
237 | (1ULL << NVDEV_ENGINE_DMAOBJ) | | 255 | (1ULL << NVDEV_ENGINE_DMAOBJ) | |
238 | (1ULL << NVDEV_ENGINE_SW) | | 256 | (1ULL << NVDEV_ENGINE_SW) | |
239 | (1ULL << NVDEV_ENGINE_GR) | | 257 | (1ULL << NVDEV_ENGINE_GR) | |
@@ -249,6 +267,8 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent, | |||
249 | if (ret) | 267 | if (ret) |
250 | return ret; | 268 | return ret; |
251 | 269 | ||
270 | args->v0.chid = chan->base.chid; | ||
271 | |||
252 | ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16, | 272 | ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16, |
253 | &chan->ramht); | 273 | &chan->ramht); |
254 | if (ret) | 274 | if (ret) |
@@ -259,8 +279,8 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent, | |||
259 | nv_parent(chan)->object_attach = nv84_fifo_object_attach; | 279 | nv_parent(chan)->object_attach = nv84_fifo_object_attach; |
260 | nv_parent(chan)->object_detach = nv50_fifo_object_detach; | 280 | nv_parent(chan)->object_detach = nv50_fifo_object_detach; |
261 | 281 | ||
262 | ioffset = args->ioffset; | 282 | ioffset = args->v0.ioffset; |
263 | ilength = order_base_2(args->ilength / 8); | 283 | ilength = order_base_2(args->v0.ilength / 8); |
264 | 284 | ||
265 | nv_wo32(base->ramfc, 0x3c, 0x403f6078); | 285 | nv_wo32(base->ramfc, 0x3c, 0x403f6078); |
266 | nv_wo32(base->ramfc, 0x44, 0x01003fff); | 286 | nv_wo32(base->ramfc, 0x44, 0x01003fff); |
@@ -304,8 +324,10 @@ nv84_fifo_ofuncs_dma = { | |||
304 | .dtor = nv50_fifo_chan_dtor, | 324 | .dtor = nv50_fifo_chan_dtor, |
305 | .init = nv84_fifo_chan_init, | 325 | .init = nv84_fifo_chan_init, |
306 | .fini = nv50_fifo_chan_fini, | 326 | .fini = nv50_fifo_chan_fini, |
327 | .map = _nouveau_fifo_channel_map, | ||
307 | .rd32 = _nouveau_fifo_channel_rd32, | 328 | .rd32 = _nouveau_fifo_channel_rd32, |
308 | .wr32 = _nouveau_fifo_channel_wr32, | 329 | .wr32 = _nouveau_fifo_channel_wr32, |
330 | .ntfy = _nouveau_fifo_channel_ntfy | ||
309 | }; | 331 | }; |
310 | 332 | ||
311 | static struct nouveau_ofuncs | 333 | static struct nouveau_ofuncs |
@@ -314,14 +336,16 @@ nv84_fifo_ofuncs_ind = { | |||
314 | .dtor = nv50_fifo_chan_dtor, | 336 | .dtor = nv50_fifo_chan_dtor, |
315 | .init = nv84_fifo_chan_init, | 337 | .init = nv84_fifo_chan_init, |
316 | .fini = nv50_fifo_chan_fini, | 338 | .fini = nv50_fifo_chan_fini, |
339 | .map = _nouveau_fifo_channel_map, | ||
317 | .rd32 = _nouveau_fifo_channel_rd32, | 340 | .rd32 = _nouveau_fifo_channel_rd32, |
318 | .wr32 = _nouveau_fifo_channel_wr32, | 341 | .wr32 = _nouveau_fifo_channel_wr32, |
342 | .ntfy = _nouveau_fifo_channel_ntfy | ||
319 | }; | 343 | }; |
320 | 344 | ||
321 | static struct nouveau_oclass | 345 | static struct nouveau_oclass |
322 | nv84_fifo_sclass[] = { | 346 | nv84_fifo_sclass[] = { |
323 | { NV84_CHANNEL_DMA_CLASS, &nv84_fifo_ofuncs_dma }, | 347 | { G82_CHANNEL_DMA, &nv84_fifo_ofuncs_dma }, |
324 | { NV84_CHANNEL_IND_CLASS, &nv84_fifo_ofuncs_ind }, | 348 | { G82_CHANNEL_GPFIFO, &nv84_fifo_ofuncs_ind }, |
325 | {} | 349 | {} |
326 | }; | 350 | }; |
327 | 351 | ||
@@ -389,19 +413,26 @@ nv84_fifo_cclass = { | |||
389 | ******************************************************************************/ | 413 | ******************************************************************************/ |
390 | 414 | ||
391 | static void | 415 | static void |
392 | nv84_fifo_uevent_enable(struct nouveau_event *event, int type, int index) | 416 | nv84_fifo_uevent_init(struct nvkm_event *event, int type, int index) |
393 | { | 417 | { |
394 | struct nv84_fifo_priv *priv = event->priv; | 418 | struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent); |
395 | nv_mask(priv, 0x002140, 0x40000000, 0x40000000); | 419 | nv_mask(fifo, 0x002140, 0x40000000, 0x40000000); |
396 | } | 420 | } |
397 | 421 | ||
398 | static void | 422 | static void |
399 | nv84_fifo_uevent_disable(struct nouveau_event *event, int type, int index) | 423 | nv84_fifo_uevent_fini(struct nvkm_event *event, int type, int index) |
400 | { | 424 | { |
401 | struct nv84_fifo_priv *priv = event->priv; | 425 | struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent); |
402 | nv_mask(priv, 0x002140, 0x40000000, 0x00000000); | 426 | nv_mask(fifo, 0x002140, 0x40000000, 0x00000000); |
403 | } | 427 | } |
404 | 428 | ||
429 | static const struct nvkm_event_func | ||
430 | nv84_fifo_uevent_func = { | ||
431 | .ctor = nouveau_fifo_uevent_ctor, | ||
432 | .init = nv84_fifo_uevent_init, | ||
433 | .fini = nv84_fifo_uevent_fini, | ||
434 | }; | ||
435 | |||
405 | static int | 436 | static int |
406 | nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | 437 | nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, |
407 | struct nouveau_oclass *oclass, void *data, u32 size, | 438 | struct nouveau_oclass *oclass, void *data, u32 size, |
@@ -425,9 +456,9 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
425 | if (ret) | 456 | if (ret) |
426 | return ret; | 457 | return ret; |
427 | 458 | ||
428 | priv->base.uevent->enable = nv84_fifo_uevent_enable; | 459 | ret = nvkm_event_init(&nv84_fifo_uevent_func, 1, 1, &priv->base.uevent); |
429 | priv->base.uevent->disable = nv84_fifo_uevent_disable; | 460 | if (ret) |
430 | priv->base.uevent->priv = priv; | 461 | return ret; |
431 | 462 | ||
432 | nv_subdev(priv)->unit = 0x00000100; | 463 | nv_subdev(priv)->unit = 0x00000100; |
433 | nv_subdev(priv)->intr = nv04_fifo_intr; | 464 | nv_subdev(priv)->intr = nv04_fifo_intr; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c index ae4a4dc5642a..1fe1f8fbda0c 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c | |||
@@ -28,7 +28,8 @@ | |||
28 | #include <core/gpuobj.h> | 28 | #include <core/gpuobj.h> |
29 | #include <core/engctx.h> | 29 | #include <core/engctx.h> |
30 | #include <core/event.h> | 30 | #include <core/event.h> |
31 | #include <core/class.h> | 31 | #include <nvif/unpack.h> |
32 | #include <nvif/class.h> | ||
32 | #include <core/enum.h> | 33 | #include <core/enum.h> |
33 | 34 | ||
34 | #include <subdev/timer.h> | 35 | #include <subdev/timer.h> |
@@ -187,20 +188,28 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent, | |||
187 | struct nouveau_oclass *oclass, void *data, u32 size, | 188 | struct nouveau_oclass *oclass, void *data, u32 size, |
188 | struct nouveau_object **pobject) | 189 | struct nouveau_object **pobject) |
189 | { | 190 | { |
191 | union { | ||
192 | struct nv50_channel_gpfifo_v0 v0; | ||
193 | } *args = data; | ||
190 | struct nouveau_bar *bar = nouveau_bar(parent); | 194 | struct nouveau_bar *bar = nouveau_bar(parent); |
191 | struct nvc0_fifo_priv *priv = (void *)engine; | 195 | struct nvc0_fifo_priv *priv = (void *)engine; |
192 | struct nvc0_fifo_base *base = (void *)parent; | 196 | struct nvc0_fifo_base *base = (void *)parent; |
193 | struct nvc0_fifo_chan *chan; | 197 | struct nvc0_fifo_chan *chan; |
194 | struct nv50_channel_ind_class *args = data; | ||
195 | u64 usermem, ioffset, ilength; | 198 | u64 usermem, ioffset, ilength; |
196 | int ret, i; | 199 | int ret, i; |
197 | 200 | ||
198 | if (size < sizeof(*args)) | 201 | nv_ioctl(parent, "create channel gpfifo size %d\n", size); |
199 | return -EINVAL; | 202 | if (nvif_unpack(args->v0, 0, 0, false)) { |
203 | nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x " | ||
204 | "ioffset %016llx ilength %08x\n", | ||
205 | args->v0.version, args->v0.pushbuf, args->v0.ioffset, | ||
206 | args->v0.ilength); | ||
207 | } else | ||
208 | return ret; | ||
200 | 209 | ||
201 | ret = nouveau_fifo_channel_create(parent, engine, oclass, 1, | 210 | ret = nouveau_fifo_channel_create(parent, engine, oclass, 1, |
202 | priv->user.bar.offset, 0x1000, | 211 | priv->user.bar.offset, 0x1000, |
203 | args->pushbuf, | 212 | args->v0.pushbuf, |
204 | (1ULL << NVDEV_ENGINE_SW) | | 213 | (1ULL << NVDEV_ENGINE_SW) | |
205 | (1ULL << NVDEV_ENGINE_GR) | | 214 | (1ULL << NVDEV_ENGINE_GR) | |
206 | (1ULL << NVDEV_ENGINE_COPY0) | | 215 | (1ULL << NVDEV_ENGINE_COPY0) | |
@@ -212,12 +221,14 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent, | |||
212 | if (ret) | 221 | if (ret) |
213 | return ret; | 222 | return ret; |
214 | 223 | ||
224 | args->v0.chid = chan->base.chid; | ||
225 | |||
215 | nv_parent(chan)->context_attach = nvc0_fifo_context_attach; | 226 | nv_parent(chan)->context_attach = nvc0_fifo_context_attach; |
216 | nv_parent(chan)->context_detach = nvc0_fifo_context_detach; | 227 | nv_parent(chan)->context_detach = nvc0_fifo_context_detach; |
217 | 228 | ||
218 | usermem = chan->base.chid * 0x1000; | 229 | usermem = chan->base.chid * 0x1000; |
219 | ioffset = args->ioffset; | 230 | ioffset = args->v0.ioffset; |
220 | ilength = order_base_2(args->ilength / 8); | 231 | ilength = order_base_2(args->v0.ilength / 8); |
221 | 232 | ||
222 | for (i = 0; i < 0x1000; i += 4) | 233 | for (i = 0; i < 0x1000; i += 4) |
223 | nv_wo32(priv->user.mem, usermem + i, 0x00000000); | 234 | nv_wo32(priv->user.mem, usermem + i, 0x00000000); |
@@ -291,13 +302,15 @@ nvc0_fifo_ofuncs = { | |||
291 | .dtor = _nouveau_fifo_channel_dtor, | 302 | .dtor = _nouveau_fifo_channel_dtor, |
292 | .init = nvc0_fifo_chan_init, | 303 | .init = nvc0_fifo_chan_init, |
293 | .fini = nvc0_fifo_chan_fini, | 304 | .fini = nvc0_fifo_chan_fini, |
305 | .map = _nouveau_fifo_channel_map, | ||
294 | .rd32 = _nouveau_fifo_channel_rd32, | 306 | .rd32 = _nouveau_fifo_channel_rd32, |
295 | .wr32 = _nouveau_fifo_channel_wr32, | 307 | .wr32 = _nouveau_fifo_channel_wr32, |
308 | .ntfy = _nouveau_fifo_channel_ntfy | ||
296 | }; | 309 | }; |
297 | 310 | ||
298 | static struct nouveau_oclass | 311 | static struct nouveau_oclass |
299 | nvc0_fifo_sclass[] = { | 312 | nvc0_fifo_sclass[] = { |
300 | { NVC0_CHANNEL_IND_CLASS, &nvc0_fifo_ofuncs }, | 313 | { FERMI_CHANNEL_GPFIFO, &nvc0_fifo_ofuncs }, |
301 | {} | 314 | {} |
302 | }; | 315 | }; |
303 | 316 | ||
@@ -654,7 +667,7 @@ nvc0_fifo_intr_fault(struct nvc0_fifo_priv *priv, int unit) | |||
654 | object = engctx; | 667 | object = engctx; |
655 | while (object) { | 668 | while (object) { |
656 | switch (nv_mclass(object)) { | 669 | switch (nv_mclass(object)) { |
657 | case NVC0_CHANNEL_IND_CLASS: | 670 | case FERMI_CHANNEL_GPFIFO: |
658 | nvc0_fifo_recover(priv, engine, (void *)object); | 671 | nvc0_fifo_recover(priv, engine, (void *)object); |
659 | break; | 672 | break; |
660 | } | 673 | } |
@@ -730,7 +743,7 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn) | |||
730 | for (unkn = 0; unkn < 8; unkn++) { | 743 | for (unkn = 0; unkn < 8; unkn++) { |
731 | u32 ints = (intr >> (unkn * 0x04)) & inte; | 744 | u32 ints = (intr >> (unkn * 0x04)) & inte; |
732 | if (ints & 0x1) { | 745 | if (ints & 0x1) { |
733 | nouveau_event_trigger(priv->base.uevent, 1, 0); | 746 | nouveau_fifo_uevent(&priv->base); |
734 | ints &= ~1; | 747 | ints &= ~1; |
735 | } | 748 | } |
736 | if (ints) { | 749 | if (ints) { |
@@ -827,19 +840,26 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev) | |||
827 | } | 840 | } |
828 | 841 | ||
829 | static void | 842 | static void |
830 | nvc0_fifo_uevent_enable(struct nouveau_event *event, int type, int index) | 843 | nvc0_fifo_uevent_init(struct nvkm_event *event, int type, int index) |
831 | { | 844 | { |
832 | struct nvc0_fifo_priv *priv = event->priv; | 845 | struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent); |
833 | nv_mask(priv, 0x002140, 0x80000000, 0x80000000); | 846 | nv_mask(fifo, 0x002140, 0x80000000, 0x80000000); |
834 | } | 847 | } |
835 | 848 | ||
836 | static void | 849 | static void |
837 | nvc0_fifo_uevent_disable(struct nouveau_event *event, int type, int index) | 850 | nvc0_fifo_uevent_fini(struct nvkm_event *event, int type, int index) |
838 | { | 851 | { |
839 | struct nvc0_fifo_priv *priv = event->priv; | 852 | struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent); |
840 | nv_mask(priv, 0x002140, 0x80000000, 0x00000000); | 853 | nv_mask(fifo, 0x002140, 0x80000000, 0x00000000); |
841 | } | 854 | } |
842 | 855 | ||
856 | static const struct nvkm_event_func | ||
857 | nvc0_fifo_uevent_func = { | ||
858 | .ctor = nouveau_fifo_uevent_ctor, | ||
859 | .init = nvc0_fifo_uevent_init, | ||
860 | .fini = nvc0_fifo_uevent_fini, | ||
861 | }; | ||
862 | |||
843 | static int | 863 | static int |
844 | nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | 864 | nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, |
845 | struct nouveau_oclass *oclass, void *data, u32 size, | 865 | struct nouveau_oclass *oclass, void *data, u32 size, |
@@ -877,9 +897,9 @@ nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
877 | if (ret) | 897 | if (ret) |
878 | return ret; | 898 | return ret; |
879 | 899 | ||
880 | priv->base.uevent->enable = nvc0_fifo_uevent_enable; | 900 | ret = nvkm_event_init(&nvc0_fifo_uevent_func, 1, 1, &priv->base.uevent); |
881 | priv->base.uevent->disable = nvc0_fifo_uevent_disable; | 901 | if (ret) |
882 | priv->base.uevent->priv = priv; | 902 | return ret; |
883 | 903 | ||
884 | nv_subdev(priv)->unit = 0x00000100; | 904 | nv_subdev(priv)->unit = 0x00000100; |
885 | nv_subdev(priv)->intr = nvc0_fifo_intr; | 905 | nv_subdev(priv)->intr = nvc0_fifo_intr; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c index 298063edb92d..d2f0fd39c145 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c | |||
@@ -28,7 +28,8 @@ | |||
28 | #include <core/gpuobj.h> | 28 | #include <core/gpuobj.h> |
29 | #include <core/engctx.h> | 29 | #include <core/engctx.h> |
30 | #include <core/event.h> | 30 | #include <core/event.h> |
31 | #include <core/class.h> | 31 | #include <nvif/unpack.h> |
32 | #include <nvif/class.h> | ||
32 | #include <core/enum.h> | 33 | #include <core/enum.h> |
33 | 34 | ||
34 | #include <subdev/timer.h> | 35 | #include <subdev/timer.h> |
@@ -216,46 +217,56 @@ nve0_fifo_chan_ctor(struct nouveau_object *parent, | |||
216 | struct nouveau_oclass *oclass, void *data, u32 size, | 217 | struct nouveau_oclass *oclass, void *data, u32 size, |
217 | struct nouveau_object **pobject) | 218 | struct nouveau_object **pobject) |
218 | { | 219 | { |
220 | union { | ||
221 | struct kepler_channel_gpfifo_a_v0 v0; | ||
222 | } *args = data; | ||
219 | struct nouveau_bar *bar = nouveau_bar(parent); | 223 | struct nouveau_bar *bar = nouveau_bar(parent); |
220 | struct nve0_fifo_priv *priv = (void *)engine; | 224 | struct nve0_fifo_priv *priv = (void *)engine; |
221 | struct nve0_fifo_base *base = (void *)parent; | 225 | struct nve0_fifo_base *base = (void *)parent; |
222 | struct nve0_fifo_chan *chan; | 226 | struct nve0_fifo_chan *chan; |
223 | struct nve0_channel_ind_class *args = data; | ||
224 | u64 usermem, ioffset, ilength; | 227 | u64 usermem, ioffset, ilength; |
225 | int ret, i; | 228 | int ret, i; |
226 | 229 | ||
227 | if (size < sizeof(*args)) | 230 | nv_ioctl(parent, "create channel gpfifo size %d\n", size); |
228 | return -EINVAL; | 231 | if (nvif_unpack(args->v0, 0, 0, false)) { |
232 | nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x " | ||
233 | "ioffset %016llx ilength %08x engine %08x\n", | ||
234 | args->v0.version, args->v0.pushbuf, args->v0.ioffset, | ||
235 | args->v0.ilength, args->v0.engine); | ||
236 | } else | ||
237 | return ret; | ||
229 | 238 | ||
230 | for (i = 0; i < FIFO_ENGINE_NR; i++) { | 239 | for (i = 0; i < FIFO_ENGINE_NR; i++) { |
231 | if (args->engine & (1 << i)) { | 240 | if (args->v0.engine & (1 << i)) { |
232 | if (nouveau_engine(parent, fifo_engine[i].subdev)) { | 241 | if (nouveau_engine(parent, fifo_engine[i].subdev)) { |
233 | args->engine = (1 << i); | 242 | args->v0.engine = (1 << i); |
234 | break; | 243 | break; |
235 | } | 244 | } |
236 | } | 245 | } |
237 | } | 246 | } |
238 | 247 | ||
239 | if (i == FIFO_ENGINE_NR) { | 248 | if (i == FIFO_ENGINE_NR) { |
240 | nv_error(priv, "unsupported engines 0x%08x\n", args->engine); | 249 | nv_error(priv, "unsupported engines 0x%08x\n", args->v0.engine); |
241 | return -ENODEV; | 250 | return -ENODEV; |
242 | } | 251 | } |
243 | 252 | ||
244 | ret = nouveau_fifo_channel_create(parent, engine, oclass, 1, | 253 | ret = nouveau_fifo_channel_create(parent, engine, oclass, 1, |
245 | priv->user.bar.offset, 0x200, | 254 | priv->user.bar.offset, 0x200, |
246 | args->pushbuf, | 255 | args->v0.pushbuf, |
247 | fifo_engine[i].mask, &chan); | 256 | fifo_engine[i].mask, &chan); |
248 | *pobject = nv_object(chan); | 257 | *pobject = nv_object(chan); |
249 | if (ret) | 258 | if (ret) |
250 | return ret; | 259 | return ret; |
251 | 260 | ||
261 | args->v0.chid = chan->base.chid; | ||
262 | |||
252 | nv_parent(chan)->context_attach = nve0_fifo_context_attach; | 263 | nv_parent(chan)->context_attach = nve0_fifo_context_attach; |
253 | nv_parent(chan)->context_detach = nve0_fifo_context_detach; | 264 | nv_parent(chan)->context_detach = nve0_fifo_context_detach; |
254 | chan->engine = i; | 265 | chan->engine = i; |
255 | 266 | ||
256 | usermem = chan->base.chid * 0x200; | 267 | usermem = chan->base.chid * 0x200; |
257 | ioffset = args->ioffset; | 268 | ioffset = args->v0.ioffset; |
258 | ilength = order_base_2(args->ilength / 8); | 269 | ilength = order_base_2(args->v0.ilength / 8); |
259 | 270 | ||
260 | for (i = 0; i < 0x200; i += 4) | 271 | for (i = 0; i < 0x200; i += 4) |
261 | nv_wo32(priv->user.mem, usermem + i, 0x00000000); | 272 | nv_wo32(priv->user.mem, usermem + i, 0x00000000); |
@@ -325,13 +336,15 @@ nve0_fifo_ofuncs = { | |||
325 | .dtor = _nouveau_fifo_channel_dtor, | 336 | .dtor = _nouveau_fifo_channel_dtor, |
326 | .init = nve0_fifo_chan_init, | 337 | .init = nve0_fifo_chan_init, |
327 | .fini = nve0_fifo_chan_fini, | 338 | .fini = nve0_fifo_chan_fini, |
339 | .map = _nouveau_fifo_channel_map, | ||
328 | .rd32 = _nouveau_fifo_channel_rd32, | 340 | .rd32 = _nouveau_fifo_channel_rd32, |
329 | .wr32 = _nouveau_fifo_channel_wr32, | 341 | .wr32 = _nouveau_fifo_channel_wr32, |
342 | .ntfy = _nouveau_fifo_channel_ntfy | ||
330 | }; | 343 | }; |
331 | 344 | ||
332 | static struct nouveau_oclass | 345 | static struct nouveau_oclass |
333 | nve0_fifo_sclass[] = { | 346 | nve0_fifo_sclass[] = { |
334 | { NVE0_CHANNEL_IND_CLASS, &nve0_fifo_ofuncs }, | 347 | { KEPLER_CHANNEL_GPFIFO_A, &nve0_fifo_ofuncs }, |
335 | {} | 348 | {} |
336 | }; | 349 | }; |
337 | 350 | ||
@@ -769,7 +782,7 @@ nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit) | |||
769 | object = engctx; | 782 | object = engctx; |
770 | while (object) { | 783 | while (object) { |
771 | switch (nv_mclass(object)) { | 784 | switch (nv_mclass(object)) { |
772 | case NVE0_CHANNEL_IND_CLASS: | 785 | case KEPLER_CHANNEL_GPFIFO_A: |
773 | nve0_fifo_recover(priv, engine, (void *)object); | 786 | nve0_fifo_recover(priv, engine, (void *)object); |
774 | break; | 787 | break; |
775 | } | 788 | } |
@@ -859,7 +872,7 @@ nve0_fifo_intr_runlist(struct nve0_fifo_priv *priv) | |||
859 | static void | 872 | static void |
860 | nve0_fifo_intr_engine(struct nve0_fifo_priv *priv) | 873 | nve0_fifo_intr_engine(struct nve0_fifo_priv *priv) |
861 | { | 874 | { |
862 | nouveau_event_trigger(priv->base.uevent, 1, 0); | 875 | nouveau_fifo_uevent(&priv->base); |
863 | } | 876 | } |
864 | 877 | ||
865 | static void | 878 | static void |
@@ -952,19 +965,26 @@ nve0_fifo_intr(struct nouveau_subdev *subdev) | |||
952 | } | 965 | } |
953 | 966 | ||
954 | static void | 967 | static void |
955 | nve0_fifo_uevent_enable(struct nouveau_event *event, int type, int index) | 968 | nve0_fifo_uevent_init(struct nvkm_event *event, int type, int index) |
956 | { | 969 | { |
957 | struct nve0_fifo_priv *priv = event->priv; | 970 | struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent); |
958 | nv_mask(priv, 0x002140, 0x80000000, 0x80000000); | 971 | nv_mask(fifo, 0x002140, 0x80000000, 0x80000000); |
959 | } | 972 | } |
960 | 973 | ||
961 | static void | 974 | static void |
962 | nve0_fifo_uevent_disable(struct nouveau_event *event, int type, int index) | 975 | nve0_fifo_uevent_fini(struct nvkm_event *event, int type, int index) |
963 | { | 976 | { |
964 | struct nve0_fifo_priv *priv = event->priv; | 977 | struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent); |
965 | nv_mask(priv, 0x002140, 0x80000000, 0x00000000); | 978 | nv_mask(fifo, 0x002140, 0x80000000, 0x00000000); |
966 | } | 979 | } |
967 | 980 | ||
981 | static const struct nvkm_event_func | ||
982 | nve0_fifo_uevent_func = { | ||
983 | .ctor = nouveau_fifo_uevent_ctor, | ||
984 | .init = nve0_fifo_uevent_init, | ||
985 | .fini = nve0_fifo_uevent_fini, | ||
986 | }; | ||
987 | |||
968 | int | 988 | int |
969 | nve0_fifo_fini(struct nouveau_object *object, bool suspend) | 989 | nve0_fifo_fini(struct nouveau_object *object, bool suspend) |
970 | { | 990 | { |
@@ -1067,9 +1087,9 @@ nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
1067 | if (ret) | 1087 | if (ret) |
1068 | return ret; | 1088 | return ret; |
1069 | 1089 | ||
1070 | priv->base.uevent->enable = nve0_fifo_uevent_enable; | 1090 | ret = nvkm_event_init(&nve0_fifo_uevent_func, 1, 1, &priv->base.uevent); |
1071 | priv->base.uevent->disable = nve0_fifo_uevent_disable; | 1091 | if (ret) |
1072 | priv->base.uevent->priv = priv; | 1092 | return ret; |
1073 | 1093 | ||
1074 | nv_subdev(priv)->unit = 0x00000100; | 1094 | nv_subdev(priv)->unit = 0x00000100; |
1075 | nv_subdev(priv)->intr = nve0_fifo_intr; | 1095 | nv_subdev(priv)->intr = nve0_fifo_intr; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk110b.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk110b.c new file mode 100644 index 000000000000..3adb7fe91772 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk110b.c | |||
@@ -0,0 +1,104 @@ | |||
1 | /* | ||
2 | * Copyright 2013 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | ||
23 | */ | ||
24 | |||
25 | #include "ctxnvc0.h" | ||
26 | |||
27 | /******************************************************************************* | ||
28 | * PGRAPH context register lists | ||
29 | ******************************************************************************/ | ||
30 | |||
31 | static const struct nvc0_graph_init | ||
32 | gk110b_grctx_init_sm_0[] = { | ||
33 | { 0x419e04, 1, 0x04, 0x00000000 }, | ||
34 | { 0x419e08, 1, 0x04, 0x0000001d }, | ||
35 | { 0x419e0c, 1, 0x04, 0x00000000 }, | ||
36 | { 0x419e10, 1, 0x04, 0x00001c02 }, | ||
37 | { 0x419e44, 1, 0x04, 0x0013eff2 }, | ||
38 | { 0x419e48, 1, 0x04, 0x00000000 }, | ||
39 | { 0x419e4c, 1, 0x04, 0x0000007f }, | ||
40 | { 0x419e50, 2, 0x04, 0x00000000 }, | ||
41 | { 0x419e58, 1, 0x04, 0x00000001 }, | ||
42 | { 0x419e5c, 3, 0x04, 0x00000000 }, | ||
43 | { 0x419e68, 1, 0x04, 0x00000002 }, | ||
44 | { 0x419e6c, 12, 0x04, 0x00000000 }, | ||
45 | { 0x419eac, 1, 0x04, 0x00001f8f }, | ||
46 | { 0x419eb0, 1, 0x04, 0x0db00d2f }, | ||
47 | { 0x419eb8, 1, 0x04, 0x00000000 }, | ||
48 | { 0x419ec8, 1, 0x04, 0x0001304f }, | ||
49 | { 0x419f30, 4, 0x04, 0x00000000 }, | ||
50 | { 0x419f40, 1, 0x04, 0x00000018 }, | ||
51 | { 0x419f44, 3, 0x04, 0x00000000 }, | ||
52 | { 0x419f58, 1, 0x04, 0x00000000 }, | ||
53 | { 0x419f70, 1, 0x04, 0x00006300 }, | ||
54 | { 0x419f78, 1, 0x04, 0x000000eb }, | ||
55 | { 0x419f7c, 1, 0x04, 0x00000404 }, | ||
56 | {} | ||
57 | }; | ||
58 | |||
59 | static const struct nvc0_graph_pack | ||
60 | gk110b_grctx_pack_tpc[] = { | ||
61 | { nvd7_grctx_init_pe_0 }, | ||
62 | { nvf0_grctx_init_tex_0 }, | ||
63 | { nvf0_grctx_init_mpc_0 }, | ||
64 | { nvf0_grctx_init_l1c_0 }, | ||
65 | { gk110b_grctx_init_sm_0 }, | ||
66 | {} | ||
67 | }; | ||
68 | |||
69 | /******************************************************************************* | ||
70 | * PGRAPH context implementation | ||
71 | ******************************************************************************/ | ||
72 | |||
73 | struct nouveau_oclass * | ||
74 | gk110b_grctx_oclass = &(struct nvc0_grctx_oclass) { | ||
75 | .base.handle = NV_ENGCTX(GR, 0xf1), | ||
76 | .base.ofuncs = &(struct nouveau_ofuncs) { | ||
77 | .ctor = nvc0_graph_context_ctor, | ||
78 | .dtor = nvc0_graph_context_dtor, | ||
79 | .init = _nouveau_graph_context_init, | ||
80 | .fini = _nouveau_graph_context_fini, | ||
81 | .rd32 = _nouveau_graph_context_rd32, | ||
82 | .wr32 = _nouveau_graph_context_wr32, | ||
83 | }, | ||
84 | .main = nve4_grctx_generate_main, | ||
85 | .unkn = nve4_grctx_generate_unkn, | ||
86 | .hub = nvf0_grctx_pack_hub, | ||
87 | .gpc = nvf0_grctx_pack_gpc, | ||
88 | .zcull = nvc0_grctx_pack_zcull, | ||
89 | .tpc = gk110b_grctx_pack_tpc, | ||
90 | .ppc = nvf0_grctx_pack_ppc, | ||
91 | .icmd = nvf0_grctx_pack_icmd, | ||
92 | .mthd = nvf0_grctx_pack_mthd, | ||
93 | .bundle = nve4_grctx_generate_bundle, | ||
94 | .bundle_size = 0x3000, | ||
95 | .bundle_min_gpm_fifo_depth = 0x180, | ||
96 | .bundle_token_limit = 0x600, | ||
97 | .pagepool = nve4_grctx_generate_pagepool, | ||
98 | .pagepool_size = 0x8000, | ||
99 | .attrib = nvd7_grctx_generate_attrib, | ||
100 | .attrib_nr_max = 0x324, | ||
101 | .attrib_nr = 0x218, | ||
102 | .alpha_nr_max = 0x7ff, | ||
103 | .alpha_nr = 0x648, | ||
104 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c index 224ee0287ab7..36fc9831cc93 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c | |||
@@ -41,7 +41,6 @@ gk20a_grctx_oclass = &(struct nvc0_grctx_oclass) { | |||
41 | .wr32 = _nouveau_graph_context_wr32, | 41 | .wr32 = _nouveau_graph_context_wr32, |
42 | }, | 42 | }, |
43 | .main = nve4_grctx_generate_main, | 43 | .main = nve4_grctx_generate_main, |
44 | .mods = nve4_grctx_generate_mods, | ||
45 | .unkn = nve4_grctx_generate_unkn, | 44 | .unkn = nve4_grctx_generate_unkn, |
46 | .hub = nve4_grctx_pack_hub, | 45 | .hub = nve4_grctx_pack_hub, |
47 | .gpc = nve4_grctx_pack_gpc, | 46 | .gpc = nve4_grctx_pack_gpc, |
@@ -50,4 +49,15 @@ gk20a_grctx_oclass = &(struct nvc0_grctx_oclass) { | |||
50 | .ppc = nve4_grctx_pack_ppc, | 49 | .ppc = nve4_grctx_pack_ppc, |
51 | .icmd = nve4_grctx_pack_icmd, | 50 | .icmd = nve4_grctx_pack_icmd, |
52 | .mthd = gk20a_grctx_pack_mthd, | 51 | .mthd = gk20a_grctx_pack_mthd, |
52 | .bundle = nve4_grctx_generate_bundle, | ||
53 | .bundle_size = 0x1800, | ||
54 | .bundle_min_gpm_fifo_depth = 0x62, | ||
55 | .bundle_token_limit = 0x100, | ||
56 | .pagepool = nve4_grctx_generate_pagepool, | ||
57 | .pagepool_size = 0x8000, | ||
58 | .attrib = nvd7_grctx_generate_attrib, | ||
59 | .attrib_nr_max = 0x240, | ||
60 | .attrib_nr = 0x240, | ||
61 | .alpha_nr_max = 0x648 + (0x648 / 2), | ||
62 | .alpha_nr = 0x648, | ||
53 | }.base; | 63 | }.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c index b0d0fb2f4d08..62e918b9fa81 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c | |||
@@ -859,45 +859,74 @@ gm107_grctx_pack_ppc[] = { | |||
859 | ******************************************************************************/ | 859 | ******************************************************************************/ |
860 | 860 | ||
861 | static void | 861 | static void |
862 | gm107_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) | 862 | gm107_grctx_generate_bundle(struct nvc0_grctx *info) |
863 | { | 863 | { |
864 | mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); | 864 | const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv); |
865 | mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); | 865 | const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth, |
866 | mmio_data(0x200000, 0x1000, NV_MEM_ACCESS_RW); | 866 | impl->bundle_size / 0x20); |
867 | 867 | const u32 token_limit = impl->bundle_token_limit; | |
868 | mmio_list(0x40800c, 0x00000000, 8, 1); | 868 | const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS; |
869 | mmio_list(0x408010, 0x80000000, 0, 0); | 869 | const int s = 8; |
870 | mmio_list(0x419004, 0x00000000, 8, 1); | 870 | const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); |
871 | mmio_list(0x419008, 0x00000000, 0, 0); | 871 | mmio_refn(info, 0x408004, 0x00000000, s, b); |
872 | mmio_list(0x4064cc, 0x80000000, 0, 0); | 872 | mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); |
873 | mmio_list(0x418e30, 0x80000000, 0, 0); | 873 | mmio_refn(info, 0x418e24, 0x00000000, s, b); |
874 | 874 | mmio_refn(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s), 0, b); | |
875 | mmio_list(0x408004, 0x00000000, 8, 0); | 875 | mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); |
876 | mmio_list(0x408008, 0x80000030, 0, 0); | 876 | } |
877 | mmio_list(0x418e24, 0x00000000, 8, 0); | 877 | |
878 | mmio_list(0x418e28, 0x80000030, 0, 0); | 878 | static void |
879 | 879 | gm107_grctx_generate_pagepool(struct nvc0_grctx *info) | |
880 | mmio_list(0x4064c8, 0x018002c0, 0, 0); | 880 | { |
881 | 881 | const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv); | |
882 | mmio_list(0x418810, 0x80000000, 12, 2); | 882 | const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS; |
883 | mmio_list(0x419848, 0x10000000, 12, 2); | 883 | const int s = 8; |
884 | mmio_list(0x419c2c, 0x10000000, 12, 2); | 884 | const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access); |
885 | 885 | mmio_refn(info, 0x40800c, 0x00000000, s, b); | |
886 | mmio_list(0x405830, 0x0aa01000, 0, 0); | 886 | mmio_wr32(info, 0x408010, 0x80000000); |
887 | mmio_list(0x4064c4, 0x0400ffff, 0, 0); | 887 | mmio_refn(info, 0x419004, 0x00000000, s, b); |
888 | 888 | mmio_wr32(info, 0x419008, 0x00000000); | |
889 | /*XXX*/ | 889 | mmio_wr32(info, 0x4064cc, 0x80000000); |
890 | mmio_list(0x5030c0, 0x00001540, 0, 0); | 890 | mmio_wr32(info, 0x418e30, 0x80000000); /* guess at it being related */ |
891 | mmio_list(0x5030f4, 0x00000000, 0, 0); | 891 | } |
892 | mmio_list(0x5030e4, 0x00002000, 0, 0); | 892 | |
893 | mmio_list(0x5030f8, 0x00003fc0, 0, 0); | 893 | static void |
894 | mmio_list(0x418ea0, 0x07151540, 0, 0); | 894 | gm107_grctx_generate_attrib(struct nvc0_grctx *info) |
895 | 895 | { | |
896 | mmio_list(0x5032c0, 0x00001540, 0, 0); | 896 | struct nvc0_graph_priv *priv = info->priv; |
897 | mmio_list(0x5032f4, 0x00001fe0, 0, 0); | 897 | const struct nvc0_grctx_oclass *impl = (void *)nvc0_grctx_impl(priv); |
898 | mmio_list(0x5032e4, 0x00002000, 0, 0); | 898 | const u32 alpha = impl->alpha_nr; |
899 | mmio_list(0x5032f8, 0x00006fc0, 0, 0); | 899 | const u32 attrib = impl->attrib_nr; |
900 | mmio_list(0x418ea4, 0x07151540, 0, 0); | 900 | const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max); |
901 | const u32 access = NV_MEM_ACCESS_RW; | ||
902 | const int s = 12; | ||
903 | const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access); | ||
904 | const int max_batches = 0xffff; | ||
905 | u32 bo = 0; | ||
906 | u32 ao = bo + impl->attrib_nr_max * priv->tpc_total; | ||
907 | int gpc, ppc, n = 0; | ||
908 | |||
909 | mmio_refn(info, 0x418810, 0x80000000, s, b); | ||
910 | mmio_refn(info, 0x419848, 0x10000000, s, b); | ||
911 | mmio_refn(info, 0x419c2c, 0x10000000, s, b); | ||
912 | mmio_wr32(info, 0x405830, (attrib << 16) | alpha); | ||
913 | mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches); | ||
914 | |||
915 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) { | ||
916 | for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++, n++) { | ||
917 | const u32 as = alpha * priv->ppc_tpc_nr[gpc][ppc]; | ||
918 | const u32 bs = attrib * priv->ppc_tpc_nr[gpc][ppc]; | ||
919 | const u32 u = 0x418ea0 + (n * 0x04); | ||
920 | const u32 o = PPC_UNIT(gpc, ppc, 0); | ||
921 | mmio_wr32(info, o + 0xc0, bs); | ||
922 | mmio_wr32(info, o + 0xf4, bo); | ||
923 | bo += impl->attrib_nr_max * priv->ppc_tpc_nr[gpc][ppc]; | ||
924 | mmio_wr32(info, o + 0xe4, as); | ||
925 | mmio_wr32(info, o + 0xf8, ao); | ||
926 | ao += impl->alpha_nr_max * priv->ppc_tpc_nr[gpc][ppc]; | ||
927 | mmio_wr32(info, u, (0x715 /*XXX*/ << 16) | bs); | ||
928 | } | ||
929 | } | ||
901 | } | 930 | } |
902 | 931 | ||
903 | static void | 932 | static void |
@@ -934,7 +963,9 @@ gm107_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) | |||
934 | 963 | ||
935 | nv_wr32(priv, 0x404154, 0x00000000); | 964 | nv_wr32(priv, 0x404154, 0x00000000); |
936 | 965 | ||
937 | oclass->mods(priv, info); | 966 | oclass->bundle(info); |
967 | oclass->pagepool(info); | ||
968 | oclass->attrib(info); | ||
938 | oclass->unkn(priv); | 969 | oclass->unkn(priv); |
939 | 970 | ||
940 | gm107_grctx_generate_tpcid(priv); | 971 | gm107_grctx_generate_tpcid(priv); |
@@ -979,7 +1010,6 @@ gm107_grctx_oclass = &(struct nvc0_grctx_oclass) { | |||
979 | .wr32 = _nouveau_graph_context_wr32, | 1010 | .wr32 = _nouveau_graph_context_wr32, |
980 | }, | 1011 | }, |
981 | .main = gm107_grctx_generate_main, | 1012 | .main = gm107_grctx_generate_main, |
982 | .mods = gm107_grctx_generate_mods, | ||
983 | .unkn = nve4_grctx_generate_unkn, | 1013 | .unkn = nve4_grctx_generate_unkn, |
984 | .hub = gm107_grctx_pack_hub, | 1014 | .hub = gm107_grctx_pack_hub, |
985 | .gpc = gm107_grctx_pack_gpc, | 1015 | .gpc = gm107_grctx_pack_gpc, |
@@ -988,4 +1018,15 @@ gm107_grctx_oclass = &(struct nvc0_grctx_oclass) { | |||
988 | .ppc = gm107_grctx_pack_ppc, | 1018 | .ppc = gm107_grctx_pack_ppc, |
989 | .icmd = gm107_grctx_pack_icmd, | 1019 | .icmd = gm107_grctx_pack_icmd, |
990 | .mthd = gm107_grctx_pack_mthd, | 1020 | .mthd = gm107_grctx_pack_mthd, |
1021 | .bundle = gm107_grctx_generate_bundle, | ||
1022 | .bundle_size = 0x3000, | ||
1023 | .bundle_min_gpm_fifo_depth = 0x180, | ||
1024 | .bundle_token_limit = 0x2c0, | ||
1025 | .pagepool = gm107_grctx_generate_pagepool, | ||
1026 | .pagepool_size = 0x8000, | ||
1027 | .attrib = gm107_grctx_generate_attrib, | ||
1028 | .attrib_nr_max = 0xff0, | ||
1029 | .attrib_nr = 0xaa0, | ||
1030 | .alpha_nr_max = 0x1800, | ||
1031 | .alpha_nr = 0x1000, | ||
991 | }.base; | 1032 | }.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c index 8de4a4291548..ce252adbef81 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c | |||
@@ -531,50 +531,6 @@ nv108_grctx_pack_ppc[] = { | |||
531 | * PGRAPH context implementation | 531 | * PGRAPH context implementation |
532 | ******************************************************************************/ | 532 | ******************************************************************************/ |
533 | 533 | ||
534 | static void | ||
535 | nv108_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) | ||
536 | { | ||
537 | u32 magic[GPC_MAX][2]; | ||
538 | u32 offset; | ||
539 | int gpc; | ||
540 | |||
541 | mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); | ||
542 | mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); | ||
543 | mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW); | ||
544 | mmio_list(0x40800c, 0x00000000, 8, 1); | ||
545 | mmio_list(0x408010, 0x80000000, 0, 0); | ||
546 | mmio_list(0x419004, 0x00000000, 8, 1); | ||
547 | mmio_list(0x419008, 0x00000000, 0, 0); | ||
548 | mmio_list(0x4064cc, 0x80000000, 0, 0); | ||
549 | mmio_list(0x408004, 0x00000000, 8, 0); | ||
550 | mmio_list(0x408008, 0x80000030, 0, 0); | ||
551 | mmio_list(0x418808, 0x00000000, 8, 0); | ||
552 | mmio_list(0x41880c, 0x80000030, 0, 0); | ||
553 | mmio_list(0x4064c8, 0x00c20200, 0, 0); | ||
554 | mmio_list(0x418810, 0x80000000, 12, 2); | ||
555 | mmio_list(0x419848, 0x10000000, 12, 2); | ||
556 | |||
557 | mmio_list(0x405830, 0x02180648, 0, 0); | ||
558 | mmio_list(0x4064c4, 0x0192ffff, 0, 0); | ||
559 | |||
560 | for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) { | ||
561 | u16 magic0 = 0x0218 * priv->tpc_nr[gpc]; | ||
562 | u16 magic1 = 0x0648 * priv->tpc_nr[gpc]; | ||
563 | magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset; | ||
564 | magic[gpc][1] = 0x00000000 | (magic1 << 16); | ||
565 | offset += 0x0324 * priv->tpc_nr[gpc]; | ||
566 | } | ||
567 | |||
568 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) { | ||
569 | mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0); | ||
570 | mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0); | ||
571 | offset += 0x07ff * priv->tpc_nr[gpc]; | ||
572 | } | ||
573 | |||
574 | mmio_list(0x17e91c, 0x0b040a0b, 0, 0); | ||
575 | mmio_list(0x17e920, 0x00090d08, 0, 0); | ||
576 | } | ||
577 | |||
578 | struct nouveau_oclass * | 534 | struct nouveau_oclass * |
579 | nv108_grctx_oclass = &(struct nvc0_grctx_oclass) { | 535 | nv108_grctx_oclass = &(struct nvc0_grctx_oclass) { |
580 | .base.handle = NV_ENGCTX(GR, 0x08), | 536 | .base.handle = NV_ENGCTX(GR, 0x08), |
@@ -587,7 +543,6 @@ nv108_grctx_oclass = &(struct nvc0_grctx_oclass) { | |||
587 | .wr32 = _nouveau_graph_context_wr32, | 543 | .wr32 = _nouveau_graph_context_wr32, |
588 | }, | 544 | }, |
589 | .main = nve4_grctx_generate_main, | 545 | .main = nve4_grctx_generate_main, |
590 | .mods = nv108_grctx_generate_mods, | ||
591 | .unkn = nve4_grctx_generate_unkn, | 546 | .unkn = nve4_grctx_generate_unkn, |
592 | .hub = nv108_grctx_pack_hub, | 547 | .hub = nv108_grctx_pack_hub, |
593 | .gpc = nv108_grctx_pack_gpc, | 548 | .gpc = nv108_grctx_pack_gpc, |
@@ -596,4 +551,15 @@ nv108_grctx_oclass = &(struct nvc0_grctx_oclass) { | |||
596 | .ppc = nv108_grctx_pack_ppc, | 551 | .ppc = nv108_grctx_pack_ppc, |
597 | .icmd = nv108_grctx_pack_icmd, | 552 | .icmd = nv108_grctx_pack_icmd, |
598 | .mthd = nvf0_grctx_pack_mthd, | 553 | .mthd = nvf0_grctx_pack_mthd, |
554 | .bundle = nve4_grctx_generate_bundle, | ||
555 | .bundle_size = 0x3000, | ||
556 | .bundle_min_gpm_fifo_depth = 0xc2, | ||
557 | .bundle_token_limit = 0x200, | ||
558 | .pagepool = nve4_grctx_generate_pagepool, | ||
559 | .pagepool_size = 0x8000, | ||
560 | .attrib = nvd7_grctx_generate_attrib, | ||
561 | .attrib_nr_max = 0x324, | ||
562 | .attrib_nr = 0x218, | ||
563 | .alpha_nr_max = 0x7ff, | ||
564 | .alpha_nr = 0x648, | ||
599 | }.base; | 565 | }.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c index 833a96508c4e..b8e5fe60a1eb 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c | |||
@@ -982,34 +982,93 @@ nvc0_grctx_pack_tpc[] = { | |||
982 | * PGRAPH context implementation | 982 | * PGRAPH context implementation |
983 | ******************************************************************************/ | 983 | ******************************************************************************/ |
984 | 984 | ||
985 | int | ||
986 | nvc0_grctx_mmio_data(struct nvc0_grctx *info, u32 size, u32 align, u32 access) | ||
987 | { | ||
988 | if (info->data) { | ||
989 | info->buffer[info->buffer_nr] = round_up(info->addr, align); | ||
990 | info->addr = info->buffer[info->buffer_nr] + size; | ||
991 | info->data->size = size; | ||
992 | info->data->align = align; | ||
993 | info->data->access = access; | ||
994 | info->data++; | ||
995 | return info->buffer_nr++; | ||
996 | } | ||
997 | return -1; | ||
998 | } | ||
999 | |||
1000 | void | ||
1001 | nvc0_grctx_mmio_item(struct nvc0_grctx *info, u32 addr, u32 data, | ||
1002 | int shift, int buffer) | ||
1003 | { | ||
1004 | if (info->data) { | ||
1005 | if (shift >= 0) { | ||
1006 | info->mmio->addr = addr; | ||
1007 | info->mmio->data = data; | ||
1008 | info->mmio->shift = shift; | ||
1009 | info->mmio->buffer = buffer; | ||
1010 | if (buffer >= 0) | ||
1011 | data |= info->buffer[buffer] >> shift; | ||
1012 | info->mmio++; | ||
1013 | } else | ||
1014 | return; | ||
1015 | } else { | ||
1016 | if (buffer >= 0) | ||
1017 | return; | ||
1018 | } | ||
1019 | |||
1020 | nv_wr32(info->priv, addr, data); | ||
1021 | } | ||
1022 | |||
1023 | void | ||
1024 | nvc0_grctx_generate_bundle(struct nvc0_grctx *info) | ||
1025 | { | ||
1026 | const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv); | ||
1027 | const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS; | ||
1028 | const int s = 8; | ||
1029 | const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); | ||
1030 | mmio_refn(info, 0x408004, 0x00000000, s, b); | ||
1031 | mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); | ||
1032 | mmio_refn(info, 0x418808, 0x00000000, s, b); | ||
1033 | mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); | ||
1034 | } | ||
1035 | |||
985 | void | 1036 | void |
986 | nvc0_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) | 1037 | nvc0_grctx_generate_pagepool(struct nvc0_grctx *info) |
987 | { | 1038 | { |
1039 | const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv); | ||
1040 | const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS; | ||
1041 | const int s = 8; | ||
1042 | const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access); | ||
1043 | mmio_refn(info, 0x40800c, 0x00000000, s, b); | ||
1044 | mmio_wr32(info, 0x408010, 0x80000000); | ||
1045 | mmio_refn(info, 0x419004, 0x00000000, s, b); | ||
1046 | mmio_wr32(info, 0x419008, 0x00000000); | ||
1047 | } | ||
1048 | |||
1049 | void | ||
1050 | nvc0_grctx_generate_attrib(struct nvc0_grctx *info) | ||
1051 | { | ||
1052 | struct nvc0_graph_priv *priv = info->priv; | ||
1053 | const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(priv); | ||
1054 | const u32 attrib = impl->attrib_nr; | ||
1055 | const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max); | ||
1056 | const u32 access = NV_MEM_ACCESS_RW; | ||
1057 | const int s = 12; | ||
1058 | const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access); | ||
988 | int gpc, tpc; | 1059 | int gpc, tpc; |
989 | u32 offset; | 1060 | u32 bo = 0; |
990 | 1061 | ||
991 | mmio_data(0x002000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); | 1062 | mmio_refn(info, 0x418810, 0x80000000, s, b); |
992 | mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); | 1063 | mmio_refn(info, 0x419848, 0x10000000, s, b); |
993 | mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW); | 1064 | mmio_wr32(info, 0x405830, (attrib << 16)); |
994 | 1065 | ||
995 | mmio_list(0x408004, 0x00000000, 8, 0); | 1066 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) { |
996 | mmio_list(0x408008, 0x80000018, 0, 0); | ||
997 | mmio_list(0x40800c, 0x00000000, 8, 1); | ||
998 | mmio_list(0x408010, 0x80000000, 0, 0); | ||
999 | mmio_list(0x418810, 0x80000000, 12, 2); | ||
1000 | mmio_list(0x419848, 0x10000000, 12, 2); | ||
1001 | mmio_list(0x419004, 0x00000000, 8, 1); | ||
1002 | mmio_list(0x419008, 0x00000000, 0, 0); | ||
1003 | mmio_list(0x418808, 0x00000000, 8, 0); | ||
1004 | mmio_list(0x41880c, 0x80000018, 0, 0); | ||
1005 | |||
1006 | mmio_list(0x405830, 0x02180000, 0, 0); | ||
1007 | |||
1008 | for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) { | ||
1009 | for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) { | 1067 | for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) { |
1010 | u32 addr = TPC_UNIT(gpc, tpc, 0x0520); | 1068 | const u32 o = TPC_UNIT(gpc, tpc, 0x0520); |
1011 | mmio_list(addr, 0x02180000 | offset, 0, 0); | 1069 | mmio_skip(info, o, (attrib << 16) | ++bo); |
1012 | offset += 0x0324; | 1070 | mmio_wr32(info, o, (attrib << 16) | --bo); |
1071 | bo += impl->attrib_nr_max; | ||
1013 | } | 1072 | } |
1014 | } | 1073 | } |
1015 | } | 1074 | } |
@@ -1170,7 +1229,7 @@ nvc0_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) | |||
1170 | { | 1229 | { |
1171 | struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass; | 1230 | struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass; |
1172 | 1231 | ||
1173 | nv_mask(priv, 0x000260, 0x00000001, 0x00000000); | 1232 | nouveau_mc(priv)->unk260(nouveau_mc(priv), 0); |
1174 | 1233 | ||
1175 | nvc0_graph_mmio(priv, oclass->hub); | 1234 | nvc0_graph_mmio(priv, oclass->hub); |
1176 | nvc0_graph_mmio(priv, oclass->gpc); | 1235 | nvc0_graph_mmio(priv, oclass->gpc); |
@@ -1180,7 +1239,9 @@ nvc0_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) | |||
1180 | 1239 | ||
1181 | nv_wr32(priv, 0x404154, 0x00000000); | 1240 | nv_wr32(priv, 0x404154, 0x00000000); |
1182 | 1241 | ||
1183 | oclass->mods(priv, info); | 1242 | oclass->bundle(info); |
1243 | oclass->pagepool(info); | ||
1244 | oclass->attrib(info); | ||
1184 | oclass->unkn(priv); | 1245 | oclass->unkn(priv); |
1185 | 1246 | ||
1186 | nvc0_grctx_generate_tpcid(priv); | 1247 | nvc0_grctx_generate_tpcid(priv); |
@@ -1192,7 +1253,7 @@ nvc0_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) | |||
1192 | nvc0_graph_icmd(priv, oclass->icmd); | 1253 | nvc0_graph_icmd(priv, oclass->icmd); |
1193 | nv_wr32(priv, 0x404154, 0x00000400); | 1254 | nv_wr32(priv, 0x404154, 0x00000400); |
1194 | nvc0_graph_mthd(priv, oclass->mthd); | 1255 | nvc0_graph_mthd(priv, oclass->mthd); |
1195 | nv_mask(priv, 0x000260, 0x00000001, 0x00000001); | 1256 | nouveau_mc(priv)->unk260(nouveau_mc(priv), 1); |
1196 | } | 1257 | } |
1197 | 1258 | ||
1198 | int | 1259 | int |
@@ -1308,7 +1369,6 @@ nvc0_grctx_oclass = &(struct nvc0_grctx_oclass) { | |||
1308 | .wr32 = _nouveau_graph_context_wr32, | 1369 | .wr32 = _nouveau_graph_context_wr32, |
1309 | }, | 1370 | }, |
1310 | .main = nvc0_grctx_generate_main, | 1371 | .main = nvc0_grctx_generate_main, |
1311 | .mods = nvc0_grctx_generate_mods, | ||
1312 | .unkn = nvc0_grctx_generate_unkn, | 1372 | .unkn = nvc0_grctx_generate_unkn, |
1313 | .hub = nvc0_grctx_pack_hub, | 1373 | .hub = nvc0_grctx_pack_hub, |
1314 | .gpc = nvc0_grctx_pack_gpc, | 1374 | .gpc = nvc0_grctx_pack_gpc, |
@@ -1316,4 +1376,11 @@ nvc0_grctx_oclass = &(struct nvc0_grctx_oclass) { | |||
1316 | .tpc = nvc0_grctx_pack_tpc, | 1376 | .tpc = nvc0_grctx_pack_tpc, |
1317 | .icmd = nvc0_grctx_pack_icmd, | 1377 | .icmd = nvc0_grctx_pack_icmd, |
1318 | .mthd = nvc0_grctx_pack_mthd, | 1378 | .mthd = nvc0_grctx_pack_mthd, |
1379 | .bundle = nvc0_grctx_generate_bundle, | ||
1380 | .bundle_size = 0x1800, | ||
1381 | .pagepool = nvc0_grctx_generate_pagepool, | ||
1382 | .pagepool_size = 0x8000, | ||
1383 | .attrib = nvc0_grctx_generate_attrib, | ||
1384 | .attrib_nr_max = 0x324, | ||
1385 | .attrib_nr = 0x218, | ||
1319 | }.base; | 1386 | }.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h index 8da8b627b9d0..c776cd715e33 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h | |||
@@ -12,12 +12,19 @@ struct nvc0_grctx { | |||
12 | u64 addr; | 12 | u64 addr; |
13 | }; | 13 | }; |
14 | 14 | ||
15 | int nvc0_grctx_mmio_data(struct nvc0_grctx *, u32 size, u32 align, u32 access); | ||
16 | void nvc0_grctx_mmio_item(struct nvc0_grctx *, u32 addr, u32 data, int s, int); | ||
17 | |||
18 | #define mmio_vram(a,b,c,d) nvc0_grctx_mmio_data((a), (b), (c), (d)) | ||
19 | #define mmio_refn(a,b,c,d,e) nvc0_grctx_mmio_item((a), (b), (c), (d), (e)) | ||
20 | #define mmio_skip(a,b,c) mmio_refn((a), (b), (c), -1, -1) | ||
21 | #define mmio_wr32(a,b,c) mmio_refn((a), (b), (c), 0, -1) | ||
22 | |||
15 | struct nvc0_grctx_oclass { | 23 | struct nvc0_grctx_oclass { |
16 | struct nouveau_oclass base; | 24 | struct nouveau_oclass base; |
17 | /* main context generation function */ | 25 | /* main context generation function */ |
18 | void (*main)(struct nvc0_graph_priv *, struct nvc0_grctx *); | 26 | void (*main)(struct nvc0_graph_priv *, struct nvc0_grctx *); |
19 | /* context-specific modify-on-first-load list generation function */ | 27 | /* context-specific modify-on-first-load list generation function */ |
20 | void (*mods)(struct nvc0_graph_priv *, struct nvc0_grctx *); | ||
21 | void (*unkn)(struct nvc0_graph_priv *); | 28 | void (*unkn)(struct nvc0_graph_priv *); |
22 | /* mmio context data */ | 29 | /* mmio context data */ |
23 | const struct nvc0_graph_pack *hub; | 30 | const struct nvc0_graph_pack *hub; |
@@ -28,30 +35,34 @@ struct nvc0_grctx_oclass { | |||
28 | /* indirect context data, generated with icmds/mthds */ | 35 | /* indirect context data, generated with icmds/mthds */ |
29 | const struct nvc0_graph_pack *icmd; | 36 | const struct nvc0_graph_pack *icmd; |
30 | const struct nvc0_graph_pack *mthd; | 37 | const struct nvc0_graph_pack *mthd; |
38 | /* bundle circular buffer */ | ||
39 | void (*bundle)(struct nvc0_grctx *); | ||
40 | u32 bundle_size; | ||
41 | u32 bundle_min_gpm_fifo_depth; | ||
42 | u32 bundle_token_limit; | ||
43 | /* pagepool */ | ||
44 | void (*pagepool)(struct nvc0_grctx *); | ||
45 | u32 pagepool_size; | ||
46 | /* attribute(/alpha) circular buffer */ | ||
47 | void (*attrib)(struct nvc0_grctx *); | ||
48 | u32 attrib_nr_max; | ||
49 | u32 attrib_nr; | ||
50 | u32 alpha_nr_max; | ||
51 | u32 alpha_nr; | ||
31 | }; | 52 | }; |
32 | 53 | ||
33 | #define mmio_data(s,a,p) do { \ | 54 | static inline const struct nvc0_grctx_oclass * |
34 | info->buffer[info->buffer_nr] = round_up(info->addr, (a)); \ | 55 | nvc0_grctx_impl(struct nvc0_graph_priv *priv) |
35 | info->addr = info->buffer[info->buffer_nr++] + (s); \ | 56 | { |
36 | info->data->size = (s); \ | 57 | return (void *)nv_engine(priv)->cclass; |
37 | info->data->align = (a); \ | 58 | } |
38 | info->data->access = (p); \ | ||
39 | info->data++; \ | ||
40 | } while(0) | ||
41 | |||
42 | #define mmio_list(r,d,s,b) do { \ | ||
43 | info->mmio->addr = (r); \ | ||
44 | info->mmio->data = (d); \ | ||
45 | info->mmio->shift = (s); \ | ||
46 | info->mmio->buffer = (b); \ | ||
47 | info->mmio++; \ | ||
48 | nv_wr32(priv, (r), (d) | ((s) ? (info->buffer[(b)] >> (s)) : 0)); \ | ||
49 | } while(0) | ||
50 | 59 | ||
51 | extern struct nouveau_oclass *nvc0_grctx_oclass; | 60 | extern struct nouveau_oclass *nvc0_grctx_oclass; |
52 | int nvc0_grctx_generate(struct nvc0_graph_priv *); | 61 | int nvc0_grctx_generate(struct nvc0_graph_priv *); |
53 | void nvc0_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *); | 62 | void nvc0_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *); |
54 | void nvc0_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *); | 63 | void nvc0_grctx_generate_bundle(struct nvc0_grctx *); |
64 | void nvc0_grctx_generate_pagepool(struct nvc0_grctx *); | ||
65 | void nvc0_grctx_generate_attrib(struct nvc0_grctx *); | ||
55 | void nvc0_grctx_generate_unkn(struct nvc0_graph_priv *); | 66 | void nvc0_grctx_generate_unkn(struct nvc0_graph_priv *); |
56 | void nvc0_grctx_generate_tpcid(struct nvc0_graph_priv *); | 67 | void nvc0_grctx_generate_tpcid(struct nvc0_graph_priv *); |
57 | void nvc0_grctx_generate_r406028(struct nvc0_graph_priv *); | 68 | void nvc0_grctx_generate_r406028(struct nvc0_graph_priv *); |
@@ -60,22 +71,27 @@ void nvc0_grctx_generate_r418bb8(struct nvc0_graph_priv *); | |||
60 | void nvc0_grctx_generate_r406800(struct nvc0_graph_priv *); | 71 | void nvc0_grctx_generate_r406800(struct nvc0_graph_priv *); |
61 | 72 | ||
62 | extern struct nouveau_oclass *nvc1_grctx_oclass; | 73 | extern struct nouveau_oclass *nvc1_grctx_oclass; |
63 | void nvc1_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *); | 74 | void nvc1_grctx_generate_attrib(struct nvc0_grctx *); |
64 | void nvc1_grctx_generate_unkn(struct nvc0_graph_priv *); | 75 | void nvc1_grctx_generate_unkn(struct nvc0_graph_priv *); |
65 | 76 | ||
66 | extern struct nouveau_oclass *nvc4_grctx_oclass; | 77 | extern struct nouveau_oclass *nvc4_grctx_oclass; |
67 | extern struct nouveau_oclass *nvc8_grctx_oclass; | 78 | extern struct nouveau_oclass *nvc8_grctx_oclass; |
79 | |||
68 | extern struct nouveau_oclass *nvd7_grctx_oclass; | 80 | extern struct nouveau_oclass *nvd7_grctx_oclass; |
81 | void nvd7_grctx_generate_attrib(struct nvc0_grctx *); | ||
82 | |||
69 | extern struct nouveau_oclass *nvd9_grctx_oclass; | 83 | extern struct nouveau_oclass *nvd9_grctx_oclass; |
70 | 84 | ||
71 | extern struct nouveau_oclass *nve4_grctx_oclass; | 85 | extern struct nouveau_oclass *nve4_grctx_oclass; |
72 | extern struct nouveau_oclass *gk20a_grctx_oclass; | 86 | extern struct nouveau_oclass *gk20a_grctx_oclass; |
73 | void nve4_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *); | 87 | void nve4_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *); |
74 | void nve4_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *); | 88 | void nve4_grctx_generate_bundle(struct nvc0_grctx *); |
89 | void nve4_grctx_generate_pagepool(struct nvc0_grctx *); | ||
75 | void nve4_grctx_generate_unkn(struct nvc0_graph_priv *); | 90 | void nve4_grctx_generate_unkn(struct nvc0_graph_priv *); |
76 | void nve4_grctx_generate_r418bb8(struct nvc0_graph_priv *); | 91 | void nve4_grctx_generate_r418bb8(struct nvc0_graph_priv *); |
77 | 92 | ||
78 | extern struct nouveau_oclass *nvf0_grctx_oclass; | 93 | extern struct nouveau_oclass *nvf0_grctx_oclass; |
94 | extern struct nouveau_oclass *gk110b_grctx_oclass; | ||
79 | extern struct nouveau_oclass *nv108_grctx_oclass; | 95 | extern struct nouveau_oclass *nv108_grctx_oclass; |
80 | extern struct nouveau_oclass *gm107_grctx_oclass; | 96 | extern struct nouveau_oclass *gm107_grctx_oclass; |
81 | 97 | ||
@@ -160,16 +176,23 @@ extern const struct nvc0_graph_pack nve4_grctx_pack_ppc[]; | |||
160 | extern const struct nvc0_graph_pack nve4_grctx_pack_icmd[]; | 176 | extern const struct nvc0_graph_pack nve4_grctx_pack_icmd[]; |
161 | extern const struct nvc0_graph_init nve4_grctx_init_a097_0[]; | 177 | extern const struct nvc0_graph_init nve4_grctx_init_a097_0[]; |
162 | 178 | ||
179 | extern const struct nvc0_graph_pack nvf0_grctx_pack_icmd[]; | ||
180 | |||
163 | extern const struct nvc0_graph_pack nvf0_grctx_pack_mthd[]; | 181 | extern const struct nvc0_graph_pack nvf0_grctx_pack_mthd[]; |
164 | 182 | ||
183 | extern const struct nvc0_graph_pack nvf0_grctx_pack_hub[]; | ||
165 | extern const struct nvc0_graph_init nvf0_grctx_init_pri_0[]; | 184 | extern const struct nvc0_graph_init nvf0_grctx_init_pri_0[]; |
166 | extern const struct nvc0_graph_init nvf0_grctx_init_cwd_0[]; | 185 | extern const struct nvc0_graph_init nvf0_grctx_init_cwd_0[]; |
167 | 186 | ||
187 | extern const struct nvc0_graph_pack nvf0_grctx_pack_gpc[]; | ||
168 | extern const struct nvc0_graph_init nvf0_grctx_init_gpc_unk_2[]; | 188 | extern const struct nvc0_graph_init nvf0_grctx_init_gpc_unk_2[]; |
169 | 189 | ||
190 | extern const struct nvc0_graph_init nvf0_grctx_init_tex_0[]; | ||
170 | extern const struct nvc0_graph_init nvf0_grctx_init_mpc_0[]; | 191 | extern const struct nvc0_graph_init nvf0_grctx_init_mpc_0[]; |
171 | extern const struct nvc0_graph_init nvf0_grctx_init_l1c_0[]; | 192 | extern const struct nvc0_graph_init nvf0_grctx_init_l1c_0[]; |
172 | 193 | ||
194 | extern const struct nvc0_graph_pack nvf0_grctx_pack_ppc[]; | ||
195 | |||
173 | extern const struct nvc0_graph_init nv108_grctx_init_rstr2d_0[]; | 196 | extern const struct nvc0_graph_init nv108_grctx_init_rstr2d_0[]; |
174 | 197 | ||
175 | extern const struct nvc0_graph_init nv108_grctx_init_prop_0[]; | 198 | extern const struct nvc0_graph_init nv108_grctx_init_prop_0[]; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c index 24a92c569c0a..c6ba8fed18f1 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c | |||
@@ -727,38 +727,38 @@ nvc1_grctx_pack_tpc[] = { | |||
727 | ******************************************************************************/ | 727 | ******************************************************************************/ |
728 | 728 | ||
729 | void | 729 | void |
730 | nvc1_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) | 730 | nvc1_grctx_generate_attrib(struct nvc0_grctx *info) |
731 | { | 731 | { |
732 | struct nvc0_graph_priv *priv = info->priv; | ||
733 | const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(priv); | ||
734 | const u32 alpha = impl->alpha_nr; | ||
735 | const u32 beta = impl->attrib_nr; | ||
736 | const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max); | ||
737 | const u32 access = NV_MEM_ACCESS_RW; | ||
738 | const int s = 12; | ||
739 | const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access); | ||
740 | const int timeslice_mode = 1; | ||
741 | const int max_batches = 0xffff; | ||
742 | u32 bo = 0; | ||
743 | u32 ao = bo + impl->attrib_nr_max * priv->tpc_total; | ||
732 | int gpc, tpc; | 744 | int gpc, tpc; |
733 | u32 offset; | ||
734 | 745 | ||
735 | mmio_data(0x002000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); | 746 | mmio_refn(info, 0x418810, 0x80000000, s, b); |
736 | mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); | 747 | mmio_refn(info, 0x419848, 0x10000000, s, b); |
737 | mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW); | 748 | mmio_wr32(info, 0x405830, (beta << 16) | alpha); |
738 | mmio_list(0x408004, 0x00000000, 8, 0); | 749 | mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches); |
739 | mmio_list(0x408008, 0x80000018, 0, 0); | ||
740 | mmio_list(0x40800c, 0x00000000, 8, 1); | ||
741 | mmio_list(0x408010, 0x80000000, 0, 0); | ||
742 | mmio_list(0x418810, 0x80000000, 12, 2); | ||
743 | mmio_list(0x419848, 0x10000000, 12, 2); | ||
744 | mmio_list(0x419004, 0x00000000, 8, 1); | ||
745 | mmio_list(0x419008, 0x00000000, 0, 0); | ||
746 | mmio_list(0x418808, 0x00000000, 8, 0); | ||
747 | mmio_list(0x41880c, 0x80000018, 0, 0); | ||
748 | 750 | ||
749 | mmio_list(0x405830, 0x02180218, 0, 0); | 751 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) { |
750 | mmio_list(0x4064c4, 0x0086ffff, 0, 0); | ||
751 | |||
752 | for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) { | ||
753 | for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) { | ||
754 | u32 addr = TPC_UNIT(gpc, tpc, 0x0520); | ||
755 | mmio_list(addr, 0x12180000 | offset, 0, 0); | ||
756 | offset += 0x0324; | ||
757 | } | ||
758 | for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) { | 752 | for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) { |
759 | u32 addr = TPC_UNIT(gpc, tpc, 0x0544); | 753 | const u32 a = alpha; |
760 | mmio_list(addr, 0x02180000 | offset, 0, 0); | 754 | const u32 b = beta; |
761 | offset += 0x0324; | 755 | const u32 t = timeslice_mode; |
756 | const u32 o = TPC_UNIT(gpc, tpc, 0x500); | ||
757 | mmio_skip(info, o + 0x20, (t << 28) | (b << 16) | ++bo); | ||
758 | mmio_wr32(info, o + 0x20, (t << 28) | (b << 16) | --bo); | ||
759 | bo += impl->attrib_nr_max; | ||
760 | mmio_wr32(info, o + 0x44, (a << 16) | ao); | ||
761 | ao += impl->alpha_nr_max; | ||
762 | } | 762 | } |
763 | } | 763 | } |
764 | } | 764 | } |
@@ -786,7 +786,6 @@ nvc1_grctx_oclass = &(struct nvc0_grctx_oclass) { | |||
786 | .wr32 = _nouveau_graph_context_wr32, | 786 | .wr32 = _nouveau_graph_context_wr32, |
787 | }, | 787 | }, |
788 | .main = nvc0_grctx_generate_main, | 788 | .main = nvc0_grctx_generate_main, |
789 | .mods = nvc1_grctx_generate_mods, | ||
790 | .unkn = nvc1_grctx_generate_unkn, | 789 | .unkn = nvc1_grctx_generate_unkn, |
791 | .hub = nvc1_grctx_pack_hub, | 790 | .hub = nvc1_grctx_pack_hub, |
792 | .gpc = nvc1_grctx_pack_gpc, | 791 | .gpc = nvc1_grctx_pack_gpc, |
@@ -794,4 +793,13 @@ nvc1_grctx_oclass = &(struct nvc0_grctx_oclass) { | |||
794 | .tpc = nvc1_grctx_pack_tpc, | 793 | .tpc = nvc1_grctx_pack_tpc, |
795 | .icmd = nvc1_grctx_pack_icmd, | 794 | .icmd = nvc1_grctx_pack_icmd, |
796 | .mthd = nvc1_grctx_pack_mthd, | 795 | .mthd = nvc1_grctx_pack_mthd, |
796 | .bundle = nvc0_grctx_generate_bundle, | ||
797 | .bundle_size = 0x1800, | ||
798 | .pagepool = nvc0_grctx_generate_pagepool, | ||
799 | .pagepool_size = 0x8000, | ||
800 | .attrib = nvc1_grctx_generate_attrib, | ||
801 | .attrib_nr_max = 0x324, | ||
802 | .attrib_nr = 0x218, | ||
803 | .alpha_nr_max = 0x324, | ||
804 | .alpha_nr = 0x218, | ||
797 | }.base; | 805 | }.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c index e11ed5538193..41705c60cc47 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c | |||
@@ -92,7 +92,6 @@ nvc4_grctx_oclass = &(struct nvc0_grctx_oclass) { | |||
92 | .wr32 = _nouveau_graph_context_wr32, | 92 | .wr32 = _nouveau_graph_context_wr32, |
93 | }, | 93 | }, |
94 | .main = nvc0_grctx_generate_main, | 94 | .main = nvc0_grctx_generate_main, |
95 | .mods = nvc0_grctx_generate_mods, | ||
96 | .unkn = nvc0_grctx_generate_unkn, | 95 | .unkn = nvc0_grctx_generate_unkn, |
97 | .hub = nvc0_grctx_pack_hub, | 96 | .hub = nvc0_grctx_pack_hub, |
98 | .gpc = nvc0_grctx_pack_gpc, | 97 | .gpc = nvc0_grctx_pack_gpc, |
@@ -100,4 +99,11 @@ nvc4_grctx_oclass = &(struct nvc0_grctx_oclass) { | |||
100 | .tpc = nvc4_grctx_pack_tpc, | 99 | .tpc = nvc4_grctx_pack_tpc, |
101 | .icmd = nvc0_grctx_pack_icmd, | 100 | .icmd = nvc0_grctx_pack_icmd, |
102 | .mthd = nvc0_grctx_pack_mthd, | 101 | .mthd = nvc0_grctx_pack_mthd, |
102 | .bundle = nvc0_grctx_generate_bundle, | ||
103 | .bundle_size = 0x1800, | ||
104 | .pagepool = nvc0_grctx_generate_pagepool, | ||
105 | .pagepool_size = 0x8000, | ||
106 | .attrib = nvc0_grctx_generate_attrib, | ||
107 | .attrib_nr_max = 0x324, | ||
108 | .attrib_nr = 0x218, | ||
103 | }.base; | 109 | }.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c index feebd58dfe8d..8f804cd8f9c7 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c | |||
@@ -343,7 +343,6 @@ nvc8_grctx_oclass = &(struct nvc0_grctx_oclass) { | |||
343 | .wr32 = _nouveau_graph_context_wr32, | 343 | .wr32 = _nouveau_graph_context_wr32, |
344 | }, | 344 | }, |
345 | .main = nvc0_grctx_generate_main, | 345 | .main = nvc0_grctx_generate_main, |
346 | .mods = nvc0_grctx_generate_mods, | ||
347 | .unkn = nvc0_grctx_generate_unkn, | 346 | .unkn = nvc0_grctx_generate_unkn, |
348 | .hub = nvc0_grctx_pack_hub, | 347 | .hub = nvc0_grctx_pack_hub, |
349 | .gpc = nvc8_grctx_pack_gpc, | 348 | .gpc = nvc8_grctx_pack_gpc, |
@@ -351,4 +350,11 @@ nvc8_grctx_oclass = &(struct nvc0_grctx_oclass) { | |||
351 | .tpc = nvc0_grctx_pack_tpc, | 350 | .tpc = nvc0_grctx_pack_tpc, |
352 | .icmd = nvc8_grctx_pack_icmd, | 351 | .icmd = nvc8_grctx_pack_icmd, |
353 | .mthd = nvc8_grctx_pack_mthd, | 352 | .mthd = nvc8_grctx_pack_mthd, |
353 | .bundle = nvc0_grctx_generate_bundle, | ||
354 | .bundle_size = 0x1800, | ||
355 | .pagepool = nvc0_grctx_generate_pagepool, | ||
356 | .pagepool_size = 0x8000, | ||
357 | .attrib = nvc0_grctx_generate_attrib, | ||
358 | .attrib_nr_max = 0x324, | ||
359 | .attrib_nr = 0x218, | ||
354 | }.base; | 360 | }.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c index 1dbc8d7f2e86..fcf534fd9e65 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c | |||
@@ -177,44 +177,41 @@ nvd7_grctx_pack_ppc[] = { | |||
177 | * PGRAPH context implementation | 177 | * PGRAPH context implementation |
178 | ******************************************************************************/ | 178 | ******************************************************************************/ |
179 | 179 | ||
180 | static void | 180 | void |
181 | nvd7_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) | 181 | nvd7_grctx_generate_attrib(struct nvc0_grctx *info) |
182 | { | 182 | { |
183 | u32 magic[GPC_MAX][2]; | 183 | struct nvc0_graph_priv *priv = info->priv; |
184 | u32 offset; | 184 | const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(priv); |
185 | int gpc; | 185 | const u32 alpha = impl->alpha_nr; |
186 | 186 | const u32 beta = impl->attrib_nr; | |
187 | mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); | 187 | const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max); |
188 | mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); | 188 | const u32 access = NV_MEM_ACCESS_RW; |
189 | mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW); | 189 | const int s = 12; |
190 | mmio_list(0x40800c, 0x00000000, 8, 1); | 190 | const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access); |
191 | mmio_list(0x408010, 0x80000000, 0, 0); | 191 | const int timeslice_mode = 1; |
192 | mmio_list(0x419004, 0x00000000, 8, 1); | 192 | const int max_batches = 0xffff; |
193 | mmio_list(0x419008, 0x00000000, 0, 0); | 193 | u32 bo = 0; |
194 | mmio_list(0x408004, 0x00000000, 8, 0); | 194 | u32 ao = bo + impl->attrib_nr_max * priv->tpc_total; |
195 | mmio_list(0x408008, 0x80000018, 0, 0); | 195 | int gpc, ppc; |
196 | mmio_list(0x418808, 0x00000000, 8, 0); | ||
197 | mmio_list(0x41880c, 0x80000018, 0, 0); | ||
198 | mmio_list(0x418810, 0x80000000, 12, 2); | ||
199 | mmio_list(0x419848, 0x10000000, 12, 2); | ||
200 | 196 | ||
201 | mmio_list(0x405830, 0x02180324, 0, 0); | 197 | mmio_refn(info, 0x418810, 0x80000000, s, b); |
202 | mmio_list(0x4064c4, 0x00c9ffff, 0, 0); | 198 | mmio_refn(info, 0x419848, 0x10000000, s, b); |
203 | 199 | mmio_wr32(info, 0x405830, (beta << 16) | alpha); | |
204 | for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) { | 200 | mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches); |
205 | u16 magic0 = 0x0218 * priv->tpc_nr[gpc]; | ||
206 | u16 magic1 = 0x0324 * priv->tpc_nr[gpc]; | ||
207 | magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset; | ||
208 | magic[gpc][1] = 0x00000000 | (magic1 << 16); | ||
209 | offset += 0x0324 * priv->tpc_nr[gpc]; | ||
210 | } | ||
211 | 201 | ||
212 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) { | 202 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) { |
213 | mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0); | 203 | for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++) { |
214 | mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0); | 204 | const u32 a = alpha * priv->ppc_tpc_nr[gpc][ppc]; |
215 | offset += 0x07ff * priv->tpc_nr[gpc]; | 205 | const u32 b = beta * priv->ppc_tpc_nr[gpc][ppc]; |
206 | const u32 t = timeslice_mode; | ||
207 | const u32 o = PPC_UNIT(gpc, ppc, 0); | ||
208 | mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo); | ||
209 | mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo); | ||
210 | bo += impl->attrib_nr_max * priv->ppc_tpc_nr[gpc][ppc]; | ||
211 | mmio_wr32(info, o + 0xe4, (a << 16) | ao); | ||
212 | ao += impl->alpha_nr_max * priv->ppc_tpc_nr[gpc][ppc]; | ||
213 | } | ||
216 | } | 214 | } |
217 | mmio_list(0x17e91c, 0x03060609, 0, 0); /* different from kepler */ | ||
218 | } | 215 | } |
219 | 216 | ||
220 | void | 217 | void |
@@ -223,7 +220,7 @@ nvd7_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) | |||
223 | struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass; | 220 | struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass; |
224 | int i; | 221 | int i; |
225 | 222 | ||
226 | nv_mask(priv, 0x000260, 0x00000001, 0x00000000); | 223 | nouveau_mc(priv)->unk260(nouveau_mc(priv), 0); |
227 | 224 | ||
228 | nvc0_graph_mmio(priv, oclass->hub); | 225 | nvc0_graph_mmio(priv, oclass->hub); |
229 | nvc0_graph_mmio(priv, oclass->gpc); | 226 | nvc0_graph_mmio(priv, oclass->gpc); |
@@ -233,7 +230,9 @@ nvd7_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) | |||
233 | 230 | ||
234 | nv_wr32(priv, 0x404154, 0x00000000); | 231 | nv_wr32(priv, 0x404154, 0x00000000); |
235 | 232 | ||
236 | oclass->mods(priv, info); | 233 | oclass->bundle(info); |
234 | oclass->pagepool(info); | ||
235 | oclass->attrib(info); | ||
237 | oclass->unkn(priv); | 236 | oclass->unkn(priv); |
238 | 237 | ||
239 | nvc0_grctx_generate_tpcid(priv); | 238 | nvc0_grctx_generate_tpcid(priv); |
@@ -248,7 +247,7 @@ nvd7_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) | |||
248 | nvc0_graph_icmd(priv, oclass->icmd); | 247 | nvc0_graph_icmd(priv, oclass->icmd); |
249 | nv_wr32(priv, 0x404154, 0x00000400); | 248 | nv_wr32(priv, 0x404154, 0x00000400); |
250 | nvc0_graph_mthd(priv, oclass->mthd); | 249 | nvc0_graph_mthd(priv, oclass->mthd); |
251 | nv_mask(priv, 0x000260, 0x00000001, 0x00000001); | 250 | nouveau_mc(priv)->unk260(nouveau_mc(priv), 1); |
252 | } | 251 | } |
253 | 252 | ||
254 | struct nouveau_oclass * | 253 | struct nouveau_oclass * |
@@ -263,7 +262,6 @@ nvd7_grctx_oclass = &(struct nvc0_grctx_oclass) { | |||
263 | .wr32 = _nouveau_graph_context_wr32, | 262 | .wr32 = _nouveau_graph_context_wr32, |
264 | }, | 263 | }, |
265 | .main = nvd7_grctx_generate_main, | 264 | .main = nvd7_grctx_generate_main, |
266 | .mods = nvd7_grctx_generate_mods, | ||
267 | .unkn = nve4_grctx_generate_unkn, | 265 | .unkn = nve4_grctx_generate_unkn, |
268 | .hub = nvd7_grctx_pack_hub, | 266 | .hub = nvd7_grctx_pack_hub, |
269 | .gpc = nvd7_grctx_pack_gpc, | 267 | .gpc = nvd7_grctx_pack_gpc, |
@@ -272,4 +270,13 @@ nvd7_grctx_oclass = &(struct nvc0_grctx_oclass) { | |||
272 | .ppc = nvd7_grctx_pack_ppc, | 270 | .ppc = nvd7_grctx_pack_ppc, |
273 | .icmd = nvd9_grctx_pack_icmd, | 271 | .icmd = nvd9_grctx_pack_icmd, |
274 | .mthd = nvd9_grctx_pack_mthd, | 272 | .mthd = nvd9_grctx_pack_mthd, |
273 | .bundle = nvc0_grctx_generate_bundle, | ||
274 | .bundle_size = 0x1800, | ||
275 | .pagepool = nvc0_grctx_generate_pagepool, | ||
276 | .pagepool_size = 0x8000, | ||
277 | .attrib = nvd7_grctx_generate_attrib, | ||
278 | .attrib_nr_max = 0x324, | ||
279 | .attrib_nr = 0x218, | ||
280 | .alpha_nr_max = 0x7ff, | ||
281 | .alpha_nr = 0x324, | ||
275 | }.base; | 282 | }.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c index c665fb7e4660..b9a301b6fd9f 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c | |||
@@ -511,7 +511,6 @@ nvd9_grctx_oclass = &(struct nvc0_grctx_oclass) { | |||
511 | .wr32 = _nouveau_graph_context_wr32, | 511 | .wr32 = _nouveau_graph_context_wr32, |
512 | }, | 512 | }, |
513 | .main = nvc0_grctx_generate_main, | 513 | .main = nvc0_grctx_generate_main, |
514 | .mods = nvc1_grctx_generate_mods, | ||
515 | .unkn = nvc1_grctx_generate_unkn, | 514 | .unkn = nvc1_grctx_generate_unkn, |
516 | .hub = nvd9_grctx_pack_hub, | 515 | .hub = nvd9_grctx_pack_hub, |
517 | .gpc = nvd9_grctx_pack_gpc, | 516 | .gpc = nvd9_grctx_pack_gpc, |
@@ -519,4 +518,13 @@ nvd9_grctx_oclass = &(struct nvc0_grctx_oclass) { | |||
519 | .tpc = nvd9_grctx_pack_tpc, | 518 | .tpc = nvd9_grctx_pack_tpc, |
520 | .icmd = nvd9_grctx_pack_icmd, | 519 | .icmd = nvd9_grctx_pack_icmd, |
521 | .mthd = nvd9_grctx_pack_mthd, | 520 | .mthd = nvd9_grctx_pack_mthd, |
521 | .bundle = nvc0_grctx_generate_bundle, | ||
522 | .bundle_size = 0x1800, | ||
523 | .pagepool = nvc0_grctx_generate_pagepool, | ||
524 | .pagepool_size = 0x8000, | ||
525 | .attrib = nvc1_grctx_generate_attrib, | ||
526 | .attrib_nr_max = 0x324, | ||
527 | .attrib_nr = 0x218, | ||
528 | .alpha_nr_max = 0x324, | ||
529 | .alpha_nr = 0x218, | ||
522 | }.base; | 530 | }.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c index c5b249238587..ccac2ee1a1cb 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c | |||
@@ -839,47 +839,34 @@ nve4_grctx_pack_ppc[] = { | |||
839 | ******************************************************************************/ | 839 | ******************************************************************************/ |
840 | 840 | ||
841 | void | 841 | void |
842 | nve4_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) | 842 | nve4_grctx_generate_bundle(struct nvc0_grctx *info) |
843 | { | 843 | { |
844 | u32 magic[GPC_MAX][2]; | 844 | const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv); |
845 | u32 offset; | 845 | const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth, |
846 | int gpc; | 846 | impl->bundle_size / 0x20); |
847 | 847 | const u32 token_limit = impl->bundle_token_limit; | |
848 | mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); | 848 | const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS; |
849 | mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); | 849 | const int s = 8; |
850 | mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW); | 850 | const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); |
851 | mmio_list(0x40800c, 0x00000000, 8, 1); | 851 | mmio_refn(info, 0x408004, 0x00000000, s, b); |
852 | mmio_list(0x408010, 0x80000000, 0, 0); | 852 | mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); |
853 | mmio_list(0x419004, 0x00000000, 8, 1); | 853 | mmio_refn(info, 0x418808, 0x00000000, s, b); |
854 | mmio_list(0x419008, 0x00000000, 0, 0); | 854 | mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); |
855 | mmio_list(0x4064cc, 0x80000000, 0, 0); | 855 | mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); |
856 | mmio_list(0x408004, 0x00000000, 8, 0); | 856 | } |
857 | mmio_list(0x408008, 0x80000030, 0, 0); | ||
858 | mmio_list(0x418808, 0x00000000, 8, 0); | ||
859 | mmio_list(0x41880c, 0x80000030, 0, 0); | ||
860 | mmio_list(0x4064c8, 0x01800600, 0, 0); | ||
861 | mmio_list(0x418810, 0x80000000, 12, 2); | ||
862 | mmio_list(0x419848, 0x10000000, 12, 2); | ||
863 | |||
864 | mmio_list(0x405830, 0x02180648, 0, 0); | ||
865 | mmio_list(0x4064c4, 0x0192ffff, 0, 0); | ||
866 | |||
867 | for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) { | ||
868 | u16 magic0 = 0x0218 * priv->tpc_nr[gpc]; | ||
869 | u16 magic1 = 0x0648 * priv->tpc_nr[gpc]; | ||
870 | magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset; | ||
871 | magic[gpc][1] = 0x00000000 | (magic1 << 16); | ||
872 | offset += 0x0324 * priv->tpc_nr[gpc]; | ||
873 | } | ||
874 | |||
875 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) { | ||
876 | mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0); | ||
877 | mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0); | ||
878 | offset += 0x07ff * priv->tpc_nr[gpc]; | ||
879 | } | ||
880 | 857 | ||
881 | mmio_list(0x17e91c, 0x06060609, 0, 0); | 858 | void |
882 | mmio_list(0x17e920, 0x00090a05, 0, 0); | 859 | nve4_grctx_generate_pagepool(struct nvc0_grctx *info) |
860 | { | ||
861 | const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv); | ||
862 | const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS; | ||
863 | const int s = 8; | ||
864 | const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access); | ||
865 | mmio_refn(info, 0x40800c, 0x00000000, s, b); | ||
866 | mmio_wr32(info, 0x408010, 0x80000000); | ||
867 | mmio_refn(info, 0x419004, 0x00000000, s, b); | ||
868 | mmio_wr32(info, 0x419008, 0x00000000); | ||
869 | mmio_wr32(info, 0x4064cc, 0x80000000); | ||
883 | } | 870 | } |
884 | 871 | ||
885 | void | 872 | void |
@@ -957,7 +944,7 @@ nve4_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) | |||
957 | struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass; | 944 | struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass; |
958 | int i; | 945 | int i; |
959 | 946 | ||
960 | nv_mask(priv, 0x000260, 0x00000001, 0x00000000); | 947 | nouveau_mc(priv)->unk260(nouveau_mc(priv), 0); |
961 | 948 | ||
962 | nvc0_graph_mmio(priv, oclass->hub); | 949 | nvc0_graph_mmio(priv, oclass->hub); |
963 | nvc0_graph_mmio(priv, oclass->gpc); | 950 | nvc0_graph_mmio(priv, oclass->gpc); |
@@ -967,7 +954,9 @@ nve4_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) | |||
967 | 954 | ||
968 | nv_wr32(priv, 0x404154, 0x00000000); | 955 | nv_wr32(priv, 0x404154, 0x00000000); |
969 | 956 | ||
970 | oclass->mods(priv, info); | 957 | oclass->bundle(info); |
958 | oclass->pagepool(info); | ||
959 | oclass->attrib(info); | ||
971 | oclass->unkn(priv); | 960 | oclass->unkn(priv); |
972 | 961 | ||
973 | nvc0_grctx_generate_tpcid(priv); | 962 | nvc0_grctx_generate_tpcid(priv); |
@@ -991,7 +980,7 @@ nve4_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) | |||
991 | nvc0_graph_icmd(priv, oclass->icmd); | 980 | nvc0_graph_icmd(priv, oclass->icmd); |
992 | nv_wr32(priv, 0x404154, 0x00000400); | 981 | nv_wr32(priv, 0x404154, 0x00000400); |
993 | nvc0_graph_mthd(priv, oclass->mthd); | 982 | nvc0_graph_mthd(priv, oclass->mthd); |
994 | nv_mask(priv, 0x000260, 0x00000001, 0x00000001); | 983 | nouveau_mc(priv)->unk260(nouveau_mc(priv), 1); |
995 | 984 | ||
996 | nv_mask(priv, 0x418800, 0x00200000, 0x00200000); | 985 | nv_mask(priv, 0x418800, 0x00200000, 0x00200000); |
997 | nv_mask(priv, 0x41be10, 0x00800000, 0x00800000); | 986 | nv_mask(priv, 0x41be10, 0x00800000, 0x00800000); |
@@ -1009,7 +998,6 @@ nve4_grctx_oclass = &(struct nvc0_grctx_oclass) { | |||
1009 | .wr32 = _nouveau_graph_context_wr32, | 998 | .wr32 = _nouveau_graph_context_wr32, |
1010 | }, | 999 | }, |
1011 | .main = nve4_grctx_generate_main, | 1000 | .main = nve4_grctx_generate_main, |
1012 | .mods = nve4_grctx_generate_mods, | ||
1013 | .unkn = nve4_grctx_generate_unkn, | 1001 | .unkn = nve4_grctx_generate_unkn, |
1014 | .hub = nve4_grctx_pack_hub, | 1002 | .hub = nve4_grctx_pack_hub, |
1015 | .gpc = nve4_grctx_pack_gpc, | 1003 | .gpc = nve4_grctx_pack_gpc, |
@@ -1018,4 +1006,15 @@ nve4_grctx_oclass = &(struct nvc0_grctx_oclass) { | |||
1018 | .ppc = nve4_grctx_pack_ppc, | 1006 | .ppc = nve4_grctx_pack_ppc, |
1019 | .icmd = nve4_grctx_pack_icmd, | 1007 | .icmd = nve4_grctx_pack_icmd, |
1020 | .mthd = nve4_grctx_pack_mthd, | 1008 | .mthd = nve4_grctx_pack_mthd, |
1009 | .bundle = nve4_grctx_generate_bundle, | ||
1010 | .bundle_size = 0x3000, | ||
1011 | .bundle_min_gpm_fifo_depth = 0x180, | ||
1012 | .bundle_token_limit = 0x600, | ||
1013 | .pagepool = nve4_grctx_generate_pagepool, | ||
1014 | .pagepool_size = 0x8000, | ||
1015 | .attrib = nvd7_grctx_generate_attrib, | ||
1016 | .attrib_nr_max = 0x324, | ||
1017 | .attrib_nr = 0x218, | ||
1018 | .alpha_nr_max = 0x7ff, | ||
1019 | .alpha_nr = 0x648, | ||
1021 | }.base; | 1020 | }.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c index dec03f04114d..e9b0dcf95a49 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c | |||
@@ -279,7 +279,7 @@ nvf0_grctx_init_icmd_0[] = { | |||
279 | {} | 279 | {} |
280 | }; | 280 | }; |
281 | 281 | ||
282 | static const struct nvc0_graph_pack | 282 | const struct nvc0_graph_pack |
283 | nvf0_grctx_pack_icmd[] = { | 283 | nvf0_grctx_pack_icmd[] = { |
284 | { nvf0_grctx_init_icmd_0 }, | 284 | { nvf0_grctx_init_icmd_0 }, |
285 | {} | 285 | {} |
@@ -668,7 +668,7 @@ nvf0_grctx_init_be_0[] = { | |||
668 | {} | 668 | {} |
669 | }; | 669 | }; |
670 | 670 | ||
671 | static const struct nvc0_graph_pack | 671 | const struct nvc0_graph_pack |
672 | nvf0_grctx_pack_hub[] = { | 672 | nvf0_grctx_pack_hub[] = { |
673 | { nvc0_grctx_init_main_0 }, | 673 | { nvc0_grctx_init_main_0 }, |
674 | { nvf0_grctx_init_fe_0 }, | 674 | { nvf0_grctx_init_fe_0 }, |
@@ -704,7 +704,7 @@ nvf0_grctx_init_gpc_unk_2[] = { | |||
704 | {} | 704 | {} |
705 | }; | 705 | }; |
706 | 706 | ||
707 | static const struct nvc0_graph_pack | 707 | const struct nvc0_graph_pack |
708 | nvf0_grctx_pack_gpc[] = { | 708 | nvf0_grctx_pack_gpc[] = { |
709 | { nvc0_grctx_init_gpc_unk_0 }, | 709 | { nvc0_grctx_init_gpc_unk_0 }, |
710 | { nvd9_grctx_init_prop_0 }, | 710 | { nvd9_grctx_init_prop_0 }, |
@@ -718,7 +718,7 @@ nvf0_grctx_pack_gpc[] = { | |||
718 | {} | 718 | {} |
719 | }; | 719 | }; |
720 | 720 | ||
721 | static const struct nvc0_graph_init | 721 | const struct nvc0_graph_init |
722 | nvf0_grctx_init_tex_0[] = { | 722 | nvf0_grctx_init_tex_0[] = { |
723 | { 0x419a00, 1, 0x04, 0x000000f0 }, | 723 | { 0x419a00, 1, 0x04, 0x000000f0 }, |
724 | { 0x419a04, 1, 0x04, 0x00000001 }, | 724 | { 0x419a04, 1, 0x04, 0x00000001 }, |
@@ -797,7 +797,7 @@ nvf0_grctx_init_cbm_0[] = { | |||
797 | {} | 797 | {} |
798 | }; | 798 | }; |
799 | 799 | ||
800 | static const struct nvc0_graph_pack | 800 | const struct nvc0_graph_pack |
801 | nvf0_grctx_pack_ppc[] = { | 801 | nvf0_grctx_pack_ppc[] = { |
802 | { nve4_grctx_init_pes_0 }, | 802 | { nve4_grctx_init_pes_0 }, |
803 | { nvf0_grctx_init_cbm_0 }, | 803 | { nvf0_grctx_init_cbm_0 }, |
@@ -809,58 +809,6 @@ nvf0_grctx_pack_ppc[] = { | |||
809 | * PGRAPH context implementation | 809 | * PGRAPH context implementation |
810 | ******************************************************************************/ | 810 | ******************************************************************************/ |
811 | 811 | ||
812 | static void | ||
813 | nvf0_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) | ||
814 | { | ||
815 | u32 magic[GPC_MAX][4]; | ||
816 | u32 offset; | ||
817 | int gpc; | ||
818 | |||
819 | mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); | ||
820 | mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); | ||
821 | mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW); | ||
822 | mmio_list(0x40800c, 0x00000000, 8, 1); | ||
823 | mmio_list(0x408010, 0x80000000, 0, 0); | ||
824 | mmio_list(0x419004, 0x00000000, 8, 1); | ||
825 | mmio_list(0x419008, 0x00000000, 0, 0); | ||
826 | mmio_list(0x4064cc, 0x80000000, 0, 0); | ||
827 | mmio_list(0x408004, 0x00000000, 8, 0); | ||
828 | mmio_list(0x408008, 0x80000030, 0, 0); | ||
829 | mmio_list(0x418808, 0x00000000, 8, 0); | ||
830 | mmio_list(0x41880c, 0x80000030, 0, 0); | ||
831 | mmio_list(0x4064c8, 0x01800600, 0, 0); | ||
832 | mmio_list(0x418810, 0x80000000, 12, 2); | ||
833 | mmio_list(0x419848, 0x10000000, 12, 2); | ||
834 | |||
835 | mmio_list(0x405830, 0x02180648, 0, 0); | ||
836 | mmio_list(0x4064c4, 0x0192ffff, 0, 0); | ||
837 | |||
838 | for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) { | ||
839 | u16 magic0 = 0x0218 * (priv->tpc_nr[gpc] - 1); | ||
840 | u16 magic1 = 0x0648 * (priv->tpc_nr[gpc] - 1); | ||
841 | u16 magic2 = 0x0218; | ||
842 | u16 magic3 = 0x0648; | ||
843 | magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset; | ||
844 | magic[gpc][1] = 0x00000000 | (magic1 << 16); | ||
845 | offset += 0x0324 * (priv->tpc_nr[gpc] - 1); | ||
846 | magic[gpc][2] = 0x10000000 | (magic2 << 16) | offset; | ||
847 | magic[gpc][3] = 0x00000000 | (magic3 << 16); | ||
848 | offset += 0x0324; | ||
849 | } | ||
850 | |||
851 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) { | ||
852 | mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0); | ||
853 | mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0); | ||
854 | offset += 0x07ff * (priv->tpc_nr[gpc] - 1); | ||
855 | mmio_list(GPC_UNIT(gpc, 0x32c0), magic[gpc][2], 0, 0); | ||
856 | mmio_list(GPC_UNIT(gpc, 0x32e4), magic[gpc][3] | offset, 0, 0); | ||
857 | offset += 0x07ff; | ||
858 | } | ||
859 | |||
860 | mmio_list(0x17e91c, 0x06060609, 0, 0); | ||
861 | mmio_list(0x17e920, 0x00090a05, 0, 0); | ||
862 | } | ||
863 | |||
864 | struct nouveau_oclass * | 812 | struct nouveau_oclass * |
865 | nvf0_grctx_oclass = &(struct nvc0_grctx_oclass) { | 813 | nvf0_grctx_oclass = &(struct nvc0_grctx_oclass) { |
866 | .base.handle = NV_ENGCTX(GR, 0xf0), | 814 | .base.handle = NV_ENGCTX(GR, 0xf0), |
@@ -873,7 +821,6 @@ nvf0_grctx_oclass = &(struct nvc0_grctx_oclass) { | |||
873 | .wr32 = _nouveau_graph_context_wr32, | 821 | .wr32 = _nouveau_graph_context_wr32, |
874 | }, | 822 | }, |
875 | .main = nve4_grctx_generate_main, | 823 | .main = nve4_grctx_generate_main, |
876 | .mods = nvf0_grctx_generate_mods, | ||
877 | .unkn = nve4_grctx_generate_unkn, | 824 | .unkn = nve4_grctx_generate_unkn, |
878 | .hub = nvf0_grctx_pack_hub, | 825 | .hub = nvf0_grctx_pack_hub, |
879 | .gpc = nvf0_grctx_pack_gpc, | 826 | .gpc = nvf0_grctx_pack_gpc, |
@@ -882,4 +829,15 @@ nvf0_grctx_oclass = &(struct nvc0_grctx_oclass) { | |||
882 | .ppc = nvf0_grctx_pack_ppc, | 829 | .ppc = nvf0_grctx_pack_ppc, |
883 | .icmd = nvf0_grctx_pack_icmd, | 830 | .icmd = nvf0_grctx_pack_icmd, |
884 | .mthd = nvf0_grctx_pack_mthd, | 831 | .mthd = nvf0_grctx_pack_mthd, |
832 | .bundle = nve4_grctx_generate_bundle, | ||
833 | .bundle_size = 0x3000, | ||
834 | .bundle_min_gpm_fifo_depth = 0x180, | ||
835 | .bundle_token_limit = 0x7c0, | ||
836 | .pagepool = nve4_grctx_generate_pagepool, | ||
837 | .pagepool_size = 0x8000, | ||
838 | .attrib = nvd7_grctx_generate_attrib, | ||
839 | .attrib_nr_max = 0x324, | ||
840 | .attrib_nr = 0x218, | ||
841 | .alpha_nr_max = 0x7ff, | ||
842 | .alpha_nr = 0x648, | ||
885 | }.base; | 843 | }.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/gk110b.c b/drivers/gpu/drm/nouveau/core/engine/graph/gk110b.c new file mode 100644 index 000000000000..d07b19dc168d --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/gk110b.c | |||
@@ -0,0 +1,117 @@ | |||
1 | /* | ||
2 | * Copyright 2013 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | ||
23 | */ | ||
24 | |||
25 | #include "nvc0.h" | ||
26 | #include "ctxnvc0.h" | ||
27 | |||
28 | /******************************************************************************* | ||
29 | * PGRAPH register lists | ||
30 | ******************************************************************************/ | ||
31 | |||
32 | static const struct nvc0_graph_init | ||
33 | gk110b_graph_init_l1c_0[] = { | ||
34 | { 0x419c98, 1, 0x04, 0x00000000 }, | ||
35 | { 0x419ca8, 1, 0x04, 0x00000000 }, | ||
36 | { 0x419cb0, 1, 0x04, 0x09000000 }, | ||
37 | { 0x419cb4, 1, 0x04, 0x00000000 }, | ||
38 | { 0x419cb8, 1, 0x04, 0x00b08bea }, | ||
39 | { 0x419c84, 1, 0x04, 0x00010384 }, | ||
40 | { 0x419cbc, 1, 0x04, 0x281b3646 }, | ||
41 | { 0x419cc0, 2, 0x04, 0x00000000 }, | ||
42 | { 0x419c80, 1, 0x04, 0x00020230 }, | ||
43 | { 0x419ccc, 2, 0x04, 0x00000000 }, | ||
44 | {} | ||
45 | }; | ||
46 | |||
47 | static const struct nvc0_graph_init | ||
48 | gk110b_graph_init_sm_0[] = { | ||
49 | { 0x419e00, 1, 0x04, 0x00000080 }, | ||
50 | { 0x419ea0, 1, 0x04, 0x00000000 }, | ||
51 | { 0x419ee4, 1, 0x04, 0x00000000 }, | ||
52 | { 0x419ea4, 1, 0x04, 0x00000100 }, | ||
53 | { 0x419ea8, 1, 0x04, 0x00000000 }, | ||
54 | { 0x419eb4, 1, 0x04, 0x00000000 }, | ||
55 | { 0x419ebc, 2, 0x04, 0x00000000 }, | ||
56 | { 0x419edc, 1, 0x04, 0x00000000 }, | ||
57 | { 0x419f00, 1, 0x04, 0x00000000 }, | ||
58 | { 0x419ed0, 1, 0x04, 0x00002616 }, | ||
59 | { 0x419f74, 1, 0x04, 0x00015555 }, | ||
60 | { 0x419f80, 4, 0x04, 0x00000000 }, | ||
61 | {} | ||
62 | }; | ||
63 | |||
64 | static const struct nvc0_graph_pack | ||
65 | gk110b_graph_pack_mmio[] = { | ||
66 | { nve4_graph_init_main_0 }, | ||
67 | { nvf0_graph_init_fe_0 }, | ||
68 | { nvc0_graph_init_pri_0 }, | ||
69 | { nvc0_graph_init_rstr2d_0 }, | ||
70 | { nvd9_graph_init_pd_0 }, | ||
71 | { nvf0_graph_init_ds_0 }, | ||
72 | { nvc0_graph_init_scc_0 }, | ||
73 | { nvf0_graph_init_sked_0 }, | ||
74 | { nvf0_graph_init_cwd_0 }, | ||
75 | { nvd9_graph_init_prop_0 }, | ||
76 | { nvc1_graph_init_gpc_unk_0 }, | ||
77 | { nvc0_graph_init_setup_0 }, | ||
78 | { nvc0_graph_init_crstr_0 }, | ||
79 | { nvc1_graph_init_setup_1 }, | ||
80 | { nvc0_graph_init_zcull_0 }, | ||
81 | { nvd9_graph_init_gpm_0 }, | ||
82 | { nvf0_graph_init_gpc_unk_1 }, | ||
83 | { nvc0_graph_init_gcc_0 }, | ||
84 | { nve4_graph_init_tpccs_0 }, | ||
85 | { nvf0_graph_init_tex_0 }, | ||
86 | { nve4_graph_init_pe_0 }, | ||
87 | { gk110b_graph_init_l1c_0 }, | ||
88 | { nvc0_graph_init_mpc_0 }, | ||
89 | { gk110b_graph_init_sm_0 }, | ||
90 | { nvd7_graph_init_pes_0 }, | ||
91 | { nvd7_graph_init_wwdx_0 }, | ||
92 | { nvd7_graph_init_cbm_0 }, | ||
93 | { nve4_graph_init_be_0 }, | ||
94 | { nvc0_graph_init_fe_1 }, | ||
95 | {} | ||
96 | }; | ||
97 | |||
98 | /******************************************************************************* | ||
99 | * PGRAPH engine/subdev functions | ||
100 | ******************************************************************************/ | ||
101 | |||
102 | struct nouveau_oclass * | ||
103 | gk110b_graph_oclass = &(struct nvc0_graph_oclass) { | ||
104 | .base.handle = NV_ENGINE(GR, 0xf1), | ||
105 | .base.ofuncs = &(struct nouveau_ofuncs) { | ||
106 | .ctor = nvc0_graph_ctor, | ||
107 | .dtor = nvc0_graph_dtor, | ||
108 | .init = nve4_graph_init, | ||
109 | .fini = nvf0_graph_fini, | ||
110 | }, | ||
111 | .cclass = &gk110b_grctx_oclass, | ||
112 | .sclass = nvf0_graph_sclass, | ||
113 | .mmio = gk110b_graph_pack_mmio, | ||
114 | .fecs.ucode = &nvf0_graph_fecs_ucode, | ||
115 | .gpccs.ucode = &nvf0_graph_gpccs_ucode, | ||
116 | .ppc_nr = 2, | ||
117 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c b/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c index 83048a56430d..7d0abe9f3fe7 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c | |||
@@ -27,8 +27,8 @@ static struct nouveau_oclass | |||
27 | gk20a_graph_sclass[] = { | 27 | gk20a_graph_sclass[] = { |
28 | { 0x902d, &nouveau_object_ofuncs }, | 28 | { 0x902d, &nouveau_object_ofuncs }, |
29 | { 0xa040, &nouveau_object_ofuncs }, | 29 | { 0xa040, &nouveau_object_ofuncs }, |
30 | { 0xa297, &nouveau_object_ofuncs }, | 30 | { KEPLER_C, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds }, |
31 | { 0xa0c0, &nouveau_object_ofuncs }, | 31 | { KEPLER_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds }, |
32 | {} | 32 | {} |
33 | }; | 33 | }; |
34 | 34 | ||
@@ -39,9 +39,10 @@ gk20a_graph_oclass = &(struct nvc0_graph_oclass) { | |||
39 | .ctor = nvc0_graph_ctor, | 39 | .ctor = nvc0_graph_ctor, |
40 | .dtor = nvc0_graph_dtor, | 40 | .dtor = nvc0_graph_dtor, |
41 | .init = nve4_graph_init, | 41 | .init = nve4_graph_init, |
42 | .fini = nve4_graph_fini, | 42 | .fini = _nouveau_graph_fini, |
43 | }, | 43 | }, |
44 | .cclass = &gk20a_grctx_oclass, | 44 | .cclass = &gk20a_grctx_oclass, |
45 | .sclass = gk20a_graph_sclass, | 45 | .sclass = gk20a_graph_sclass, |
46 | .mmio = nve4_graph_pack_mmio, | 46 | .mmio = nve4_graph_pack_mmio, |
47 | .ppc_nr = 1, | ||
47 | }.base; | 48 | }.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c b/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c index 21c5f31d607f..4bdbdab2fd9a 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c | |||
@@ -36,8 +36,8 @@ static struct nouveau_oclass | |||
36 | gm107_graph_sclass[] = { | 36 | gm107_graph_sclass[] = { |
37 | { 0x902d, &nouveau_object_ofuncs }, | 37 | { 0x902d, &nouveau_object_ofuncs }, |
38 | { 0xa140, &nouveau_object_ofuncs }, | 38 | { 0xa140, &nouveau_object_ofuncs }, |
39 | { 0xb097, &nouveau_object_ofuncs }, | 39 | { MAXWELL_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds }, |
40 | { 0xb0c0, &nouveau_object_ofuncs }, | 40 | { MAXWELL_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds }, |
41 | {} | 41 | {} |
42 | }; | 42 | }; |
43 | 43 | ||
@@ -425,6 +425,9 @@ gm107_graph_init(struct nouveau_object *object) | |||
425 | nv_wr32(priv, 0x400134, 0xffffffff); | 425 | nv_wr32(priv, 0x400134, 0xffffffff); |
426 | 426 | ||
427 | nv_wr32(priv, 0x400054, 0x2c350f63); | 427 | nv_wr32(priv, 0x400054, 0x2c350f63); |
428 | |||
429 | nvc0_graph_zbc_init(priv); | ||
430 | |||
428 | return nvc0_graph_init_ctxctl(priv); | 431 | return nvc0_graph_init_ctxctl(priv); |
429 | } | 432 | } |
430 | 433 | ||
@@ -462,4 +465,5 @@ gm107_graph_oclass = &(struct nvc0_graph_oclass) { | |||
462 | .mmio = gm107_graph_pack_mmio, | 465 | .mmio = gm107_graph_pack_mmio, |
463 | .fecs.ucode = 0 ? &gm107_graph_fecs_ucode : NULL, | 466 | .fecs.ucode = 0 ? &gm107_graph_fecs_ucode : NULL, |
464 | .gpccs.ucode = &gm107_graph_gpccs_ucode, | 467 | .gpccs.ucode = &gm107_graph_gpccs_ucode, |
468 | .ppc_nr = 2, | ||
465 | }.base; | 469 | }.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c index ad13dcdd15f9..f70e2f67a4dd 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c | |||
@@ -24,7 +24,6 @@ | |||
24 | 24 | ||
25 | #include <core/client.h> | 25 | #include <core/client.h> |
26 | #include <core/os.h> | 26 | #include <core/os.h> |
27 | #include <core/class.h> | ||
28 | #include <core/handle.h> | 27 | #include <core/handle.h> |
29 | #include <core/namedb.h> | 28 | #include <core/namedb.h> |
30 | 29 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c index 4532f7e5618c..2b12b09683c8 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c | |||
@@ -24,7 +24,6 @@ | |||
24 | 24 | ||
25 | #include <core/client.h> | 25 | #include <core/client.h> |
26 | #include <core/os.h> | 26 | #include <core/os.h> |
27 | #include <core/class.h> | ||
28 | #include <core/handle.h> | 27 | #include <core/handle.h> |
29 | 28 | ||
30 | #include <subdev/fb.h> | 29 | #include <subdev/fb.h> |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c index 00ea1a089822..2b0e8f48c029 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c | |||
@@ -33,7 +33,7 @@ static struct nouveau_oclass | |||
33 | nv108_graph_sclass[] = { | 33 | nv108_graph_sclass[] = { |
34 | { 0x902d, &nouveau_object_ofuncs }, | 34 | { 0x902d, &nouveau_object_ofuncs }, |
35 | { 0xa140, &nouveau_object_ofuncs }, | 35 | { 0xa140, &nouveau_object_ofuncs }, |
36 | { 0xa197, &nouveau_object_ofuncs }, | 36 | { KEPLER_B, &nvc0_fermi_ofuncs }, |
37 | { 0xa1c0, &nouveau_object_ofuncs }, | 37 | { 0xa1c0, &nouveau_object_ofuncs }, |
38 | {} | 38 | {} |
39 | }; | 39 | }; |
@@ -220,4 +220,5 @@ nv108_graph_oclass = &(struct nvc0_graph_oclass) { | |||
220 | .mmio = nv108_graph_pack_mmio, | 220 | .mmio = nv108_graph_pack_mmio, |
221 | .fecs.ucode = &nv108_graph_fecs_ucode, | 221 | .fecs.ucode = &nv108_graph_fecs_ucode, |
222 | .gpccs.ucode = &nv108_graph_gpccs_ucode, | 222 | .gpccs.ucode = &nv108_graph_gpccs_ucode, |
223 | .ppc_nr = 1, | ||
223 | }.base; | 224 | }.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c index d145e080899a..ceb9c746d94e 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c | |||
@@ -1,6 +1,5 @@ | |||
1 | #include <core/client.h> | 1 | #include <core/client.h> |
2 | #include <core/os.h> | 2 | #include <core/os.h> |
3 | #include <core/class.h> | ||
4 | #include <core/engctx.h> | 3 | #include <core/engctx.h> |
5 | #include <core/handle.h> | 4 | #include <core/handle.h> |
6 | #include <core/enum.h> | 5 | #include <core/enum.h> |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c index 7a80d005a974..f8a6fdd7d5e8 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c | |||
@@ -1,5 +1,4 @@ | |||
1 | #include <core/os.h> | 1 | #include <core/os.h> |
2 | #include <core/class.h> | ||
3 | #include <core/engctx.h> | 2 | #include <core/engctx.h> |
4 | #include <core/enum.h> | 3 | #include <core/enum.h> |
5 | 4 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c index 3e1f32ee43d4..5de9caa2ef67 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c | |||
@@ -1,5 +1,4 @@ | |||
1 | #include <core/os.h> | 1 | #include <core/os.h> |
2 | #include <core/class.h> | ||
3 | #include <core/engctx.h> | 2 | #include <core/engctx.h> |
4 | #include <core/enum.h> | 3 | #include <core/enum.h> |
5 | 4 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c index e451db32e92a..2f9dbc709389 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c | |||
@@ -1,5 +1,4 @@ | |||
1 | #include <core/os.h> | 1 | #include <core/os.h> |
2 | #include <core/class.h> | ||
3 | #include <core/engctx.h> | 2 | #include <core/engctx.h> |
4 | #include <core/enum.h> | 3 | #include <core/enum.h> |
5 | 4 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c index 9385ac7b44a4..34dd26c70b64 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c | |||
@@ -1,5 +1,4 @@ | |||
1 | #include <core/os.h> | 1 | #include <core/os.h> |
2 | #include <core/class.h> | ||
3 | #include <core/engctx.h> | 2 | #include <core/engctx.h> |
4 | #include <core/enum.h> | 3 | #include <core/enum.h> |
5 | 4 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c index 9ce84b73f86a..2fb5756d9f66 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c | |||
@@ -1,5 +1,4 @@ | |||
1 | #include <core/os.h> | 1 | #include <core/os.h> |
2 | #include <core/class.h> | ||
3 | #include <core/engctx.h> | 2 | #include <core/engctx.h> |
4 | #include <core/enum.h> | 3 | #include <core/enum.h> |
5 | 4 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c index 6477fbf6a550..4f401174868d 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c | |||
@@ -24,7 +24,6 @@ | |||
24 | 24 | ||
25 | #include <core/client.h> | 25 | #include <core/client.h> |
26 | #include <core/os.h> | 26 | #include <core/os.h> |
27 | #include <core/class.h> | ||
28 | #include <core/handle.h> | 27 | #include <core/handle.h> |
29 | #include <core/engctx.h> | 28 | #include <core/engctx.h> |
30 | 29 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c index 20665c21d80e..38e0aa26f1cd 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c | |||
@@ -23,7 +23,6 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/os.h> |
26 | #include <core/class.h> | ||
27 | #include <core/client.h> | 26 | #include <core/client.h> |
28 | #include <core/handle.h> | 27 | #include <core/handle.h> |
29 | #include <core/engctx.h> | 28 | #include <core/engctx.h> |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c index aa0838916354..db19191176fa 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | |||
@@ -26,15 +26,226 @@ | |||
26 | #include "ctxnvc0.h" | 26 | #include "ctxnvc0.h" |
27 | 27 | ||
28 | /******************************************************************************* | 28 | /******************************************************************************* |
29 | * Zero Bandwidth Clear | ||
30 | ******************************************************************************/ | ||
31 | |||
32 | static void | ||
33 | nvc0_graph_zbc_clear_color(struct nvc0_graph_priv *priv, int zbc) | ||
34 | { | ||
35 | if (priv->zbc_color[zbc].format) { | ||
36 | nv_wr32(priv, 0x405804, priv->zbc_color[zbc].ds[0]); | ||
37 | nv_wr32(priv, 0x405808, priv->zbc_color[zbc].ds[1]); | ||
38 | nv_wr32(priv, 0x40580c, priv->zbc_color[zbc].ds[2]); | ||
39 | nv_wr32(priv, 0x405810, priv->zbc_color[zbc].ds[3]); | ||
40 | } | ||
41 | nv_wr32(priv, 0x405814, priv->zbc_color[zbc].format); | ||
42 | nv_wr32(priv, 0x405820, zbc); | ||
43 | nv_wr32(priv, 0x405824, 0x00000004); /* TRIGGER | WRITE | COLOR */ | ||
44 | } | ||
45 | |||
46 | static int | ||
47 | nvc0_graph_zbc_color_get(struct nvc0_graph_priv *priv, int format, | ||
48 | const u32 ds[4], const u32 l2[4]) | ||
49 | { | ||
50 | struct nouveau_ltc *ltc = nouveau_ltc(priv); | ||
51 | int zbc = -ENOSPC, i; | ||
52 | |||
53 | for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) { | ||
54 | if (priv->zbc_color[i].format) { | ||
55 | if (priv->zbc_color[i].format != format) | ||
56 | continue; | ||
57 | if (memcmp(priv->zbc_color[i].ds, ds, sizeof( | ||
58 | priv->zbc_color[i].ds))) | ||
59 | continue; | ||
60 | if (memcmp(priv->zbc_color[i].l2, l2, sizeof( | ||
61 | priv->zbc_color[i].l2))) { | ||
62 | WARN_ON(1); | ||
63 | return -EINVAL; | ||
64 | } | ||
65 | return i; | ||
66 | } else { | ||
67 | zbc = (zbc < 0) ? i : zbc; | ||
68 | } | ||
69 | } | ||
70 | |||
71 | memcpy(priv->zbc_color[zbc].ds, ds, sizeof(priv->zbc_color[zbc].ds)); | ||
72 | memcpy(priv->zbc_color[zbc].l2, l2, sizeof(priv->zbc_color[zbc].l2)); | ||
73 | priv->zbc_color[zbc].format = format; | ||
74 | ltc->zbc_color_get(ltc, zbc, l2); | ||
75 | nvc0_graph_zbc_clear_color(priv, zbc); | ||
76 | return zbc; | ||
77 | } | ||
78 | |||
79 | static void | ||
80 | nvc0_graph_zbc_clear_depth(struct nvc0_graph_priv *priv, int zbc) | ||
81 | { | ||
82 | if (priv->zbc_depth[zbc].format) | ||
83 | nv_wr32(priv, 0x405818, priv->zbc_depth[zbc].ds); | ||
84 | nv_wr32(priv, 0x40581c, priv->zbc_depth[zbc].format); | ||
85 | nv_wr32(priv, 0x405820, zbc); | ||
86 | nv_wr32(priv, 0x405824, 0x00000005); /* TRIGGER | WRITE | DEPTH */ | ||
87 | } | ||
88 | |||
89 | static int | ||
90 | nvc0_graph_zbc_depth_get(struct nvc0_graph_priv *priv, int format, | ||
91 | const u32 ds, const u32 l2) | ||
92 | { | ||
93 | struct nouveau_ltc *ltc = nouveau_ltc(priv); | ||
94 | int zbc = -ENOSPC, i; | ||
95 | |||
96 | for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) { | ||
97 | if (priv->zbc_depth[i].format) { | ||
98 | if (priv->zbc_depth[i].format != format) | ||
99 | continue; | ||
100 | if (priv->zbc_depth[i].ds != ds) | ||
101 | continue; | ||
102 | if (priv->zbc_depth[i].l2 != l2) { | ||
103 | WARN_ON(1); | ||
104 | return -EINVAL; | ||
105 | } | ||
106 | return i; | ||
107 | } else { | ||
108 | zbc = (zbc < 0) ? i : zbc; | ||
109 | } | ||
110 | } | ||
111 | |||
112 | priv->zbc_depth[zbc].format = format; | ||
113 | priv->zbc_depth[zbc].ds = ds; | ||
114 | priv->zbc_depth[zbc].l2 = l2; | ||
115 | ltc->zbc_depth_get(ltc, zbc, l2); | ||
116 | nvc0_graph_zbc_clear_depth(priv, zbc); | ||
117 | return zbc; | ||
118 | } | ||
119 | |||
120 | /******************************************************************************* | ||
29 | * Graphics object classes | 121 | * Graphics object classes |
30 | ******************************************************************************/ | 122 | ******************************************************************************/ |
31 | 123 | ||
124 | static int | ||
125 | nvc0_fermi_mthd_zbc_color(struct nouveau_object *object, void *data, u32 size) | ||
126 | { | ||
127 | struct nvc0_graph_priv *priv = (void *)object->engine; | ||
128 | union { | ||
129 | struct fermi_a_zbc_color_v0 v0; | ||
130 | } *args = data; | ||
131 | int ret; | ||
132 | |||
133 | if (nvif_unpack(args->v0, 0, 0, false)) { | ||
134 | switch (args->v0.format) { | ||
135 | case FERMI_A_ZBC_COLOR_V0_FMT_ZERO: | ||
136 | case FERMI_A_ZBC_COLOR_V0_FMT_UNORM_ONE: | ||
137 | case FERMI_A_ZBC_COLOR_V0_FMT_RF32_GF32_BF32_AF32: | ||
138 | case FERMI_A_ZBC_COLOR_V0_FMT_R16_G16_B16_A16: | ||
139 | case FERMI_A_ZBC_COLOR_V0_FMT_RN16_GN16_BN16_AN16: | ||
140 | case FERMI_A_ZBC_COLOR_V0_FMT_RS16_GS16_BS16_AS16: | ||
141 | case FERMI_A_ZBC_COLOR_V0_FMT_RU16_GU16_BU16_AU16: | ||
142 | case FERMI_A_ZBC_COLOR_V0_FMT_RF16_GF16_BF16_AF16: | ||
143 | case FERMI_A_ZBC_COLOR_V0_FMT_A8R8G8B8: | ||
144 | case FERMI_A_ZBC_COLOR_V0_FMT_A8RL8GL8BL8: | ||
145 | case FERMI_A_ZBC_COLOR_V0_FMT_A2B10G10R10: | ||
146 | case FERMI_A_ZBC_COLOR_V0_FMT_AU2BU10GU10RU10: | ||
147 | case FERMI_A_ZBC_COLOR_V0_FMT_A8B8G8R8: | ||
148 | case FERMI_A_ZBC_COLOR_V0_FMT_A8BL8GL8RL8: | ||
149 | case FERMI_A_ZBC_COLOR_V0_FMT_AN8BN8GN8RN8: | ||
150 | case FERMI_A_ZBC_COLOR_V0_FMT_AS8BS8GS8RS8: | ||
151 | case FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8: | ||
152 | case FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10: | ||
153 | case FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11: | ||
154 | ret = nvc0_graph_zbc_color_get(priv, args->v0.format, | ||
155 | args->v0.ds, | ||
156 | args->v0.l2); | ||
157 | if (ret >= 0) { | ||
158 | args->v0.index = ret; | ||
159 | return 0; | ||
160 | } | ||
161 | break; | ||
162 | default: | ||
163 | return -EINVAL; | ||
164 | } | ||
165 | } | ||
166 | |||
167 | return ret; | ||
168 | } | ||
169 | |||
170 | static int | ||
171 | nvc0_fermi_mthd_zbc_depth(struct nouveau_object *object, void *data, u32 size) | ||
172 | { | ||
173 | struct nvc0_graph_priv *priv = (void *)object->engine; | ||
174 | union { | ||
175 | struct fermi_a_zbc_depth_v0 v0; | ||
176 | } *args = data; | ||
177 | int ret; | ||
178 | |||
179 | if (nvif_unpack(args->v0, 0, 0, false)) { | ||
180 | switch (args->v0.format) { | ||
181 | case FERMI_A_ZBC_DEPTH_V0_FMT_FP32: | ||
182 | ret = nvc0_graph_zbc_depth_get(priv, args->v0.format, | ||
183 | args->v0.ds, | ||
184 | args->v0.l2); | ||
185 | return (ret >= 0) ? 0 : -ENOSPC; | ||
186 | default: | ||
187 | return -EINVAL; | ||
188 | } | ||
189 | } | ||
190 | |||
191 | return ret; | ||
192 | } | ||
193 | |||
194 | static int | ||
195 | nvc0_fermi_mthd(struct nouveau_object *object, u32 mthd, void *data, u32 size) | ||
196 | { | ||
197 | switch (mthd) { | ||
198 | case FERMI_A_ZBC_COLOR: | ||
199 | return nvc0_fermi_mthd_zbc_color(object, data, size); | ||
200 | case FERMI_A_ZBC_DEPTH: | ||
201 | return nvc0_fermi_mthd_zbc_depth(object, data, size); | ||
202 | default: | ||
203 | break; | ||
204 | } | ||
205 | return -EINVAL; | ||
206 | } | ||
207 | |||
208 | struct nouveau_ofuncs | ||
209 | nvc0_fermi_ofuncs = { | ||
210 | .ctor = _nouveau_object_ctor, | ||
211 | .dtor = nouveau_object_destroy, | ||
212 | .init = nouveau_object_init, | ||
213 | .fini = nouveau_object_fini, | ||
214 | .mthd = nvc0_fermi_mthd, | ||
215 | }; | ||
216 | |||
217 | static int | ||
218 | nvc0_graph_set_shader_exceptions(struct nouveau_object *object, u32 mthd, | ||
219 | void *pdata, u32 size) | ||
220 | { | ||
221 | struct nvc0_graph_priv *priv = (void *)nv_engine(object); | ||
222 | if (size >= sizeof(u32)) { | ||
223 | u32 data = *(u32 *)pdata ? 0xffffffff : 0x00000000; | ||
224 | nv_wr32(priv, 0x419e44, data); | ||
225 | nv_wr32(priv, 0x419e4c, data); | ||
226 | return 0; | ||
227 | } | ||
228 | return -EINVAL; | ||
229 | } | ||
230 | |||
231 | struct nouveau_omthds | ||
232 | nvc0_graph_9097_omthds[] = { | ||
233 | { 0x1528, 0x1528, nvc0_graph_set_shader_exceptions }, | ||
234 | {} | ||
235 | }; | ||
236 | |||
237 | struct nouveau_omthds | ||
238 | nvc0_graph_90c0_omthds[] = { | ||
239 | { 0x1528, 0x1528, nvc0_graph_set_shader_exceptions }, | ||
240 | {} | ||
241 | }; | ||
242 | |||
32 | struct nouveau_oclass | 243 | struct nouveau_oclass |
33 | nvc0_graph_sclass[] = { | 244 | nvc0_graph_sclass[] = { |
34 | { 0x902d, &nouveau_object_ofuncs }, | 245 | { 0x902d, &nouveau_object_ofuncs }, |
35 | { 0x9039, &nouveau_object_ofuncs }, | 246 | { 0x9039, &nouveau_object_ofuncs }, |
36 | { 0x9097, &nouveau_object_ofuncs }, | 247 | { FERMI_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds }, |
37 | { 0x90c0, &nouveau_object_ofuncs }, | 248 | { FERMI_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds }, |
38 | {} | 249 | {} |
39 | }; | 250 | }; |
40 | 251 | ||
@@ -98,7 +309,7 @@ nvc0_graph_context_ctor(struct nouveau_object *parent, | |||
98 | u32 addr = mmio->addr; | 309 | u32 addr = mmio->addr; |
99 | u32 data = mmio->data; | 310 | u32 data = mmio->data; |
100 | 311 | ||
101 | if (mmio->shift) { | 312 | if (mmio->buffer >= 0) { |
102 | u64 info = chan->data[mmio->buffer].vma.offset; | 313 | u64 info = chan->data[mmio->buffer].vma.offset; |
103 | data |= info >> mmio->shift; | 314 | data |= info >> mmio->shift; |
104 | } | 315 | } |
@@ -407,6 +618,35 @@ nvc0_graph_pack_mmio[] = { | |||
407 | ******************************************************************************/ | 618 | ******************************************************************************/ |
408 | 619 | ||
409 | void | 620 | void |
621 | nvc0_graph_zbc_init(struct nvc0_graph_priv *priv) | ||
622 | { | ||
623 | const u32 zero[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
624 | 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; | ||
625 | const u32 one[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, | ||
626 | 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }; | ||
627 | const u32 f32_0[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
628 | 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; | ||
629 | const u32 f32_1[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, | ||
630 | 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 }; | ||
631 | struct nouveau_ltc *ltc = nouveau_ltc(priv); | ||
632 | int index; | ||
633 | |||
634 | if (!priv->zbc_color[0].format) { | ||
635 | nvc0_graph_zbc_color_get(priv, 1, & zero[0], &zero[4]); | ||
636 | nvc0_graph_zbc_color_get(priv, 2, & one[0], &one[4]); | ||
637 | nvc0_graph_zbc_color_get(priv, 4, &f32_0[0], &f32_0[4]); | ||
638 | nvc0_graph_zbc_color_get(priv, 4, &f32_1[0], &f32_1[4]); | ||
639 | nvc0_graph_zbc_depth_get(priv, 1, 0x00000000, 0x00000000); | ||
640 | nvc0_graph_zbc_depth_get(priv, 1, 0x3f800000, 0x3f800000); | ||
641 | } | ||
642 | |||
643 | for (index = ltc->zbc_min; index <= ltc->zbc_max; index++) | ||
644 | nvc0_graph_zbc_clear_color(priv, index); | ||
645 | for (index = ltc->zbc_min; index <= ltc->zbc_max; index++) | ||
646 | nvc0_graph_zbc_clear_depth(priv, index); | ||
647 | } | ||
648 | |||
649 | void | ||
410 | nvc0_graph_mmio(struct nvc0_graph_priv *priv, const struct nvc0_graph_pack *p) | 650 | nvc0_graph_mmio(struct nvc0_graph_priv *priv, const struct nvc0_graph_pack *p) |
411 | { | 651 | { |
412 | const struct nvc0_graph_pack *pack; | 652 | const struct nvc0_graph_pack *pack; |
@@ -969,17 +1209,16 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv) | |||
969 | { | 1209 | { |
970 | struct nvc0_graph_oclass *oclass = (void *)nv_object(priv)->oclass; | 1210 | struct nvc0_graph_oclass *oclass = (void *)nv_object(priv)->oclass; |
971 | struct nvc0_grctx_oclass *cclass = (void *)nv_engine(priv)->cclass; | 1211 | struct nvc0_grctx_oclass *cclass = (void *)nv_engine(priv)->cclass; |
972 | u32 r000260; | ||
973 | int i; | 1212 | int i; |
974 | 1213 | ||
975 | if (priv->firmware) { | 1214 | if (priv->firmware) { |
976 | /* load fuc microcode */ | 1215 | /* load fuc microcode */ |
977 | r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000); | 1216 | nouveau_mc(priv)->unk260(nouveau_mc(priv), 0); |
978 | nvc0_graph_init_fw(priv, 0x409000, &priv->fuc409c, | 1217 | nvc0_graph_init_fw(priv, 0x409000, &priv->fuc409c, |
979 | &priv->fuc409d); | 1218 | &priv->fuc409d); |
980 | nvc0_graph_init_fw(priv, 0x41a000, &priv->fuc41ac, | 1219 | nvc0_graph_init_fw(priv, 0x41a000, &priv->fuc41ac, |
981 | &priv->fuc41ad); | 1220 | &priv->fuc41ad); |
982 | nv_wr32(priv, 0x000260, r000260); | 1221 | nouveau_mc(priv)->unk260(nouveau_mc(priv), 1); |
983 | 1222 | ||
984 | /* start both of them running */ | 1223 | /* start both of them running */ |
985 | nv_wr32(priv, 0x409840, 0xffffffff); | 1224 | nv_wr32(priv, 0x409840, 0xffffffff); |
@@ -1066,7 +1305,7 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv) | |||
1066 | } | 1305 | } |
1067 | 1306 | ||
1068 | /* load HUB microcode */ | 1307 | /* load HUB microcode */ |
1069 | r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000); | 1308 | nouveau_mc(priv)->unk260(nouveau_mc(priv), 0); |
1070 | nv_wr32(priv, 0x4091c0, 0x01000000); | 1309 | nv_wr32(priv, 0x4091c0, 0x01000000); |
1071 | for (i = 0; i < oclass->fecs.ucode->data.size / 4; i++) | 1310 | for (i = 0; i < oclass->fecs.ucode->data.size / 4; i++) |
1072 | nv_wr32(priv, 0x4091c4, oclass->fecs.ucode->data.data[i]); | 1311 | nv_wr32(priv, 0x4091c4, oclass->fecs.ucode->data.data[i]); |
@@ -1089,7 +1328,7 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv) | |||
1089 | nv_wr32(priv, 0x41a188, i >> 6); | 1328 | nv_wr32(priv, 0x41a188, i >> 6); |
1090 | nv_wr32(priv, 0x41a184, oclass->gpccs.ucode->code.data[i]); | 1329 | nv_wr32(priv, 0x41a184, oclass->gpccs.ucode->code.data[i]); |
1091 | } | 1330 | } |
1092 | nv_wr32(priv, 0x000260, r000260); | 1331 | nouveau_mc(priv)->unk260(nouveau_mc(priv), 1); |
1093 | 1332 | ||
1094 | /* load register lists */ | 1333 | /* load register lists */ |
1095 | nvc0_graph_init_csdata(priv, cclass->hub, 0x409000, 0x000, 0x000000); | 1334 | nvc0_graph_init_csdata(priv, cclass->hub, 0x409000, 0x000, 0x000000); |
@@ -1224,6 +1463,9 @@ nvc0_graph_init(struct nouveau_object *object) | |||
1224 | nv_wr32(priv, 0x400134, 0xffffffff); | 1463 | nv_wr32(priv, 0x400134, 0xffffffff); |
1225 | 1464 | ||
1226 | nv_wr32(priv, 0x400054, 0x34ce3464); | 1465 | nv_wr32(priv, 0x400054, 0x34ce3464); |
1466 | |||
1467 | nvc0_graph_zbc_init(priv); | ||
1468 | |||
1227 | return nvc0_graph_init_ctxctl(priv); | 1469 | return nvc0_graph_init_ctxctl(priv); |
1228 | } | 1470 | } |
1229 | 1471 | ||
@@ -1287,7 +1529,7 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
1287 | struct nouveau_device *device = nv_device(parent); | 1529 | struct nouveau_device *device = nv_device(parent); |
1288 | struct nvc0_graph_priv *priv; | 1530 | struct nvc0_graph_priv *priv; |
1289 | bool use_ext_fw, enable; | 1531 | bool use_ext_fw, enable; |
1290 | int ret, i; | 1532 | int ret, i, j; |
1291 | 1533 | ||
1292 | use_ext_fw = nouveau_boolopt(device->cfgopt, "NvGrUseFW", | 1534 | use_ext_fw = nouveau_boolopt(device->cfgopt, "NvGrUseFW", |
1293 | oclass->fecs.ucode == NULL); | 1535 | oclass->fecs.ucode == NULL); |
@@ -1333,6 +1575,11 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
1333 | for (i = 0; i < priv->gpc_nr; i++) { | 1575 | for (i = 0; i < priv->gpc_nr; i++) { |
1334 | priv->tpc_nr[i] = nv_rd32(priv, GPC_UNIT(i, 0x2608)); | 1576 | priv->tpc_nr[i] = nv_rd32(priv, GPC_UNIT(i, 0x2608)); |
1335 | priv->tpc_total += priv->tpc_nr[i]; | 1577 | priv->tpc_total += priv->tpc_nr[i]; |
1578 | priv->ppc_nr[i] = oclass->ppc_nr; | ||
1579 | for (j = 0; j < priv->ppc_nr[i]; j++) { | ||
1580 | u8 mask = nv_rd32(priv, GPC_UNIT(i, 0x0c30 + (j * 4))); | ||
1581 | priv->ppc_tpc_nr[i][j] = hweight8(mask); | ||
1582 | } | ||
1336 | } | 1583 | } |
1337 | 1584 | ||
1338 | /*XXX: these need figuring out... though it might not even matter */ | 1585 | /*XXX: these need figuring out... though it might not even matter */ |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h index ffc289198dd8..7ed9e89c3435 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h | |||
@@ -30,10 +30,15 @@ | |||
30 | #include <core/gpuobj.h> | 30 | #include <core/gpuobj.h> |
31 | #include <core/option.h> | 31 | #include <core/option.h> |
32 | 32 | ||
33 | #include <nvif/unpack.h> | ||
34 | #include <nvif/class.h> | ||
35 | |||
33 | #include <subdev/fb.h> | 36 | #include <subdev/fb.h> |
34 | #include <subdev/vm.h> | 37 | #include <subdev/vm.h> |
35 | #include <subdev/bar.h> | 38 | #include <subdev/bar.h> |
36 | #include <subdev/timer.h> | 39 | #include <subdev/timer.h> |
40 | #include <subdev/mc.h> | ||
41 | #include <subdev/ltc.h> | ||
37 | 42 | ||
38 | #include <engine/fifo.h> | 43 | #include <engine/fifo.h> |
39 | #include <engine/graph.h> | 44 | #include <engine/graph.h> |
@@ -60,7 +65,7 @@ struct nvc0_graph_mmio { | |||
60 | u32 addr; | 65 | u32 addr; |
61 | u32 data; | 66 | u32 data; |
62 | u32 shift; | 67 | u32 shift; |
63 | u32 buffer; | 68 | int buffer; |
64 | }; | 69 | }; |
65 | 70 | ||
66 | struct nvc0_graph_fuc { | 71 | struct nvc0_graph_fuc { |
@@ -68,6 +73,18 @@ struct nvc0_graph_fuc { | |||
68 | u32 size; | 73 | u32 size; |
69 | }; | 74 | }; |
70 | 75 | ||
76 | struct nvc0_graph_zbc_color { | ||
77 | u32 format; | ||
78 | u32 ds[4]; | ||
79 | u32 l2[4]; | ||
80 | }; | ||
81 | |||
82 | struct nvc0_graph_zbc_depth { | ||
83 | u32 format; | ||
84 | u32 ds; | ||
85 | u32 l2; | ||
86 | }; | ||
87 | |||
71 | struct nvc0_graph_priv { | 88 | struct nvc0_graph_priv { |
72 | struct nouveau_graph base; | 89 | struct nouveau_graph base; |
73 | 90 | ||
@@ -77,10 +94,15 @@ struct nvc0_graph_priv { | |||
77 | struct nvc0_graph_fuc fuc41ad; | 94 | struct nvc0_graph_fuc fuc41ad; |
78 | bool firmware; | 95 | bool firmware; |
79 | 96 | ||
97 | struct nvc0_graph_zbc_color zbc_color[NOUVEAU_LTC_MAX_ZBC_CNT]; | ||
98 | struct nvc0_graph_zbc_depth zbc_depth[NOUVEAU_LTC_MAX_ZBC_CNT]; | ||
99 | |||
80 | u8 rop_nr; | 100 | u8 rop_nr; |
81 | u8 gpc_nr; | 101 | u8 gpc_nr; |
82 | u8 tpc_nr[GPC_MAX]; | 102 | u8 tpc_nr[GPC_MAX]; |
83 | u8 tpc_total; | 103 | u8 tpc_total; |
104 | u8 ppc_nr[GPC_MAX]; | ||
105 | u8 ppc_tpc_nr[GPC_MAX][4]; | ||
84 | 106 | ||
85 | struct nouveau_gpuobj *unk4188b4; | 107 | struct nouveau_gpuobj *unk4188b4; |
86 | struct nouveau_gpuobj *unk4188b8; | 108 | struct nouveau_gpuobj *unk4188b8; |
@@ -118,12 +140,20 @@ int nvc0_graph_ctor(struct nouveau_object *, struct nouveau_object *, | |||
118 | struct nouveau_object **); | 140 | struct nouveau_object **); |
119 | void nvc0_graph_dtor(struct nouveau_object *); | 141 | void nvc0_graph_dtor(struct nouveau_object *); |
120 | int nvc0_graph_init(struct nouveau_object *); | 142 | int nvc0_graph_init(struct nouveau_object *); |
143 | void nvc0_graph_zbc_init(struct nvc0_graph_priv *); | ||
144 | |||
121 | int nve4_graph_fini(struct nouveau_object *, bool); | 145 | int nve4_graph_fini(struct nouveau_object *, bool); |
122 | int nve4_graph_init(struct nouveau_object *); | 146 | int nve4_graph_init(struct nouveau_object *); |
123 | 147 | ||
124 | extern struct nouveau_oclass nvc0_graph_sclass[]; | 148 | int nvf0_graph_fini(struct nouveau_object *, bool); |
149 | |||
150 | extern struct nouveau_ofuncs nvc0_fermi_ofuncs; | ||
125 | 151 | ||
152 | extern struct nouveau_oclass nvc0_graph_sclass[]; | ||
153 | extern struct nouveau_omthds nvc0_graph_9097_omthds[]; | ||
154 | extern struct nouveau_omthds nvc0_graph_90c0_omthds[]; | ||
126 | extern struct nouveau_oclass nvc8_graph_sclass[]; | 155 | extern struct nouveau_oclass nvc8_graph_sclass[]; |
156 | extern struct nouveau_oclass nvf0_graph_sclass[]; | ||
127 | 157 | ||
128 | struct nvc0_graph_init { | 158 | struct nvc0_graph_init { |
129 | u32 addr; | 159 | u32 addr; |
@@ -149,6 +179,9 @@ struct nvc0_graph_ucode { | |||
149 | extern struct nvc0_graph_ucode nvc0_graph_fecs_ucode; | 179 | extern struct nvc0_graph_ucode nvc0_graph_fecs_ucode; |
150 | extern struct nvc0_graph_ucode nvc0_graph_gpccs_ucode; | 180 | extern struct nvc0_graph_ucode nvc0_graph_gpccs_ucode; |
151 | 181 | ||
182 | extern struct nvc0_graph_ucode nvf0_graph_fecs_ucode; | ||
183 | extern struct nvc0_graph_ucode nvf0_graph_gpccs_ucode; | ||
184 | |||
152 | struct nvc0_graph_oclass { | 185 | struct nvc0_graph_oclass { |
153 | struct nouveau_oclass base; | 186 | struct nouveau_oclass base; |
154 | struct nouveau_oclass **cclass; | 187 | struct nouveau_oclass **cclass; |
@@ -160,6 +193,7 @@ struct nvc0_graph_oclass { | |||
160 | struct { | 193 | struct { |
161 | struct nvc0_graph_ucode *ucode; | 194 | struct nvc0_graph_ucode *ucode; |
162 | } gpccs; | 195 | } gpccs; |
196 | int ppc_nr; | ||
163 | }; | 197 | }; |
164 | 198 | ||
165 | void nvc0_graph_mmio(struct nvc0_graph_priv *, const struct nvc0_graph_pack *); | 199 | void nvc0_graph_mmio(struct nvc0_graph_priv *, const struct nvc0_graph_pack *); |
@@ -223,9 +257,11 @@ extern const struct nvc0_graph_init nve4_graph_init_be_0[]; | |||
223 | extern const struct nvc0_graph_pack nve4_graph_pack_mmio[]; | 257 | extern const struct nvc0_graph_pack nve4_graph_pack_mmio[]; |
224 | 258 | ||
225 | extern const struct nvc0_graph_init nvf0_graph_init_fe_0[]; | 259 | extern const struct nvc0_graph_init nvf0_graph_init_fe_0[]; |
260 | extern const struct nvc0_graph_init nvf0_graph_init_ds_0[]; | ||
226 | extern const struct nvc0_graph_init nvf0_graph_init_sked_0[]; | 261 | extern const struct nvc0_graph_init nvf0_graph_init_sked_0[]; |
227 | extern const struct nvc0_graph_init nvf0_graph_init_cwd_0[]; | 262 | extern const struct nvc0_graph_init nvf0_graph_init_cwd_0[]; |
228 | extern const struct nvc0_graph_init nvf0_graph_init_gpc_unk_1[]; | 263 | extern const struct nvc0_graph_init nvf0_graph_init_gpc_unk_1[]; |
264 | extern const struct nvc0_graph_init nvf0_graph_init_tex_0[]; | ||
229 | extern const struct nvc0_graph_init nvf0_graph_init_sm_0[]; | 265 | extern const struct nvc0_graph_init nvf0_graph_init_sm_0[]; |
230 | 266 | ||
231 | extern const struct nvc0_graph_init nv108_graph_init_gpc_unk_0[]; | 267 | extern const struct nvc0_graph_init nv108_graph_init_gpc_unk_0[]; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c index 30cab0b2eba1..93d58e5b82c2 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c | |||
@@ -33,9 +33,9 @@ static struct nouveau_oclass | |||
33 | nvc1_graph_sclass[] = { | 33 | nvc1_graph_sclass[] = { |
34 | { 0x902d, &nouveau_object_ofuncs }, | 34 | { 0x902d, &nouveau_object_ofuncs }, |
35 | { 0x9039, &nouveau_object_ofuncs }, | 35 | { 0x9039, &nouveau_object_ofuncs }, |
36 | { 0x9097, &nouveau_object_ofuncs }, | 36 | { FERMI_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds }, |
37 | { 0x90c0, &nouveau_object_ofuncs }, | 37 | { FERMI_B, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds }, |
38 | { 0x9197, &nouveau_object_ofuncs }, | 38 | { FERMI_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds }, |
39 | {} | 39 | {} |
40 | }; | 40 | }; |
41 | 41 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c index a6bf783e1256..692e1eda0eb4 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c | |||
@@ -33,10 +33,10 @@ struct nouveau_oclass | |||
33 | nvc8_graph_sclass[] = { | 33 | nvc8_graph_sclass[] = { |
34 | { 0x902d, &nouveau_object_ofuncs }, | 34 | { 0x902d, &nouveau_object_ofuncs }, |
35 | { 0x9039, &nouveau_object_ofuncs }, | 35 | { 0x9039, &nouveau_object_ofuncs }, |
36 | { 0x9097, &nouveau_object_ofuncs }, | 36 | { FERMI_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds }, |
37 | { 0x90c0, &nouveau_object_ofuncs }, | 37 | { FERMI_B, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds }, |
38 | { 0x9197, &nouveau_object_ofuncs }, | 38 | { FERMI_C, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds }, |
39 | { 0x9297, &nouveau_object_ofuncs }, | 39 | { FERMI_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds }, |
40 | {} | 40 | {} |
41 | }; | 41 | }; |
42 | 42 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c index 2a6a94e2a041..41e8445c7eea 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c | |||
@@ -133,4 +133,5 @@ nvd7_graph_oclass = &(struct nvc0_graph_oclass) { | |||
133 | .mmio = nvd7_graph_pack_mmio, | 133 | .mmio = nvd7_graph_pack_mmio, |
134 | .fecs.ucode = &nvd7_graph_fecs_ucode, | 134 | .fecs.ucode = &nvd7_graph_fecs_ucode, |
135 | .gpccs.ucode = &nvd7_graph_gpccs_ucode, | 135 | .gpccs.ucode = &nvd7_graph_gpccs_ucode, |
136 | .ppc_nr = 1, | ||
136 | }.base; | 137 | }.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c index 51e0c075ad34..0c71f5c67ae0 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c | |||
@@ -22,6 +22,8 @@ | |||
22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | 22 | * Authors: Ben Skeggs <bskeggs@redhat.com> |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <subdev/pwr.h> | ||
26 | |||
25 | #include "nvc0.h" | 27 | #include "nvc0.h" |
26 | #include "ctxnvc0.h" | 28 | #include "ctxnvc0.h" |
27 | 29 | ||
@@ -33,8 +35,8 @@ static struct nouveau_oclass | |||
33 | nve4_graph_sclass[] = { | 35 | nve4_graph_sclass[] = { |
34 | { 0x902d, &nouveau_object_ofuncs }, | 36 | { 0x902d, &nouveau_object_ofuncs }, |
35 | { 0xa040, &nouveau_object_ofuncs }, | 37 | { 0xa040, &nouveau_object_ofuncs }, |
36 | { 0xa097, &nouveau_object_ofuncs }, | 38 | { KEPLER_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds }, |
37 | { 0xa0c0, &nouveau_object_ofuncs }, | 39 | { KEPLER_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds }, |
38 | {} | 40 | {} |
39 | }; | 41 | }; |
40 | 42 | ||
@@ -190,39 +192,20 @@ nve4_graph_pack_mmio[] = { | |||
190 | ******************************************************************************/ | 192 | ******************************************************************************/ |
191 | 193 | ||
192 | int | 194 | int |
193 | nve4_graph_fini(struct nouveau_object *object, bool suspend) | ||
194 | { | ||
195 | struct nvc0_graph_priv *priv = (void *)object; | ||
196 | |||
197 | /*XXX: this is a nasty hack to power on gr on certain boards | ||
198 | * where it's disabled by therm, somehow. ideally it'd | ||
199 | * be nice to know when we should be doing this, and why, | ||
200 | * but, it's yet to be determined. for now we test for | ||
201 | * the particular mmio error that occurs in the situation, | ||
202 | * and then bash therm in the way nvidia do. | ||
203 | */ | ||
204 | nv_mask(priv, 0x000200, 0x08001000, 0x08001000); | ||
205 | nv_rd32(priv, 0x000200); | ||
206 | if (nv_rd32(priv, 0x400700) == 0xbadf1000) { | ||
207 | nv_mask(priv, 0x000200, 0x08001000, 0x00000000); | ||
208 | nv_rd32(priv, 0x000200); | ||
209 | nv_mask(priv, 0x020004, 0xc0000000, 0x40000000); | ||
210 | } | ||
211 | |||
212 | return nouveau_graph_fini(&priv->base, suspend); | ||
213 | } | ||
214 | |||
215 | int | ||
216 | nve4_graph_init(struct nouveau_object *object) | 195 | nve4_graph_init(struct nouveau_object *object) |
217 | { | 196 | { |
218 | struct nvc0_graph_oclass *oclass = (void *)object->oclass; | 197 | struct nvc0_graph_oclass *oclass = (void *)object->oclass; |
219 | struct nvc0_graph_priv *priv = (void *)object; | 198 | struct nvc0_graph_priv *priv = (void *)object; |
199 | struct nouveau_pwr *ppwr = nouveau_pwr(priv); | ||
220 | const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total); | 200 | const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total); |
221 | u32 data[TPC_MAX / 8] = {}; | 201 | u32 data[TPC_MAX / 8] = {}; |
222 | u8 tpcnr[GPC_MAX]; | 202 | u8 tpcnr[GPC_MAX]; |
223 | int gpc, tpc, rop; | 203 | int gpc, tpc, rop; |
224 | int ret, i; | 204 | int ret, i; |
225 | 205 | ||
206 | if (ppwr) | ||
207 | ppwr->pgob(ppwr, false); | ||
208 | |||
226 | ret = nouveau_graph_init(&priv->base); | 209 | ret = nouveau_graph_init(&priv->base); |
227 | if (ret) | 210 | if (ret) |
228 | return ret; | 211 | return ret; |
@@ -320,6 +303,9 @@ nve4_graph_init(struct nouveau_object *object) | |||
320 | nv_wr32(priv, 0x400134, 0xffffffff); | 303 | nv_wr32(priv, 0x400134, 0xffffffff); |
321 | 304 | ||
322 | nv_wr32(priv, 0x400054, 0x34ce3464); | 305 | nv_wr32(priv, 0x400054, 0x34ce3464); |
306 | |||
307 | nvc0_graph_zbc_init(priv); | ||
308 | |||
323 | return nvc0_graph_init_ctxctl(priv); | 309 | return nvc0_graph_init_ctxctl(priv); |
324 | } | 310 | } |
325 | 311 | ||
@@ -350,11 +336,12 @@ nve4_graph_oclass = &(struct nvc0_graph_oclass) { | |||
350 | .ctor = nvc0_graph_ctor, | 336 | .ctor = nvc0_graph_ctor, |
351 | .dtor = nvc0_graph_dtor, | 337 | .dtor = nvc0_graph_dtor, |
352 | .init = nve4_graph_init, | 338 | .init = nve4_graph_init, |
353 | .fini = nve4_graph_fini, | 339 | .fini = _nouveau_graph_fini, |
354 | }, | 340 | }, |
355 | .cclass = &nve4_grctx_oclass, | 341 | .cclass = &nve4_grctx_oclass, |
356 | .sclass = nve4_graph_sclass, | 342 | .sclass = nve4_graph_sclass, |
357 | .mmio = nve4_graph_pack_mmio, | 343 | .mmio = nve4_graph_pack_mmio, |
358 | .fecs.ucode = &nve4_graph_fecs_ucode, | 344 | .fecs.ucode = &nve4_graph_fecs_ucode, |
359 | .gpccs.ucode = &nve4_graph_gpccs_ucode, | 345 | .gpccs.ucode = &nve4_graph_gpccs_ucode, |
346 | .ppc_nr = 1, | ||
360 | }.base; | 347 | }.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c index c96762122b9b..c306c0f2fc84 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c | |||
@@ -29,12 +29,12 @@ | |||
29 | * Graphics object classes | 29 | * Graphics object classes |
30 | ******************************************************************************/ | 30 | ******************************************************************************/ |
31 | 31 | ||
32 | static struct nouveau_oclass | 32 | struct nouveau_oclass |
33 | nvf0_graph_sclass[] = { | 33 | nvf0_graph_sclass[] = { |
34 | { 0x902d, &nouveau_object_ofuncs }, | 34 | { 0x902d, &nouveau_object_ofuncs }, |
35 | { 0xa140, &nouveau_object_ofuncs }, | 35 | { 0xa140, &nouveau_object_ofuncs }, |
36 | { 0xa197, &nouveau_object_ofuncs }, | 36 | { KEPLER_B, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds }, |
37 | { 0xa1c0, &nouveau_object_ofuncs }, | 37 | { KEPLER_COMPUTE_B, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds }, |
38 | {} | 38 | {} |
39 | }; | 39 | }; |
40 | 40 | ||
@@ -50,7 +50,7 @@ nvf0_graph_init_fe_0[] = { | |||
50 | {} | 50 | {} |
51 | }; | 51 | }; |
52 | 52 | ||
53 | static const struct nvc0_graph_init | 53 | const struct nvc0_graph_init |
54 | nvf0_graph_init_ds_0[] = { | 54 | nvf0_graph_init_ds_0[] = { |
55 | { 0x405844, 1, 0x04, 0x00ffffff }, | 55 | { 0x405844, 1, 0x04, 0x00ffffff }, |
56 | { 0x405850, 1, 0x04, 0x00000000 }, | 56 | { 0x405850, 1, 0x04, 0x00000000 }, |
@@ -88,7 +88,7 @@ nvf0_graph_init_gpc_unk_1[] = { | |||
88 | {} | 88 | {} |
89 | }; | 89 | }; |
90 | 90 | ||
91 | static const struct nvc0_graph_init | 91 | const struct nvc0_graph_init |
92 | nvf0_graph_init_tex_0[] = { | 92 | nvf0_graph_init_tex_0[] = { |
93 | { 0x419ab0, 1, 0x04, 0x00000000 }, | 93 | { 0x419ab0, 1, 0x04, 0x00000000 }, |
94 | { 0x419ac8, 1, 0x04, 0x00000000 }, | 94 | { 0x419ac8, 1, 0x04, 0x00000000 }, |
@@ -170,7 +170,7 @@ nvf0_graph_pack_mmio[] = { | |||
170 | * PGRAPH engine/subdev functions | 170 | * PGRAPH engine/subdev functions |
171 | ******************************************************************************/ | 171 | ******************************************************************************/ |
172 | 172 | ||
173 | static int | 173 | int |
174 | nvf0_graph_fini(struct nouveau_object *object, bool suspend) | 174 | nvf0_graph_fini(struct nouveau_object *object, bool suspend) |
175 | { | 175 | { |
176 | struct nvc0_graph_priv *priv = (void *)object; | 176 | struct nvc0_graph_priv *priv = (void *)object; |
@@ -209,7 +209,7 @@ nvf0_graph_fini(struct nouveau_object *object, bool suspend) | |||
209 | 209 | ||
210 | #include "fuc/hubnvf0.fuc.h" | 210 | #include "fuc/hubnvf0.fuc.h" |
211 | 211 | ||
212 | static struct nvc0_graph_ucode | 212 | struct nvc0_graph_ucode |
213 | nvf0_graph_fecs_ucode = { | 213 | nvf0_graph_fecs_ucode = { |
214 | .code.data = nvf0_grhub_code, | 214 | .code.data = nvf0_grhub_code, |
215 | .code.size = sizeof(nvf0_grhub_code), | 215 | .code.size = sizeof(nvf0_grhub_code), |
@@ -219,7 +219,7 @@ nvf0_graph_fecs_ucode = { | |||
219 | 219 | ||
220 | #include "fuc/gpcnvf0.fuc.h" | 220 | #include "fuc/gpcnvf0.fuc.h" |
221 | 221 | ||
222 | static struct nvc0_graph_ucode | 222 | struct nvc0_graph_ucode |
223 | nvf0_graph_gpccs_ucode = { | 223 | nvf0_graph_gpccs_ucode = { |
224 | .code.data = nvf0_grgpc_code, | 224 | .code.data = nvf0_grgpc_code, |
225 | .code.size = sizeof(nvf0_grgpc_code), | 225 | .code.size = sizeof(nvf0_grgpc_code), |
@@ -241,4 +241,5 @@ nvf0_graph_oclass = &(struct nvc0_graph_oclass) { | |||
241 | .mmio = nvf0_graph_pack_mmio, | 241 | .mmio = nvf0_graph_pack_mmio, |
242 | .fecs.ucode = &nvf0_graph_fecs_ucode, | 242 | .fecs.ucode = &nvf0_graph_fecs_ucode, |
243 | .gpccs.ucode = &nvf0_graph_gpccs_ucode, | 243 | .gpccs.ucode = &nvf0_graph_gpccs_ucode, |
244 | .ppc_nr = 2, | ||
244 | }.base; | 245 | }.base; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c index 7eb6d94c84e2..d88c700b2f69 100644 --- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c +++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c | |||
@@ -24,7 +24,6 @@ | |||
24 | 24 | ||
25 | #include <core/client.h> | 25 | #include <core/client.h> |
26 | #include <core/os.h> | 26 | #include <core/os.h> |
27 | #include <core/class.h> | ||
28 | #include <core/engctx.h> | 27 | #include <core/engctx.h> |
29 | #include <core/handle.h> | 28 | #include <core/handle.h> |
30 | 29 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c index d4e7ec0ba68c..bdb2f20ff7b1 100644 --- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c +++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c | |||
@@ -23,7 +23,6 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/os.h> |
26 | #include <core/class.h> | ||
27 | #include <core/engctx.h> | 26 | #include <core/engctx.h> |
28 | 27 | ||
29 | #include <subdev/fb.h> | 28 | #include <subdev/fb.h> |
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c index 3d8c2133e0e8..72c7f33fd29b 100644 --- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c +++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c | |||
@@ -23,7 +23,6 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/os.h> |
26 | #include <core/class.h> | ||
27 | #include <core/client.h> | 26 | #include <core/client.h> |
28 | #include <core/engctx.h> | 27 | #include <core/engctx.h> |
29 | #include <core/handle.h> | 28 | #include <core/handle.h> |
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c index 37a2bd9e8078..cae33f86b11a 100644 --- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c | |||
@@ -23,7 +23,6 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/os.h> |
26 | #include <core/class.h> | ||
27 | #include <core/engctx.h> | 26 | #include <core/engctx.h> |
28 | 27 | ||
29 | #include <subdev/vm.h> | 28 | #include <subdev/vm.h> |
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c index 96f5aa92677b..e9cc8b116a24 100644 --- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c +++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c | |||
@@ -23,7 +23,6 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/os.h> |
26 | #include <core/class.h> | ||
27 | #include <core/engctx.h> | 26 | #include <core/engctx.h> |
28 | 27 | ||
29 | #include <subdev/vm.h> | 28 | #include <subdev/vm.h> |
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c index e9c5e51943ef..63013812f7c9 100644 --- a/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c +++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c | |||
@@ -22,8 +22,11 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/client.h> | ||
25 | #include <core/option.h> | 26 | #include <core/option.h> |
26 | #include <core/class.h> | 27 | #include <nvif/unpack.h> |
28 | #include <nvif/class.h> | ||
29 | #include <nvif/ioctl.h> | ||
27 | 30 | ||
28 | #include <subdev/clock.h> | 31 | #include <subdev/clock.h> |
29 | 32 | ||
@@ -101,24 +104,28 @@ nouveau_perfsig_wrap(struct nouveau_perfmon *ppm, const char *name, | |||
101 | * Perfmon object classes | 104 | * Perfmon object classes |
102 | ******************************************************************************/ | 105 | ******************************************************************************/ |
103 | static int | 106 | static int |
104 | nouveau_perfctr_query(struct nouveau_object *object, u32 mthd, | 107 | nouveau_perfctr_query(struct nouveau_object *object, void *data, u32 size) |
105 | void *data, u32 size) | ||
106 | { | 108 | { |
109 | union { | ||
110 | struct nvif_perfctr_query_v0 v0; | ||
111 | } *args = data; | ||
107 | struct nouveau_device *device = nv_device(object); | 112 | struct nouveau_device *device = nv_device(object); |
108 | struct nouveau_perfmon *ppm = (void *)object->engine; | 113 | struct nouveau_perfmon *ppm = (void *)object->engine; |
109 | struct nouveau_perfdom *dom = NULL, *chk; | 114 | struct nouveau_perfdom *dom = NULL, *chk; |
110 | struct nv_perfctr_query *args = data; | ||
111 | const bool all = nouveau_boolopt(device->cfgopt, "NvPmShowAll", false); | 115 | const bool all = nouveau_boolopt(device->cfgopt, "NvPmShowAll", false); |
112 | const bool raw = nouveau_boolopt(device->cfgopt, "NvPmUnnamed", all); | 116 | const bool raw = nouveau_boolopt(device->cfgopt, "NvPmUnnamed", all); |
113 | const char *name; | 117 | const char *name; |
114 | int tmp = 0, di, si; | 118 | int tmp = 0, di, si; |
115 | char path[64]; | 119 | int ret; |
116 | |||
117 | if (size < sizeof(*args)) | ||
118 | return -EINVAL; | ||
119 | 120 | ||
120 | di = (args->iter & 0xff000000) >> 24; | 121 | nv_ioctl(object, "perfctr query size %d\n", size); |
121 | si = (args->iter & 0x00ffffff) - 1; | 122 | if (nvif_unpack(args->v0, 0, 0, false)) { |
123 | nv_ioctl(object, "perfctr query vers %d iter %08x\n", | ||
124 | args->v0.version, args->v0.iter); | ||
125 | di = (args->v0.iter & 0xff000000) >> 24; | ||
126 | si = (args->v0.iter & 0x00ffffff) - 1; | ||
127 | } else | ||
128 | return ret; | ||
122 | 129 | ||
123 | list_for_each_entry(chk, &ppm->domains, head) { | 130 | list_for_each_entry(chk, &ppm->domains, head) { |
124 | if (tmp++ == di) { | 131 | if (tmp++ == di) { |
@@ -132,19 +139,17 @@ nouveau_perfctr_query(struct nouveau_object *object, u32 mthd, | |||
132 | 139 | ||
133 | if (si >= 0) { | 140 | if (si >= 0) { |
134 | if (raw || !(name = dom->signal[si].name)) { | 141 | if (raw || !(name = dom->signal[si].name)) { |
135 | snprintf(path, sizeof(path), "/%s/%02x", dom->name, si); | 142 | snprintf(args->v0.name, sizeof(args->v0.name), |
136 | name = path; | 143 | "/%s/%02x", dom->name, si); |
144 | } else { | ||
145 | strncpy(args->v0.name, name, sizeof(args->v0.name)); | ||
137 | } | 146 | } |
138 | |||
139 | if (args->name) | ||
140 | strncpy(args->name, name, args->size); | ||
141 | args->size = strlen(name) + 1; | ||
142 | } | 147 | } |
143 | 148 | ||
144 | do { | 149 | do { |
145 | while (++si < dom->signal_nr) { | 150 | while (++si < dom->signal_nr) { |
146 | if (all || dom->signal[si].name) { | 151 | if (all || dom->signal[si].name) { |
147 | args->iter = (di << 24) | ++si; | 152 | args->v0.iter = (di << 24) | ++si; |
148 | return 0; | 153 | return 0; |
149 | } | 154 | } |
150 | } | 155 | } |
@@ -153,21 +158,26 @@ nouveau_perfctr_query(struct nouveau_object *object, u32 mthd, | |||
153 | dom = list_entry(dom->head.next, typeof(*dom), head); | 158 | dom = list_entry(dom->head.next, typeof(*dom), head); |
154 | } while (&dom->head != &ppm->domains); | 159 | } while (&dom->head != &ppm->domains); |
155 | 160 | ||
156 | args->iter = 0xffffffff; | 161 | args->v0.iter = 0xffffffff; |
157 | return 0; | 162 | return 0; |
158 | } | 163 | } |
159 | 164 | ||
160 | static int | 165 | static int |
161 | nouveau_perfctr_sample(struct nouveau_object *object, u32 mthd, | 166 | nouveau_perfctr_sample(struct nouveau_object *object, void *data, u32 size) |
162 | void *data, u32 size) | ||
163 | { | 167 | { |
168 | union { | ||
169 | struct nvif_perfctr_sample none; | ||
170 | } *args = data; | ||
164 | struct nouveau_perfmon *ppm = (void *)object->engine; | 171 | struct nouveau_perfmon *ppm = (void *)object->engine; |
165 | struct nouveau_perfctr *ctr, *tmp; | 172 | struct nouveau_perfctr *ctr, *tmp; |
166 | struct nouveau_perfdom *dom; | 173 | struct nouveau_perfdom *dom; |
167 | struct nv_perfctr_sample *args = data; | 174 | int ret; |
168 | 175 | ||
169 | if (size < sizeof(*args)) | 176 | nv_ioctl(object, "perfctr sample size %d\n", size); |
170 | return -EINVAL; | 177 | if (nvif_unvers(args->none)) { |
178 | nv_ioctl(object, "perfctr sample\n"); | ||
179 | } else | ||
180 | return ret; | ||
171 | ppm->sequence++; | 181 | ppm->sequence++; |
172 | 182 | ||
173 | list_for_each_entry(dom, &ppm->domains, head) { | 183 | list_for_each_entry(dom, &ppm->domains, head) { |
@@ -206,22 +216,45 @@ nouveau_perfctr_sample(struct nouveau_object *object, u32 mthd, | |||
206 | } | 216 | } |
207 | 217 | ||
208 | static int | 218 | static int |
209 | nouveau_perfctr_read(struct nouveau_object *object, u32 mthd, | 219 | nouveau_perfctr_read(struct nouveau_object *object, void *data, u32 size) |
210 | void *data, u32 size) | ||
211 | { | 220 | { |
221 | union { | ||
222 | struct nvif_perfctr_read_v0 v0; | ||
223 | } *args = data; | ||
212 | struct nouveau_perfctr *ctr = (void *)object; | 224 | struct nouveau_perfctr *ctr = (void *)object; |
213 | struct nv_perfctr_read *args = data; | 225 | int ret; |
226 | |||
227 | nv_ioctl(object, "perfctr read size %d\n", size); | ||
228 | if (nvif_unpack(args->v0, 0, 0, false)) { | ||
229 | nv_ioctl(object, "perfctr read vers %d\n", args->v0.version); | ||
230 | } else | ||
231 | return ret; | ||
214 | 232 | ||
215 | if (size < sizeof(*args)) | ||
216 | return -EINVAL; | ||
217 | if (!ctr->clk) | 233 | if (!ctr->clk) |
218 | return -EAGAIN; | 234 | return -EAGAIN; |
219 | 235 | ||
220 | args->clk = ctr->clk; | 236 | args->v0.clk = ctr->clk; |
221 | args->ctr = ctr->ctr; | 237 | args->v0.ctr = ctr->ctr; |
222 | return 0; | 238 | return 0; |
223 | } | 239 | } |
224 | 240 | ||
241 | static int | ||
242 | nouveau_perfctr_mthd(struct nouveau_object *object, u32 mthd, | ||
243 | void *data, u32 size) | ||
244 | { | ||
245 | switch (mthd) { | ||
246 | case NVIF_PERFCTR_V0_QUERY: | ||
247 | return nouveau_perfctr_query(object, data, size); | ||
248 | case NVIF_PERFCTR_V0_SAMPLE: | ||
249 | return nouveau_perfctr_sample(object, data, size); | ||
250 | case NVIF_PERFCTR_V0_READ: | ||
251 | return nouveau_perfctr_read(object, data, size); | ||
252 | default: | ||
253 | break; | ||
254 | } | ||
255 | return -EINVAL; | ||
256 | } | ||
257 | |||
225 | static void | 258 | static void |
226 | nouveau_perfctr_dtor(struct nouveau_object *object) | 259 | nouveau_perfctr_dtor(struct nouveau_object *object) |
227 | { | 260 | { |
@@ -237,19 +270,27 @@ nouveau_perfctr_ctor(struct nouveau_object *parent, | |||
237 | struct nouveau_oclass *oclass, void *data, u32 size, | 270 | struct nouveau_oclass *oclass, void *data, u32 size, |
238 | struct nouveau_object **pobject) | 271 | struct nouveau_object **pobject) |
239 | { | 272 | { |
273 | union { | ||
274 | struct nvif_perfctr_v0 v0; | ||
275 | } *args = data; | ||
240 | struct nouveau_perfmon *ppm = (void *)engine; | 276 | struct nouveau_perfmon *ppm = (void *)engine; |
241 | struct nouveau_perfdom *dom = NULL; | 277 | struct nouveau_perfdom *dom = NULL; |
242 | struct nouveau_perfsig *sig[4] = {}; | 278 | struct nouveau_perfsig *sig[4] = {}; |
243 | struct nouveau_perfctr *ctr; | 279 | struct nouveau_perfctr *ctr; |
244 | struct nv_perfctr_class *args = data; | ||
245 | int ret, i; | 280 | int ret, i; |
246 | 281 | ||
247 | if (size < sizeof(*args)) | 282 | nv_ioctl(parent, "create perfctr size %d\n", size); |
248 | return -EINVAL; | 283 | if (nvif_unpack(args->v0, 0, 0, false)) { |
284 | nv_ioctl(parent, "create perfctr vers %d logic_op %04x\n", | ||
285 | args->v0.version, args->v0.logic_op); | ||
286 | } else | ||
287 | return ret; | ||
249 | 288 | ||
250 | for (i = 0; i < ARRAY_SIZE(args->signal) && args->signal[i].name; i++) { | 289 | for (i = 0; i < ARRAY_SIZE(args->v0.name) && args->v0.name[i][0]; i++) { |
251 | sig[i] = nouveau_perfsig_find(ppm, args->signal[i].name, | 290 | sig[i] = nouveau_perfsig_find(ppm, args->v0.name[i], |
252 | args->signal[i].size, &dom); | 291 | strnlen(args->v0.name[i], |
292 | sizeof(args->v0.name[i])), | ||
293 | &dom); | ||
253 | if (!sig[i]) | 294 | if (!sig[i]) |
254 | return -EINVAL; | 295 | return -EINVAL; |
255 | } | 296 | } |
@@ -260,7 +301,7 @@ nouveau_perfctr_ctor(struct nouveau_object *parent, | |||
260 | return ret; | 301 | return ret; |
261 | 302 | ||
262 | ctr->slot = -1; | 303 | ctr->slot = -1; |
263 | ctr->logic_op = args->logic_op; | 304 | ctr->logic_op = args->v0.logic_op; |
264 | ctr->signal[0] = sig[0]; | 305 | ctr->signal[0] = sig[0]; |
265 | ctr->signal[1] = sig[1]; | 306 | ctr->signal[1] = sig[1]; |
266 | ctr->signal[2] = sig[2]; | 307 | ctr->signal[2] = sig[2]; |
@@ -276,21 +317,13 @@ nouveau_perfctr_ofuncs = { | |||
276 | .dtor = nouveau_perfctr_dtor, | 317 | .dtor = nouveau_perfctr_dtor, |
277 | .init = nouveau_object_init, | 318 | .init = nouveau_object_init, |
278 | .fini = nouveau_object_fini, | 319 | .fini = nouveau_object_fini, |
279 | }; | 320 | .mthd = nouveau_perfctr_mthd, |
280 | |||
281 | static struct nouveau_omthds | ||
282 | nouveau_perfctr_omthds[] = { | ||
283 | { NV_PERFCTR_QUERY, NV_PERFCTR_QUERY, nouveau_perfctr_query }, | ||
284 | { NV_PERFCTR_SAMPLE, NV_PERFCTR_SAMPLE, nouveau_perfctr_sample }, | ||
285 | { NV_PERFCTR_READ, NV_PERFCTR_READ, nouveau_perfctr_read }, | ||
286 | {} | ||
287 | }; | 321 | }; |
288 | 322 | ||
289 | struct nouveau_oclass | 323 | struct nouveau_oclass |
290 | nouveau_perfmon_sclass[] = { | 324 | nouveau_perfmon_sclass[] = { |
291 | { .handle = NV_PERFCTR_CLASS, | 325 | { .handle = NVIF_IOCTL_NEW_V0_PERFCTR, |
292 | .ofuncs = &nouveau_perfctr_ofuncs, | 326 | .ofuncs = &nouveau_perfctr_ofuncs, |
293 | .omthds = nouveau_perfctr_omthds, | ||
294 | }, | 327 | }, |
295 | {}, | 328 | {}, |
296 | }; | 329 | }; |
@@ -303,6 +336,7 @@ nouveau_perfctx_dtor(struct nouveau_object *object) | |||
303 | { | 336 | { |
304 | struct nouveau_perfmon *ppm = (void *)object->engine; | 337 | struct nouveau_perfmon *ppm = (void *)object->engine; |
305 | mutex_lock(&nv_subdev(ppm)->mutex); | 338 | mutex_lock(&nv_subdev(ppm)->mutex); |
339 | nouveau_engctx_destroy(&ppm->context->base); | ||
306 | ppm->context = NULL; | 340 | ppm->context = NULL; |
307 | mutex_unlock(&nv_subdev(ppm)->mutex); | 341 | mutex_unlock(&nv_subdev(ppm)->mutex); |
308 | } | 342 | } |
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c index c571758e4a27..64df15c7f051 100644 --- a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c +++ b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c | |||
@@ -23,7 +23,6 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/os.h> |
26 | #include <core/class.h> | ||
27 | #include <core/engctx.h> | 26 | #include <core/engctx.h> |
28 | 27 | ||
29 | #include <engine/software.h> | 28 | #include <engine/software.h> |
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c index a62f11a78430..f54a2253deca 100644 --- a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c +++ b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c | |||
@@ -23,7 +23,6 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/os.h> |
26 | #include <core/class.h> | ||
27 | #include <core/engctx.h> | 26 | #include <core/engctx.h> |
28 | 27 | ||
29 | #include <engine/software.h> | 28 | #include <engine/software.h> |
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c index f3b4d9dbf23c..4d2994d8cc32 100644 --- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c | |||
@@ -23,12 +23,12 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/os.h> |
26 | #include <core/class.h> | ||
27 | #include <core/engctx.h> | 26 | #include <core/engctx.h> |
28 | #include <core/namedb.h> | 27 | #include <core/namedb.h> |
29 | #include <core/handle.h> | 28 | #include <core/handle.h> |
30 | #include <core/gpuobj.h> | 29 | #include <core/gpuobj.h> |
31 | #include <core/event.h> | 30 | #include <core/event.h> |
31 | #include <nvif/event.h> | ||
32 | 32 | ||
33 | #include <subdev/bar.h> | 33 | #include <subdev/bar.h> |
34 | 34 | ||
@@ -86,10 +86,10 @@ nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd, | |||
86 | { | 86 | { |
87 | struct nv50_software_chan *chan = (void *)nv_engctx(object->parent); | 87 | struct nv50_software_chan *chan = (void *)nv_engctx(object->parent); |
88 | u32 head = *(u32 *)args; | 88 | u32 head = *(u32 *)args; |
89 | if (head >= chan->vblank.nr_event) | 89 | if (head >= nouveau_disp(chan)->vblank.index_nr) |
90 | return -EINVAL; | 90 | return -EINVAL; |
91 | 91 | ||
92 | nouveau_event_get(chan->vblank.event[head]); | 92 | nvkm_notify_get(&chan->vblank.notify[head]); |
93 | return 0; | 93 | return 0; |
94 | } | 94 | } |
95 | 95 | ||
@@ -124,9 +124,10 @@ nv50_software_sclass[] = { | |||
124 | ******************************************************************************/ | 124 | ******************************************************************************/ |
125 | 125 | ||
126 | static int | 126 | static int |
127 | nv50_software_vblsem_release(void *data, u32 type, int head) | 127 | nv50_software_vblsem_release(struct nvkm_notify *notify) |
128 | { | 128 | { |
129 | struct nv50_software_chan *chan = data; | 129 | struct nv50_software_chan *chan = |
130 | container_of(notify, typeof(*chan), vblank.notify[notify->index]); | ||
130 | struct nv50_software_priv *priv = (void *)nv_object(chan)->engine; | 131 | struct nv50_software_priv *priv = (void *)nv_object(chan)->engine; |
131 | struct nouveau_bar *bar = nouveau_bar(priv); | 132 | struct nouveau_bar *bar = nouveau_bar(priv); |
132 | 133 | ||
@@ -142,7 +143,7 @@ nv50_software_vblsem_release(void *data, u32 type, int head) | |||
142 | nv_wr32(priv, 0x060014, chan->vblank.value); | 143 | nv_wr32(priv, 0x060014, chan->vblank.value); |
143 | } | 144 | } |
144 | 145 | ||
145 | return NVKM_EVENT_DROP; | 146 | return NVKM_NOTIFY_DROP; |
146 | } | 147 | } |
147 | 148 | ||
148 | void | 149 | void |
@@ -151,11 +152,8 @@ nv50_software_context_dtor(struct nouveau_object *object) | |||
151 | struct nv50_software_chan *chan = (void *)object; | 152 | struct nv50_software_chan *chan = (void *)object; |
152 | int i; | 153 | int i; |
153 | 154 | ||
154 | if (chan->vblank.event) { | 155 | for (i = 0; i < ARRAY_SIZE(chan->vblank.notify); i++) |
155 | for (i = 0; i < chan->vblank.nr_event; i++) | 156 | nvkm_notify_fini(&chan->vblank.notify[i]); |
156 | nouveau_event_ref(NULL, &chan->vblank.event[i]); | ||
157 | kfree(chan->vblank.event); | ||
158 | } | ||
159 | 157 | ||
160 | nouveau_software_context_destroy(&chan->base); | 158 | nouveau_software_context_destroy(&chan->base); |
161 | } | 159 | } |
@@ -176,15 +174,14 @@ nv50_software_context_ctor(struct nouveau_object *parent, | |||
176 | if (ret) | 174 | if (ret) |
177 | return ret; | 175 | return ret; |
178 | 176 | ||
179 | chan->vblank.nr_event = pdisp ? pdisp->vblank->index_nr : 0; | 177 | for (i = 0; pdisp && i < pdisp->vblank.index_nr; i++) { |
180 | chan->vblank.event = kzalloc(chan->vblank.nr_event * | 178 | ret = nvkm_notify_init(&pdisp->vblank, pclass->vblank, false, |
181 | sizeof(*chan->vblank.event), GFP_KERNEL); | 179 | &(struct nvif_notify_head_req_v0) { |
182 | if (!chan->vblank.event) | 180 | .head = i, |
183 | return -ENOMEM; | 181 | }, |
184 | 182 | sizeof(struct nvif_notify_head_req_v0), | |
185 | for (i = 0; i < chan->vblank.nr_event; i++) { | 183 | sizeof(struct nvif_notify_head_rep_v0), |
186 | ret = nouveau_event_new(pdisp->vblank, 1, i, pclass->vblank, | 184 | &chan->vblank.notify[i]); |
187 | chan, &chan->vblank.event[i]); | ||
188 | if (ret) | 185 | if (ret) |
189 | return ret; | 186 | return ret; |
190 | } | 187 | } |
@@ -198,7 +195,7 @@ nv50_software_cclass = { | |||
198 | .base.handle = NV_ENGCTX(SW, 0x50), | 195 | .base.handle = NV_ENGCTX(SW, 0x50), |
199 | .base.ofuncs = &(struct nouveau_ofuncs) { | 196 | .base.ofuncs = &(struct nouveau_ofuncs) { |
200 | .ctor = nv50_software_context_ctor, | 197 | .ctor = nv50_software_context_ctor, |
201 | .dtor = _nouveau_software_context_dtor, | 198 | .dtor = nv50_software_context_dtor, |
202 | .init = _nouveau_software_context_init, | 199 | .init = _nouveau_software_context_init, |
203 | .fini = _nouveau_software_context_fini, | 200 | .fini = _nouveau_software_context_fini, |
204 | }, | 201 | }, |
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.h b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h index bb49a7a20857..41542e725b4b 100644 --- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.h +++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h | |||
@@ -19,14 +19,13 @@ int nv50_software_ctor(struct nouveau_object *, struct nouveau_object *, | |||
19 | 19 | ||
20 | struct nv50_software_cclass { | 20 | struct nv50_software_cclass { |
21 | struct nouveau_oclass base; | 21 | struct nouveau_oclass base; |
22 | int (*vblank)(void *, u32, int); | 22 | int (*vblank)(struct nvkm_notify *); |
23 | }; | 23 | }; |
24 | 24 | ||
25 | struct nv50_software_chan { | 25 | struct nv50_software_chan { |
26 | struct nouveau_software_chan base; | 26 | struct nouveau_software_chan base; |
27 | struct { | 27 | struct { |
28 | struct nouveau_eventh **event; | 28 | struct nvkm_notify notify[4]; |
29 | int nr_event; | ||
30 | u32 channel; | 29 | u32 channel; |
31 | u32 ctxdma; | 30 | u32 ctxdma; |
32 | u64 offset; | 31 | u64 offset; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c index 135c20f38356..6af370d3a06d 100644 --- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c | |||
@@ -23,7 +23,6 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/os.h> | 25 | #include <core/os.h> |
26 | #include <core/class.h> | ||
27 | #include <core/engctx.h> | 26 | #include <core/engctx.h> |
28 | #include <core/event.h> | 27 | #include <core/event.h> |
29 | 28 | ||
@@ -104,9 +103,10 @@ nvc0_software_sclass[] = { | |||
104 | ******************************************************************************/ | 103 | ******************************************************************************/ |
105 | 104 | ||
106 | static int | 105 | static int |
107 | nvc0_software_vblsem_release(void *data, u32 type, int head) | 106 | nvc0_software_vblsem_release(struct nvkm_notify *notify) |
108 | { | 107 | { |
109 | struct nv50_software_chan *chan = data; | 108 | struct nv50_software_chan *chan = |
109 | container_of(notify, typeof(*chan), vblank.notify[notify->index]); | ||
110 | struct nv50_software_priv *priv = (void *)nv_object(chan)->engine; | 110 | struct nv50_software_priv *priv = (void *)nv_object(chan)->engine; |
111 | struct nouveau_bar *bar = nouveau_bar(priv); | 111 | struct nouveau_bar *bar = nouveau_bar(priv); |
112 | 112 | ||
@@ -116,7 +116,7 @@ nvc0_software_vblsem_release(void *data, u32 type, int head) | |||
116 | nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset)); | 116 | nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset)); |
117 | nv_wr32(priv, 0x060014, chan->vblank.value); | 117 | nv_wr32(priv, 0x060014, chan->vblank.value); |
118 | 118 | ||
119 | return NVKM_EVENT_DROP; | 119 | return NVKM_NOTIFY_DROP; |
120 | } | 120 | } |
121 | 121 | ||
122 | static struct nv50_software_cclass | 122 | static struct nv50_software_cclass |
@@ -124,7 +124,7 @@ nvc0_software_cclass = { | |||
124 | .base.handle = NV_ENGCTX(SW, 0xc0), | 124 | .base.handle = NV_ENGCTX(SW, 0xc0), |
125 | .base.ofuncs = &(struct nouveau_ofuncs) { | 125 | .base.ofuncs = &(struct nouveau_ofuncs) { |
126 | .ctor = nv50_software_context_ctor, | 126 | .ctor = nv50_software_context_ctor, |
127 | .dtor = _nouveau_software_context_dtor, | 127 | .dtor = nv50_software_context_dtor, |
128 | .init = _nouveau_software_context_init, | 128 | .init = _nouveau_software_context_init, |
129 | .fini = _nouveau_software_context_fini, | 129 | .fini = _nouveau_software_context_fini, |
130 | }, | 130 | }, |
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h deleted file mode 100644 index e0c812bc884f..000000000000 --- a/drivers/gpu/drm/nouveau/core/include/core/class.h +++ /dev/null | |||
@@ -1,470 +0,0 @@ | |||
1 | #ifndef __NOUVEAU_CLASS_H__ | ||
2 | #define __NOUVEAU_CLASS_H__ | ||
3 | |||
4 | /* Device class | ||
5 | * | ||
6 | * 0080: NV_DEVICE | ||
7 | */ | ||
8 | #define NV_DEVICE_CLASS 0x00000080 | ||
9 | |||
10 | #define NV_DEVICE_DISABLE_IDENTIFY 0x0000000000000001ULL | ||
11 | #define NV_DEVICE_DISABLE_MMIO 0x0000000000000002ULL | ||
12 | #define NV_DEVICE_DISABLE_VBIOS 0x0000000000000004ULL | ||
13 | #define NV_DEVICE_DISABLE_CORE 0x0000000000000008ULL | ||
14 | #define NV_DEVICE_DISABLE_DISP 0x0000000000010000ULL | ||
15 | #define NV_DEVICE_DISABLE_FIFO 0x0000000000020000ULL | ||
16 | #define NV_DEVICE_DISABLE_GRAPH 0x0000000100000000ULL | ||
17 | #define NV_DEVICE_DISABLE_MPEG 0x0000000200000000ULL | ||
18 | #define NV_DEVICE_DISABLE_ME 0x0000000400000000ULL | ||
19 | #define NV_DEVICE_DISABLE_VP 0x0000000800000000ULL | ||
20 | #define NV_DEVICE_DISABLE_CRYPT 0x0000001000000000ULL | ||
21 | #define NV_DEVICE_DISABLE_BSP 0x0000002000000000ULL | ||
22 | #define NV_DEVICE_DISABLE_PPP 0x0000004000000000ULL | ||
23 | #define NV_DEVICE_DISABLE_COPY0 0x0000008000000000ULL | ||
24 | #define NV_DEVICE_DISABLE_COPY1 0x0000010000000000ULL | ||
25 | #define NV_DEVICE_DISABLE_VIC 0x0000020000000000ULL | ||
26 | #define NV_DEVICE_DISABLE_VENC 0x0000040000000000ULL | ||
27 | |||
28 | struct nv_device_class { | ||
29 | u64 device; /* device identifier, ~0 for client default */ | ||
30 | u64 disable; /* disable particular subsystems */ | ||
31 | u64 debug0; /* as above, but *internal* ids, and *NOT* ABI */ | ||
32 | }; | ||
33 | |||
34 | /* DMA object classes | ||
35 | * | ||
36 | * 0002: NV_DMA_FROM_MEMORY | ||
37 | * 0003: NV_DMA_TO_MEMORY | ||
38 | * 003d: NV_DMA_IN_MEMORY | ||
39 | */ | ||
40 | #define NV_DMA_FROM_MEMORY_CLASS 0x00000002 | ||
41 | #define NV_DMA_TO_MEMORY_CLASS 0x00000003 | ||
42 | #define NV_DMA_IN_MEMORY_CLASS 0x0000003d | ||
43 | |||
44 | #define NV_DMA_TARGET_MASK 0x000000ff | ||
45 | #define NV_DMA_TARGET_VM 0x00000000 | ||
46 | #define NV_DMA_TARGET_VRAM 0x00000001 | ||
47 | #define NV_DMA_TARGET_PCI 0x00000002 | ||
48 | #define NV_DMA_TARGET_PCI_US 0x00000003 | ||
49 | #define NV_DMA_TARGET_AGP 0x00000004 | ||
50 | #define NV_DMA_ACCESS_MASK 0x00000f00 | ||
51 | #define NV_DMA_ACCESS_VM 0x00000000 | ||
52 | #define NV_DMA_ACCESS_RD 0x00000100 | ||
53 | #define NV_DMA_ACCESS_WR 0x00000200 | ||
54 | #define NV_DMA_ACCESS_RDWR 0x00000300 | ||
55 | |||
56 | /* NV50:NVC0 */ | ||
57 | #define NV50_DMA_CONF0_ENABLE 0x80000000 | ||
58 | #define NV50_DMA_CONF0_PRIV 0x00300000 | ||
59 | #define NV50_DMA_CONF0_PRIV_VM 0x00000000 | ||
60 | #define NV50_DMA_CONF0_PRIV_US 0x00100000 | ||
61 | #define NV50_DMA_CONF0_PRIV__S 0x00200000 | ||
62 | #define NV50_DMA_CONF0_PART 0x00030000 | ||
63 | #define NV50_DMA_CONF0_PART_VM 0x00000000 | ||
64 | #define NV50_DMA_CONF0_PART_256 0x00010000 | ||
65 | #define NV50_DMA_CONF0_PART_1KB 0x00020000 | ||
66 | #define NV50_DMA_CONF0_COMP 0x00000180 | ||
67 | #define NV50_DMA_CONF0_COMP_NONE 0x00000000 | ||
68 | #define NV50_DMA_CONF0_COMP_VM 0x00000180 | ||
69 | #define NV50_DMA_CONF0_TYPE 0x0000007f | ||
70 | #define NV50_DMA_CONF0_TYPE_LINEAR 0x00000000 | ||
71 | #define NV50_DMA_CONF0_TYPE_VM 0x0000007f | ||
72 | |||
73 | /* NVC0:NVD9 */ | ||
74 | #define NVC0_DMA_CONF0_ENABLE 0x80000000 | ||
75 | #define NVC0_DMA_CONF0_PRIV 0x00300000 | ||
76 | #define NVC0_DMA_CONF0_PRIV_VM 0x00000000 | ||
77 | #define NVC0_DMA_CONF0_PRIV_US 0x00100000 | ||
78 | #define NVC0_DMA_CONF0_PRIV__S 0x00200000 | ||
79 | #define NVC0_DMA_CONF0_UNKN /* PART? */ 0x00030000 | ||
80 | #define NVC0_DMA_CONF0_TYPE 0x000000ff | ||
81 | #define NVC0_DMA_CONF0_TYPE_LINEAR 0x00000000 | ||
82 | #define NVC0_DMA_CONF0_TYPE_VM 0x000000ff | ||
83 | |||
84 | /* NVD9- */ | ||
85 | #define NVD0_DMA_CONF0_ENABLE 0x80000000 | ||
86 | #define NVD0_DMA_CONF0_PAGE 0x00000400 | ||
87 | #define NVD0_DMA_CONF0_PAGE_LP 0x00000000 | ||
88 | #define NVD0_DMA_CONF0_PAGE_SP 0x00000400 | ||
89 | #define NVD0_DMA_CONF0_TYPE 0x000000ff | ||
90 | #define NVD0_DMA_CONF0_TYPE_LINEAR 0x00000000 | ||
91 | #define NVD0_DMA_CONF0_TYPE_VM 0x000000ff | ||
92 | |||
93 | struct nv_dma_class { | ||
94 | u32 flags; | ||
95 | u32 pad0; | ||
96 | u64 start; | ||
97 | u64 limit; | ||
98 | u32 conf0; | ||
99 | }; | ||
100 | |||
101 | /* Perfmon counter class | ||
102 | * | ||
103 | * XXXX: NV_PERFCTR | ||
104 | */ | ||
105 | #define NV_PERFCTR_CLASS 0x0000ffff | ||
106 | #define NV_PERFCTR_QUERY 0x00000000 | ||
107 | #define NV_PERFCTR_SAMPLE 0x00000001 | ||
108 | #define NV_PERFCTR_READ 0x00000002 | ||
109 | |||
110 | struct nv_perfctr_class { | ||
111 | u16 logic_op; | ||
112 | struct { | ||
113 | char __user *name; /*XXX: use cfu when exposed to userspace */ | ||
114 | u32 size; | ||
115 | } signal[4]; | ||
116 | }; | ||
117 | |||
118 | struct nv_perfctr_query { | ||
119 | u32 iter; | ||
120 | u32 size; | ||
121 | char __user *name; /*XXX: use ctu when exposed to userspace */ | ||
122 | }; | ||
123 | |||
124 | struct nv_perfctr_sample { | ||
125 | }; | ||
126 | |||
127 | struct nv_perfctr_read { | ||
128 | u32 ctr; | ||
129 | u32 clk; | ||
130 | }; | ||
131 | |||
132 | /* Device control class | ||
133 | * | ||
134 | * XXXX: NV_CONTROL | ||
135 | */ | ||
136 | #define NV_CONTROL_CLASS 0x0000fffe | ||
137 | |||
138 | #define NV_CONTROL_PSTATE_INFO 0x00000000 | ||
139 | #define NV_CONTROL_PSTATE_INFO_USTATE_DISABLE (-1) | ||
140 | #define NV_CONTROL_PSTATE_INFO_USTATE_PERFMON (-2) | ||
141 | #define NV_CONTROL_PSTATE_INFO_PSTATE_UNKNOWN (-1) | ||
142 | #define NV_CONTROL_PSTATE_INFO_PSTATE_PERFMON (-2) | ||
143 | #define NV_CONTROL_PSTATE_ATTR 0x00000001 | ||
144 | #define NV_CONTROL_PSTATE_ATTR_STATE_CURRENT (-1) | ||
145 | #define NV_CONTROL_PSTATE_USER 0x00000002 | ||
146 | #define NV_CONTROL_PSTATE_USER_STATE_UNKNOWN (-1) | ||
147 | #define NV_CONTROL_PSTATE_USER_STATE_PERFMON (-2) | ||
148 | |||
149 | struct nv_control_pstate_info { | ||
150 | u32 count; /* out: number of power states */ | ||
151 | s32 ustate; /* out: current target pstate index */ | ||
152 | u32 pstate; /* out: current pstate index */ | ||
153 | }; | ||
154 | |||
155 | struct nv_control_pstate_attr { | ||
156 | s32 state; /* in: index of pstate to query | ||
157 | * out: pstate identifier | ||
158 | */ | ||
159 | u32 index; /* in: index of attribute to query | ||
160 | * out: index of next attribute, or 0 if no more | ||
161 | */ | ||
162 | char name[32]; | ||
163 | char unit[16]; | ||
164 | u32 min; | ||
165 | u32 max; | ||
166 | }; | ||
167 | |||
168 | struct nv_control_pstate_user { | ||
169 | s32 state; /* in: pstate identifier */ | ||
170 | }; | ||
171 | |||
172 | /* DMA FIFO channel classes | ||
173 | * | ||
174 | * 006b: NV03_CHANNEL_DMA | ||
175 | * 006e: NV10_CHANNEL_DMA | ||
176 | * 176e: NV17_CHANNEL_DMA | ||
177 | * 406e: NV40_CHANNEL_DMA | ||
178 | * 506e: NV50_CHANNEL_DMA | ||
179 | * 826e: NV84_CHANNEL_DMA | ||
180 | */ | ||
181 | #define NV03_CHANNEL_DMA_CLASS 0x0000006b | ||
182 | #define NV10_CHANNEL_DMA_CLASS 0x0000006e | ||
183 | #define NV17_CHANNEL_DMA_CLASS 0x0000176e | ||
184 | #define NV40_CHANNEL_DMA_CLASS 0x0000406e | ||
185 | #define NV50_CHANNEL_DMA_CLASS 0x0000506e | ||
186 | #define NV84_CHANNEL_DMA_CLASS 0x0000826e | ||
187 | |||
188 | struct nv03_channel_dma_class { | ||
189 | u32 pushbuf; | ||
190 | u32 pad0; | ||
191 | u64 offset; | ||
192 | }; | ||
193 | |||
194 | /* Indirect FIFO channel classes | ||
195 | * | ||
196 | * 506f: NV50_CHANNEL_IND | ||
197 | * 826f: NV84_CHANNEL_IND | ||
198 | * 906f: NVC0_CHANNEL_IND | ||
199 | * a06f: NVE0_CHANNEL_IND | ||
200 | */ | ||
201 | |||
202 | #define NV50_CHANNEL_IND_CLASS 0x0000506f | ||
203 | #define NV84_CHANNEL_IND_CLASS 0x0000826f | ||
204 | #define NVC0_CHANNEL_IND_CLASS 0x0000906f | ||
205 | #define NVE0_CHANNEL_IND_CLASS 0x0000a06f | ||
206 | |||
207 | struct nv50_channel_ind_class { | ||
208 | u32 pushbuf; | ||
209 | u32 ilength; | ||
210 | u64 ioffset; | ||
211 | }; | ||
212 | |||
213 | #define NVE0_CHANNEL_IND_ENGINE_GR 0x00000001 | ||
214 | #define NVE0_CHANNEL_IND_ENGINE_VP 0x00000002 | ||
215 | #define NVE0_CHANNEL_IND_ENGINE_PPP 0x00000004 | ||
216 | #define NVE0_CHANNEL_IND_ENGINE_BSP 0x00000008 | ||
217 | #define NVE0_CHANNEL_IND_ENGINE_CE0 0x00000010 | ||
218 | #define NVE0_CHANNEL_IND_ENGINE_CE1 0x00000020 | ||
219 | #define NVE0_CHANNEL_IND_ENGINE_ENC 0x00000040 | ||
220 | |||
221 | struct nve0_channel_ind_class { | ||
222 | u32 pushbuf; | ||
223 | u32 ilength; | ||
224 | u64 ioffset; | ||
225 | u32 engine; | ||
226 | }; | ||
227 | |||
228 | /* 0046: NV04_DISP | ||
229 | */ | ||
230 | |||
231 | #define NV04_DISP_CLASS 0x00000046 | ||
232 | |||
233 | #define NV04_DISP_MTHD 0x00000000 | ||
234 | #define NV04_DISP_MTHD_HEAD 0x00000001 | ||
235 | |||
236 | #define NV04_DISP_SCANOUTPOS 0x00000000 | ||
237 | |||
238 | struct nv04_display_class { | ||
239 | }; | ||
240 | |||
241 | struct nv04_display_scanoutpos { | ||
242 | s64 time[2]; | ||
243 | u32 vblanks; | ||
244 | u32 vblanke; | ||
245 | u32 vtotal; | ||
246 | u32 vline; | ||
247 | u32 hblanks; | ||
248 | u32 hblanke; | ||
249 | u32 htotal; | ||
250 | u32 hline; | ||
251 | }; | ||
252 | |||
253 | /* 5070: NV50_DISP | ||
254 | * 8270: NV84_DISP | ||
255 | * 8370: NVA0_DISP | ||
256 | * 8870: NV94_DISP | ||
257 | * 8570: NVA3_DISP | ||
258 | * 9070: NVD0_DISP | ||
259 | * 9170: NVE0_DISP | ||
260 | * 9270: NVF0_DISP | ||
261 | * 9470: GM107_DISP | ||
262 | */ | ||
263 | |||
264 | #define NV50_DISP_CLASS 0x00005070 | ||
265 | #define NV84_DISP_CLASS 0x00008270 | ||
266 | #define NVA0_DISP_CLASS 0x00008370 | ||
267 | #define NV94_DISP_CLASS 0x00008870 | ||
268 | #define NVA3_DISP_CLASS 0x00008570 | ||
269 | #define NVD0_DISP_CLASS 0x00009070 | ||
270 | #define NVE0_DISP_CLASS 0x00009170 | ||
271 | #define NVF0_DISP_CLASS 0x00009270 | ||
272 | #define GM107_DISP_CLASS 0x00009470 | ||
273 | |||
274 | #define NV50_DISP_MTHD 0x00000000 | ||
275 | #define NV50_DISP_MTHD_HEAD 0x00000003 | ||
276 | |||
277 | #define NV50_DISP_SCANOUTPOS 0x00000000 | ||
278 | |||
279 | #define NV50_DISP_SOR_MTHD 0x00010000 | ||
280 | #define NV50_DISP_SOR_MTHD_TYPE 0x0000f000 | ||
281 | #define NV50_DISP_SOR_MTHD_HEAD 0x00000018 | ||
282 | #define NV50_DISP_SOR_MTHD_LINK 0x00000004 | ||
283 | #define NV50_DISP_SOR_MTHD_OR 0x00000003 | ||
284 | |||
285 | #define NV50_DISP_SOR_PWR 0x00010000 | ||
286 | #define NV50_DISP_SOR_PWR_STATE 0x00000001 | ||
287 | #define NV50_DISP_SOR_PWR_STATE_ON 0x00000001 | ||
288 | #define NV50_DISP_SOR_PWR_STATE_OFF 0x00000000 | ||
289 | #define NVA3_DISP_SOR_HDA_ELD 0x00010100 | ||
290 | #define NV84_DISP_SOR_HDMI_PWR 0x00012000 | ||
291 | #define NV84_DISP_SOR_HDMI_PWR_STATE 0x40000000 | ||
292 | #define NV84_DISP_SOR_HDMI_PWR_STATE_OFF 0x00000000 | ||
293 | #define NV84_DISP_SOR_HDMI_PWR_STATE_ON 0x40000000 | ||
294 | #define NV84_DISP_SOR_HDMI_PWR_MAX_AC_PACKET 0x001f0000 | ||
295 | #define NV84_DISP_SOR_HDMI_PWR_REKEY 0x0000007f | ||
296 | #define NV50_DISP_SOR_LVDS_SCRIPT 0x00013000 | ||
297 | #define NV50_DISP_SOR_LVDS_SCRIPT_ID 0x0000ffff | ||
298 | #define NV94_DISP_SOR_DP_PWR 0x00016000 | ||
299 | #define NV94_DISP_SOR_DP_PWR_STATE 0x00000001 | ||
300 | #define NV94_DISP_SOR_DP_PWR_STATE_OFF 0x00000000 | ||
301 | #define NV94_DISP_SOR_DP_PWR_STATE_ON 0x00000001 | ||
302 | |||
303 | #define NV50_DISP_DAC_MTHD 0x00020000 | ||
304 | #define NV50_DISP_DAC_MTHD_TYPE 0x0000f000 | ||
305 | #define NV50_DISP_DAC_MTHD_OR 0x00000003 | ||
306 | |||
307 | #define NV50_DISP_DAC_PWR 0x00020000 | ||
308 | #define NV50_DISP_DAC_PWR_HSYNC 0x00000001 | ||
309 | #define NV50_DISP_DAC_PWR_HSYNC_ON 0x00000000 | ||
310 | #define NV50_DISP_DAC_PWR_HSYNC_LO 0x00000001 | ||
311 | #define NV50_DISP_DAC_PWR_VSYNC 0x00000004 | ||
312 | #define NV50_DISP_DAC_PWR_VSYNC_ON 0x00000000 | ||
313 | #define NV50_DISP_DAC_PWR_VSYNC_LO 0x00000004 | ||
314 | #define NV50_DISP_DAC_PWR_DATA 0x00000010 | ||
315 | #define NV50_DISP_DAC_PWR_DATA_ON 0x00000000 | ||
316 | #define NV50_DISP_DAC_PWR_DATA_LO 0x00000010 | ||
317 | #define NV50_DISP_DAC_PWR_STATE 0x00000040 | ||
318 | #define NV50_DISP_DAC_PWR_STATE_ON 0x00000000 | ||
319 | #define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040 | ||
320 | #define NV50_DISP_DAC_LOAD 0x00020100 | ||
321 | #define NV50_DISP_DAC_LOAD_VALUE 0x00000007 | ||
322 | |||
323 | #define NV50_DISP_PIOR_MTHD 0x00030000 | ||
324 | #define NV50_DISP_PIOR_MTHD_TYPE 0x0000f000 | ||
325 | #define NV50_DISP_PIOR_MTHD_OR 0x00000003 | ||
326 | |||
327 | #define NV50_DISP_PIOR_PWR 0x00030000 | ||
328 | #define NV50_DISP_PIOR_PWR_STATE 0x00000001 | ||
329 | #define NV50_DISP_PIOR_PWR_STATE_ON 0x00000001 | ||
330 | #define NV50_DISP_PIOR_PWR_STATE_OFF 0x00000000 | ||
331 | #define NV50_DISP_PIOR_TMDS_PWR 0x00032000 | ||
332 | #define NV50_DISP_PIOR_TMDS_PWR_STATE 0x00000001 | ||
333 | #define NV50_DISP_PIOR_TMDS_PWR_STATE_ON 0x00000001 | ||
334 | #define NV50_DISP_PIOR_TMDS_PWR_STATE_OFF 0x00000000 | ||
335 | #define NV50_DISP_PIOR_DP_PWR 0x00036000 | ||
336 | #define NV50_DISP_PIOR_DP_PWR_STATE 0x00000001 | ||
337 | #define NV50_DISP_PIOR_DP_PWR_STATE_ON 0x00000001 | ||
338 | #define NV50_DISP_PIOR_DP_PWR_STATE_OFF 0x00000000 | ||
339 | |||
340 | struct nv50_display_class { | ||
341 | }; | ||
342 | |||
343 | /* 507a: NV50_DISP_CURS | ||
344 | * 827a: NV84_DISP_CURS | ||
345 | * 837a: NVA0_DISP_CURS | ||
346 | * 887a: NV94_DISP_CURS | ||
347 | * 857a: NVA3_DISP_CURS | ||
348 | * 907a: NVD0_DISP_CURS | ||
349 | * 917a: NVE0_DISP_CURS | ||
350 | * 927a: NVF0_DISP_CURS | ||
351 | * 947a: GM107_DISP_CURS | ||
352 | */ | ||
353 | |||
354 | #define NV50_DISP_CURS_CLASS 0x0000507a | ||
355 | #define NV84_DISP_CURS_CLASS 0x0000827a | ||
356 | #define NVA0_DISP_CURS_CLASS 0x0000837a | ||
357 | #define NV94_DISP_CURS_CLASS 0x0000887a | ||
358 | #define NVA3_DISP_CURS_CLASS 0x0000857a | ||
359 | #define NVD0_DISP_CURS_CLASS 0x0000907a | ||
360 | #define NVE0_DISP_CURS_CLASS 0x0000917a | ||
361 | #define NVF0_DISP_CURS_CLASS 0x0000927a | ||
362 | #define GM107_DISP_CURS_CLASS 0x0000947a | ||
363 | |||
364 | struct nv50_display_curs_class { | ||
365 | u32 head; | ||
366 | }; | ||
367 | |||
368 | /* 507b: NV50_DISP_OIMM | ||
369 | * 827b: NV84_DISP_OIMM | ||
370 | * 837b: NVA0_DISP_OIMM | ||
371 | * 887b: NV94_DISP_OIMM | ||
372 | * 857b: NVA3_DISP_OIMM | ||
373 | * 907b: NVD0_DISP_OIMM | ||
374 | * 917b: NVE0_DISP_OIMM | ||
375 | * 927b: NVE0_DISP_OIMM | ||
376 | * 947b: GM107_DISP_OIMM | ||
377 | */ | ||
378 | |||
379 | #define NV50_DISP_OIMM_CLASS 0x0000507b | ||
380 | #define NV84_DISP_OIMM_CLASS 0x0000827b | ||
381 | #define NVA0_DISP_OIMM_CLASS 0x0000837b | ||
382 | #define NV94_DISP_OIMM_CLASS 0x0000887b | ||
383 | #define NVA3_DISP_OIMM_CLASS 0x0000857b | ||
384 | #define NVD0_DISP_OIMM_CLASS 0x0000907b | ||
385 | #define NVE0_DISP_OIMM_CLASS 0x0000917b | ||
386 | #define NVF0_DISP_OIMM_CLASS 0x0000927b | ||
387 | #define GM107_DISP_OIMM_CLASS 0x0000947b | ||
388 | |||
389 | struct nv50_display_oimm_class { | ||
390 | u32 head; | ||
391 | }; | ||
392 | |||
393 | /* 507c: NV50_DISP_SYNC | ||
394 | * 827c: NV84_DISP_SYNC | ||
395 | * 837c: NVA0_DISP_SYNC | ||
396 | * 887c: NV94_DISP_SYNC | ||
397 | * 857c: NVA3_DISP_SYNC | ||
398 | * 907c: NVD0_DISP_SYNC | ||
399 | * 917c: NVE0_DISP_SYNC | ||
400 | * 927c: NVF0_DISP_SYNC | ||
401 | * 947c: GM107_DISP_SYNC | ||
402 | */ | ||
403 | |||
404 | #define NV50_DISP_SYNC_CLASS 0x0000507c | ||
405 | #define NV84_DISP_SYNC_CLASS 0x0000827c | ||
406 | #define NVA0_DISP_SYNC_CLASS 0x0000837c | ||
407 | #define NV94_DISP_SYNC_CLASS 0x0000887c | ||
408 | #define NVA3_DISP_SYNC_CLASS 0x0000857c | ||
409 | #define NVD0_DISP_SYNC_CLASS 0x0000907c | ||
410 | #define NVE0_DISP_SYNC_CLASS 0x0000917c | ||
411 | #define NVF0_DISP_SYNC_CLASS 0x0000927c | ||
412 | #define GM107_DISP_SYNC_CLASS 0x0000947c | ||
413 | |||
414 | struct nv50_display_sync_class { | ||
415 | u32 pushbuf; | ||
416 | u32 head; | ||
417 | }; | ||
418 | |||
419 | /* 507d: NV50_DISP_MAST | ||
420 | * 827d: NV84_DISP_MAST | ||
421 | * 837d: NVA0_DISP_MAST | ||
422 | * 887d: NV94_DISP_MAST | ||
423 | * 857d: NVA3_DISP_MAST | ||
424 | * 907d: NVD0_DISP_MAST | ||
425 | * 917d: NVE0_DISP_MAST | ||
426 | * 927d: NVF0_DISP_MAST | ||
427 | * 947d: GM107_DISP_MAST | ||
428 | */ | ||
429 | |||
430 | #define NV50_DISP_MAST_CLASS 0x0000507d | ||
431 | #define NV84_DISP_MAST_CLASS 0x0000827d | ||
432 | #define NVA0_DISP_MAST_CLASS 0x0000837d | ||
433 | #define NV94_DISP_MAST_CLASS 0x0000887d | ||
434 | #define NVA3_DISP_MAST_CLASS 0x0000857d | ||
435 | #define NVD0_DISP_MAST_CLASS 0x0000907d | ||
436 | #define NVE0_DISP_MAST_CLASS 0x0000917d | ||
437 | #define NVF0_DISP_MAST_CLASS 0x0000927d | ||
438 | #define GM107_DISP_MAST_CLASS 0x0000947d | ||
439 | |||
440 | struct nv50_display_mast_class { | ||
441 | u32 pushbuf; | ||
442 | }; | ||
443 | |||
444 | /* 507e: NV50_DISP_OVLY | ||
445 | * 827e: NV84_DISP_OVLY | ||
446 | * 837e: NVA0_DISP_OVLY | ||
447 | * 887e: NV94_DISP_OVLY | ||
448 | * 857e: NVA3_DISP_OVLY | ||
449 | * 907e: NVD0_DISP_OVLY | ||
450 | * 917e: NVE0_DISP_OVLY | ||
451 | * 927e: NVF0_DISP_OVLY | ||
452 | * 947e: GM107_DISP_OVLY | ||
453 | */ | ||
454 | |||
455 | #define NV50_DISP_OVLY_CLASS 0x0000507e | ||
456 | #define NV84_DISP_OVLY_CLASS 0x0000827e | ||
457 | #define NVA0_DISP_OVLY_CLASS 0x0000837e | ||
458 | #define NV94_DISP_OVLY_CLASS 0x0000887e | ||
459 | #define NVA3_DISP_OVLY_CLASS 0x0000857e | ||
460 | #define NVD0_DISP_OVLY_CLASS 0x0000907e | ||
461 | #define NVE0_DISP_OVLY_CLASS 0x0000917e | ||
462 | #define NVF0_DISP_OVLY_CLASS 0x0000927e | ||
463 | #define GM107_DISP_OVLY_CLASS 0x0000947e | ||
464 | |||
465 | struct nv50_display_ovly_class { | ||
466 | u32 pushbuf; | ||
467 | u32 head; | ||
468 | }; | ||
469 | |||
470 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/core/include/core/client.h b/drivers/gpu/drm/nouveau/core/include/core/client.h index c66eac513803..4fc6ab12382d 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/client.h +++ b/drivers/gpu/drm/nouveau/core/include/core/client.h | |||
@@ -10,6 +10,11 @@ struct nouveau_client { | |||
10 | char name[32]; | 10 | char name[32]; |
11 | u32 debug; | 11 | u32 debug; |
12 | struct nouveau_vm *vm; | 12 | struct nouveau_vm *vm; |
13 | bool super; | ||
14 | void *data; | ||
15 | |||
16 | int (*ntfy)(const void *, u32, const void *, u32); | ||
17 | struct nvkm_client_notify *notify[8]; | ||
13 | }; | 18 | }; |
14 | 19 | ||
15 | static inline struct nouveau_client * | 20 | static inline struct nouveau_client * |
@@ -43,4 +48,10 @@ int nouveau_client_init(struct nouveau_client *); | |||
43 | int nouveau_client_fini(struct nouveau_client *, bool suspend); | 48 | int nouveau_client_fini(struct nouveau_client *, bool suspend); |
44 | const char *nouveau_client_name(void *obj); | 49 | const char *nouveau_client_name(void *obj); |
45 | 50 | ||
51 | int nvkm_client_notify_new(struct nouveau_client *, struct nvkm_event *, | ||
52 | void *data, u32 size); | ||
53 | int nvkm_client_notify_del(struct nouveau_client *, int index); | ||
54 | int nvkm_client_notify_get(struct nouveau_client *, int index); | ||
55 | int nvkm_client_notify_put(struct nouveau_client *, int index); | ||
56 | |||
46 | #endif | 57 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h index a8a9a9cf16cb..8743766454a5 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/device.h +++ b/drivers/gpu/drm/nouveau/core/include/core/device.h | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <core/object.h> | 4 | #include <core/object.h> |
5 | #include <core/subdev.h> | 5 | #include <core/subdev.h> |
6 | #include <core/engine.h> | 6 | #include <core/engine.h> |
7 | #include <core/event.h> | ||
7 | 8 | ||
8 | enum nv_subdev_type { | 9 | enum nv_subdev_type { |
9 | NVDEV_ENGINE_DEVICE, | 10 | NVDEV_ENGINE_DEVICE, |
@@ -28,7 +29,7 @@ enum nv_subdev_type { | |||
28 | NVDEV_SUBDEV_BUS, | 29 | NVDEV_SUBDEV_BUS, |
29 | NVDEV_SUBDEV_TIMER, | 30 | NVDEV_SUBDEV_TIMER, |
30 | NVDEV_SUBDEV_FB, | 31 | NVDEV_SUBDEV_FB, |
31 | NVDEV_SUBDEV_LTCG, | 32 | NVDEV_SUBDEV_LTC, |
32 | NVDEV_SUBDEV_IBUS, | 33 | NVDEV_SUBDEV_IBUS, |
33 | NVDEV_SUBDEV_INSTMEM, | 34 | NVDEV_SUBDEV_INSTMEM, |
34 | NVDEV_SUBDEV_VM, | 35 | NVDEV_SUBDEV_VM, |
@@ -69,6 +70,8 @@ struct nouveau_device { | |||
69 | struct platform_device *platformdev; | 70 | struct platform_device *platformdev; |
70 | u64 handle; | 71 | u64 handle; |
71 | 72 | ||
73 | struct nvkm_event event; | ||
74 | |||
72 | const char *cfgopt; | 75 | const char *cfgopt; |
73 | const char *dbgopt; | 76 | const char *dbgopt; |
74 | const char *name; | 77 | const char *name; |
@@ -84,7 +87,6 @@ struct nouveau_device { | |||
84 | NV_40 = 0x40, | 87 | NV_40 = 0x40, |
85 | NV_50 = 0x50, | 88 | NV_50 = 0x50, |
86 | NV_C0 = 0xc0, | 89 | NV_C0 = 0xc0, |
87 | NV_D0 = 0xd0, | ||
88 | NV_E0 = 0xe0, | 90 | NV_E0 = 0xe0, |
89 | GM100 = 0x110, | 91 | GM100 = 0x110, |
90 | } card_type; | 92 | } card_type; |
@@ -93,8 +95,14 @@ struct nouveau_device { | |||
93 | 95 | ||
94 | struct nouveau_oclass *oclass[NVDEV_SUBDEV_NR]; | 96 | struct nouveau_oclass *oclass[NVDEV_SUBDEV_NR]; |
95 | struct nouveau_object *subdev[NVDEV_SUBDEV_NR]; | 97 | struct nouveau_object *subdev[NVDEV_SUBDEV_NR]; |
98 | |||
99 | struct { | ||
100 | struct notifier_block nb; | ||
101 | } acpi; | ||
96 | }; | 102 | }; |
97 | 103 | ||
104 | int nouveau_device_list(u64 *name, int size); | ||
105 | |||
98 | static inline struct nouveau_device * | 106 | static inline struct nouveau_device * |
99 | nv_device(void *obj) | 107 | nv_device(void *obj) |
100 | { | 108 | { |
@@ -162,12 +170,6 @@ nv_device_resource_start(struct nouveau_device *device, unsigned int bar); | |||
162 | resource_size_t | 170 | resource_size_t |
163 | nv_device_resource_len(struct nouveau_device *device, unsigned int bar); | 171 | nv_device_resource_len(struct nouveau_device *device, unsigned int bar); |
164 | 172 | ||
165 | dma_addr_t | ||
166 | nv_device_map_page(struct nouveau_device *device, struct page *page); | ||
167 | |||
168 | void | ||
169 | nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr); | ||
170 | |||
171 | int | 173 | int |
172 | nv_device_get_irq(struct nouveau_device *device, bool stall); | 174 | nv_device_get_irq(struct nouveau_device *device, bool stall); |
173 | 175 | ||
diff --git a/drivers/gpu/drm/nouveau/core/include/core/event.h b/drivers/gpu/drm/nouveau/core/include/core/event.h index ba3f1a76a815..51e55d03330a 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/event.h +++ b/drivers/gpu/drm/nouveau/core/include/core/event.h | |||
@@ -1,47 +1,34 @@ | |||
1 | #ifndef __NVKM_EVENT_H__ | 1 | #ifndef __NVKM_EVENT_H__ |
2 | #define __NVKM_EVENT_H__ | 2 | #define __NVKM_EVENT_H__ |
3 | 3 | ||
4 | /* return codes from event handlers */ | 4 | #include <core/notify.h> |
5 | #define NVKM_EVENT_DROP 0 | ||
6 | #define NVKM_EVENT_KEEP 1 | ||
7 | 5 | ||
8 | /* nouveau_eventh.flags bit #s */ | 6 | struct nvkm_event_func { |
9 | #define NVKM_EVENT_ENABLE 0 | 7 | int (*ctor)(void *data, u32 size, struct nvkm_notify *); |
10 | 8 | void (*send)(void *data, u32 size, struct nvkm_notify *); | |
11 | struct nouveau_eventh { | 9 | void (*init)(struct nvkm_event *, int type, int index); |
12 | struct nouveau_event *event; | 10 | void (*fini)(struct nvkm_event *, int type, int index); |
13 | struct list_head head; | ||
14 | unsigned long flags; | ||
15 | u32 types; | ||
16 | int index; | ||
17 | int (*func)(void *, u32, int); | ||
18 | void *priv; | ||
19 | }; | 11 | }; |
20 | 12 | ||
21 | struct nouveau_event { | 13 | struct nvkm_event { |
22 | void *priv; | 14 | const struct nvkm_event_func *func; |
23 | int (*check)(struct nouveau_event *, u32 type, int index); | ||
24 | void (*enable)(struct nouveau_event *, int type, int index); | ||
25 | void (*disable)(struct nouveau_event *, int type, int index); | ||
26 | 15 | ||
27 | int types_nr; | 16 | int types_nr; |
28 | int index_nr; | 17 | int index_nr; |
29 | 18 | ||
30 | spinlock_t list_lock; | ||
31 | struct list_head *list; | ||
32 | spinlock_t refs_lock; | 19 | spinlock_t refs_lock; |
33 | int refs[]; | 20 | spinlock_t list_lock; |
21 | struct list_head list; | ||
22 | int *refs; | ||
34 | }; | 23 | }; |
35 | 24 | ||
36 | int nouveau_event_create(int types_nr, int index_nr, struct nouveau_event **); | 25 | int nvkm_event_init(const struct nvkm_event_func *func, |
37 | void nouveau_event_destroy(struct nouveau_event **); | 26 | int types_nr, int index_nr, |
38 | void nouveau_event_trigger(struct nouveau_event *, u32 types, int index); | 27 | struct nvkm_event *); |
39 | 28 | void nvkm_event_fini(struct nvkm_event *); | |
40 | int nouveau_event_new(struct nouveau_event *, u32 types, int index, | 29 | void nvkm_event_get(struct nvkm_event *, u32 types, int index); |
41 | int (*func)(void *, u32, int), void *, | 30 | void nvkm_event_put(struct nvkm_event *, u32 types, int index); |
42 | struct nouveau_eventh **); | 31 | void nvkm_event_send(struct nvkm_event *, u32 types, int index, |
43 | void nouveau_event_ref(struct nouveau_eventh *, struct nouveau_eventh **); | 32 | void *data, u32 size); |
44 | void nouveau_event_get(struct nouveau_eventh *); | ||
45 | void nouveau_event_put(struct nouveau_eventh *); | ||
46 | 33 | ||
47 | #endif | 34 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/include/core/handle.h b/drivers/gpu/drm/nouveau/core/include/core/handle.h index 363674cdf8ab..ceb67d770875 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/handle.h +++ b/drivers/gpu/drm/nouveau/core/include/core/handle.h | |||
@@ -10,6 +10,9 @@ struct nouveau_handle { | |||
10 | u32 name; | 10 | u32 name; |
11 | u32 priv; | 11 | u32 priv; |
12 | 12 | ||
13 | u8 route; | ||
14 | u64 token; | ||
15 | |||
13 | struct nouveau_handle *parent; | 16 | struct nouveau_handle *parent; |
14 | struct nouveau_object *object; | 17 | struct nouveau_object *object; |
15 | }; | 18 | }; |
@@ -20,6 +23,11 @@ void nouveau_handle_destroy(struct nouveau_handle *); | |||
20 | int nouveau_handle_init(struct nouveau_handle *); | 23 | int nouveau_handle_init(struct nouveau_handle *); |
21 | int nouveau_handle_fini(struct nouveau_handle *, bool suspend); | 24 | int nouveau_handle_fini(struct nouveau_handle *, bool suspend); |
22 | 25 | ||
26 | int nouveau_handle_new(struct nouveau_object *, u32 parent, u32 handle, | ||
27 | u16 oclass, void *data, u32 size, | ||
28 | struct nouveau_object **); | ||
29 | int nouveau_handle_del(struct nouveau_object *, u32 parent, u32 handle); | ||
30 | |||
23 | struct nouveau_object * | 31 | struct nouveau_object * |
24 | nouveau_handle_ref(struct nouveau_object *, u32 name); | 32 | nouveau_handle_ref(struct nouveau_object *, u32 name); |
25 | 33 | ||
diff --git a/drivers/gpu/drm/nouveau/core/include/core/ioctl.h b/drivers/gpu/drm/nouveau/core/include/core/ioctl.h new file mode 100644 index 000000000000..ac7935c2474e --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/ioctl.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef __NVKM_IOCTL_H__ | ||
2 | #define __NVKM_IOCTL_H__ | ||
3 | |||
4 | int nvkm_ioctl(struct nouveau_client *, bool, void *, u32, void **); | ||
5 | |||
6 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/core/include/core/notify.h b/drivers/gpu/drm/nouveau/core/include/core/notify.h new file mode 100644 index 000000000000..1262d8f020f3 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/notify.h | |||
@@ -0,0 +1,36 @@ | |||
1 | #ifndef __NVKM_NOTIFY_H__ | ||
2 | #define __NVKM_NOTIFY_H__ | ||
3 | |||
4 | struct nvkm_notify { | ||
5 | struct nvkm_event *event; | ||
6 | struct list_head head; | ||
7 | #define NVKM_NOTIFY_USER 0 | ||
8 | #define NVKM_NOTIFY_WORK 1 | ||
9 | unsigned long flags; | ||
10 | int block; | ||
11 | #define NVKM_NOTIFY_DROP 0 | ||
12 | #define NVKM_NOTIFY_KEEP 1 | ||
13 | int (*func)(struct nvkm_notify *); | ||
14 | |||
15 | /* set by nvkm_event ctor */ | ||
16 | u32 types; | ||
17 | int index; | ||
18 | u32 size; | ||
19 | |||
20 | struct work_struct work; | ||
21 | /* this is const for a *very* good reason - the data might be on the | ||
22 | * stack from an irq handler. if you're not core/notify.c then you | ||
23 | * should probably think twice before casting it away... | ||
24 | */ | ||
25 | const void *data; | ||
26 | }; | ||
27 | |||
28 | int nvkm_notify_init(struct nvkm_event *, int (*func)(struct nvkm_notify *), | ||
29 | bool work, void *data, u32 size, u32 reply, | ||
30 | struct nvkm_notify *); | ||
31 | void nvkm_notify_fini(struct nvkm_notify *); | ||
32 | void nvkm_notify_get(struct nvkm_notify *); | ||
33 | void nvkm_notify_put(struct nvkm_notify *); | ||
34 | void nvkm_notify_send(struct nvkm_notify *, void *data, u32 size); | ||
35 | |||
36 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h index 62e68baef087..d7039482d6fd 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/object.h +++ b/drivers/gpu/drm/nouveau/core/include/core/object.h | |||
@@ -48,6 +48,10 @@ void nouveau_object_destroy(struct nouveau_object *); | |||
48 | int nouveau_object_init(struct nouveau_object *); | 48 | int nouveau_object_init(struct nouveau_object *); |
49 | int nouveau_object_fini(struct nouveau_object *, bool suspend); | 49 | int nouveau_object_fini(struct nouveau_object *, bool suspend); |
50 | 50 | ||
51 | int _nouveau_object_ctor(struct nouveau_object *, struct nouveau_object *, | ||
52 | struct nouveau_oclass *, void *, u32, | ||
53 | struct nouveau_object **); | ||
54 | |||
51 | extern struct nouveau_ofuncs nouveau_object_ofuncs; | 55 | extern struct nouveau_ofuncs nouveau_object_ofuncs; |
52 | 56 | ||
53 | /* Don't allocate dynamically, because lockdep needs lock_class_keys to be in | 57 | /* Don't allocate dynamically, because lockdep needs lock_class_keys to be in |
@@ -78,6 +82,7 @@ struct nouveau_omthds { | |||
78 | int (*call)(struct nouveau_object *, u32, void *, u32); | 82 | int (*call)(struct nouveau_object *, u32, void *, u32); |
79 | }; | 83 | }; |
80 | 84 | ||
85 | struct nvkm_event; | ||
81 | struct nouveau_ofuncs { | 86 | struct nouveau_ofuncs { |
82 | int (*ctor)(struct nouveau_object *, struct nouveau_object *, | 87 | int (*ctor)(struct nouveau_object *, struct nouveau_object *, |
83 | struct nouveau_oclass *, void *data, u32 size, | 88 | struct nouveau_oclass *, void *data, u32 size, |
@@ -85,6 +90,9 @@ struct nouveau_ofuncs { | |||
85 | void (*dtor)(struct nouveau_object *); | 90 | void (*dtor)(struct nouveau_object *); |
86 | int (*init)(struct nouveau_object *); | 91 | int (*init)(struct nouveau_object *); |
87 | int (*fini)(struct nouveau_object *, bool suspend); | 92 | int (*fini)(struct nouveau_object *, bool suspend); |
93 | int (*mthd)(struct nouveau_object *, u32, void *, u32); | ||
94 | int (*ntfy)(struct nouveau_object *, u32, struct nvkm_event **); | ||
95 | int (* map)(struct nouveau_object *, u64 *, u32 *); | ||
88 | u8 (*rd08)(struct nouveau_object *, u64 offset); | 96 | u8 (*rd08)(struct nouveau_object *, u64 offset); |
89 | u16 (*rd16)(struct nouveau_object *, u64 offset); | 97 | u16 (*rd16)(struct nouveau_object *, u64 offset); |
90 | u32 (*rd32)(struct nouveau_object *, u64 offset); | 98 | u32 (*rd32)(struct nouveau_object *, u64 offset); |
@@ -106,10 +114,6 @@ void nouveau_object_ref(struct nouveau_object *, struct nouveau_object **); | |||
106 | int nouveau_object_inc(struct nouveau_object *); | 114 | int nouveau_object_inc(struct nouveau_object *); |
107 | int nouveau_object_dec(struct nouveau_object *, bool suspend); | 115 | int nouveau_object_dec(struct nouveau_object *, bool suspend); |
108 | 116 | ||
109 | int nouveau_object_new(struct nouveau_object *, u32 parent, u32 handle, | ||
110 | u16 oclass, void *data, u32 size, | ||
111 | struct nouveau_object **); | ||
112 | int nouveau_object_del(struct nouveau_object *, u32 parent, u32 handle); | ||
113 | void nouveau_object_debug(void); | 117 | void nouveau_object_debug(void); |
114 | 118 | ||
115 | static inline int | 119 | static inline int |
@@ -199,4 +203,21 @@ nv_memcmp(void *obj, u32 addr, const char *str, u32 len) | |||
199 | return 0; | 203 | return 0; |
200 | } | 204 | } |
201 | 205 | ||
206 | #include <core/handle.h> | ||
207 | |||
208 | static inline int | ||
209 | nouveau_object_new(struct nouveau_object *client, u32 parent, u32 handle, | ||
210 | u16 oclass, void *data, u32 size, | ||
211 | struct nouveau_object **pobject) | ||
212 | { | ||
213 | return nouveau_handle_new(client, parent, handle, oclass, | ||
214 | data, size, pobject); | ||
215 | } | ||
216 | |||
217 | static inline int | ||
218 | nouveau_object_del(struct nouveau_object *client, u32 parent, u32 handle) | ||
219 | { | ||
220 | return nouveau_handle_del(client, parent, handle); | ||
221 | } | ||
222 | |||
202 | #endif | 223 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/include/core/parent.h b/drivers/gpu/drm/nouveau/core/include/core/parent.h index 9f5ea900ff00..12da418ec70a 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/parent.h +++ b/drivers/gpu/drm/nouveau/core/include/core/parent.h | |||
@@ -57,5 +57,6 @@ void _nouveau_parent_dtor(struct nouveau_object *); | |||
57 | int nouveau_parent_sclass(struct nouveau_object *, u16 handle, | 57 | int nouveau_parent_sclass(struct nouveau_object *, u16 handle, |
58 | struct nouveau_object **pengine, | 58 | struct nouveau_object **pengine, |
59 | struct nouveau_oclass **poclass); | 59 | struct nouveau_oclass **poclass); |
60 | int nouveau_parent_lclass(struct nouveau_object *, u32 *, int); | ||
60 | 61 | ||
61 | #endif | 62 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h index 0f9a37bd32b0..451b6ed20b7e 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/printk.h +++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h | |||
@@ -21,6 +21,7 @@ nv_printk_(struct nouveau_object *, int, const char *, ...); | |||
21 | #define nv_debug(o,f,a...) nv_printk((o), DEBUG, f, ##a) | 21 | #define nv_debug(o,f,a...) nv_printk((o), DEBUG, f, ##a) |
22 | #define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a) | 22 | #define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a) |
23 | #define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a) | 23 | #define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a) |
24 | #define nv_ioctl(o,f,a...) nv_trace(nouveau_client(o), "ioctl: "f, ##a) | ||
24 | 25 | ||
25 | #define nv_assert(f,a...) do { \ | 26 | #define nv_assert(f,a...) do { \ |
26 | if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \ | 27 | if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \ |
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h index fde842896806..7a64f347b385 100644 --- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h +++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h | |||
@@ -6,20 +6,13 @@ | |||
6 | #include <core/device.h> | 6 | #include <core/device.h> |
7 | #include <core/event.h> | 7 | #include <core/event.h> |
8 | 8 | ||
9 | enum nvkm_hpd_event { | ||
10 | NVKM_HPD_PLUG = 1, | ||
11 | NVKM_HPD_UNPLUG = 2, | ||
12 | NVKM_HPD_IRQ = 4, | ||
13 | NVKM_HPD = (NVKM_HPD_PLUG | NVKM_HPD_UNPLUG | NVKM_HPD_IRQ) | ||
14 | }; | ||
15 | |||
16 | struct nouveau_disp { | 9 | struct nouveau_disp { |
17 | struct nouveau_engine base; | 10 | struct nouveau_engine base; |
18 | 11 | ||
19 | struct list_head outp; | 12 | struct list_head outp; |
20 | struct nouveau_event *hpd; | ||
21 | 13 | ||
22 | struct nouveau_event *vblank; | 14 | struct nvkm_event hpd; |
15 | struct nvkm_event vblank; | ||
23 | }; | 16 | }; |
24 | 17 | ||
25 | static inline struct nouveau_disp * | 18 | static inline struct nouveau_disp * |
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h index b28914ed1752..1b283a7b78e6 100644 --- a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h +++ b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h | |||
@@ -12,37 +12,20 @@ struct nouveau_dmaobj { | |||
12 | u32 access; | 12 | u32 access; |
13 | u64 start; | 13 | u64 start; |
14 | u64 limit; | 14 | u64 limit; |
15 | u32 conf0; | ||
16 | }; | 15 | }; |
17 | 16 | ||
18 | struct nouveau_dmaeng { | 17 | struct nouveau_dmaeng { |
19 | struct nouveau_engine base; | 18 | struct nouveau_engine base; |
20 | 19 | ||
21 | /* creates a "physical" dma object from a struct nouveau_dmaobj */ | 20 | /* creates a "physical" dma object from a struct nouveau_dmaobj */ |
22 | int (*bind)(struct nouveau_dmaeng *dmaeng, | 21 | int (*bind)(struct nouveau_dmaobj *dmaobj, |
23 | struct nouveau_object *parent, | 22 | struct nouveau_object *parent, |
24 | struct nouveau_dmaobj *dmaobj, | ||
25 | struct nouveau_gpuobj **); | 23 | struct nouveau_gpuobj **); |
26 | }; | 24 | }; |
27 | 25 | ||
28 | #define nouveau_dmaeng_create(p,e,c,d) \ | 26 | extern struct nouveau_oclass *nv04_dmaeng_oclass; |
29 | nouveau_engine_create((p), (e), (c), true, "DMAOBJ", "dmaobj", (d)) | 27 | extern struct nouveau_oclass *nv50_dmaeng_oclass; |
30 | #define nouveau_dmaeng_destroy(p) \ | 28 | extern struct nouveau_oclass *nvc0_dmaeng_oclass; |
31 | nouveau_engine_destroy(&(p)->base) | 29 | extern struct nouveau_oclass *nvd0_dmaeng_oclass; |
32 | #define nouveau_dmaeng_init(p) \ | ||
33 | nouveau_engine_init(&(p)->base) | ||
34 | #define nouveau_dmaeng_fini(p,s) \ | ||
35 | nouveau_engine_fini(&(p)->base, (s)) | ||
36 | |||
37 | #define _nouveau_dmaeng_dtor _nouveau_engine_dtor | ||
38 | #define _nouveau_dmaeng_init _nouveau_engine_init | ||
39 | #define _nouveau_dmaeng_fini _nouveau_engine_fini | ||
40 | |||
41 | extern struct nouveau_oclass nv04_dmaeng_oclass; | ||
42 | extern struct nouveau_oclass nv50_dmaeng_oclass; | ||
43 | extern struct nouveau_oclass nvc0_dmaeng_oclass; | ||
44 | extern struct nouveau_oclass nvd0_dmaeng_oclass; | ||
45 | |||
46 | extern struct nouveau_oclass nouveau_dmaobj_sclass[]; | ||
47 | 30 | ||
48 | #endif | 31 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h index b639eb2c74ff..e5e4d930b2c2 100644 --- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h +++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h | |||
@@ -4,12 +4,14 @@ | |||
4 | #include <core/namedb.h> | 4 | #include <core/namedb.h> |
5 | #include <core/gpuobj.h> | 5 | #include <core/gpuobj.h> |
6 | #include <core/engine.h> | 6 | #include <core/engine.h> |
7 | #include <core/event.h> | ||
7 | 8 | ||
8 | struct nouveau_fifo_chan { | 9 | struct nouveau_fifo_chan { |
9 | struct nouveau_namedb base; | 10 | struct nouveau_namedb base; |
10 | struct nouveau_dmaobj *pushdma; | 11 | struct nouveau_dmaobj *pushdma; |
11 | struct nouveau_gpuobj *pushgpu; | 12 | struct nouveau_gpuobj *pushgpu; |
12 | void __iomem *user; | 13 | void __iomem *user; |
14 | u64 addr; | ||
13 | u32 size; | 15 | u32 size; |
14 | u16 chid; | 16 | u16 chid; |
15 | atomic_t refcnt; /* NV04_NVSW_SET_REF */ | 17 | atomic_t refcnt; /* NV04_NVSW_SET_REF */ |
@@ -40,8 +42,10 @@ void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *); | |||
40 | #define _nouveau_fifo_channel_fini _nouveau_namedb_fini | 42 | #define _nouveau_fifo_channel_fini _nouveau_namedb_fini |
41 | 43 | ||
42 | void _nouveau_fifo_channel_dtor(struct nouveau_object *); | 44 | void _nouveau_fifo_channel_dtor(struct nouveau_object *); |
45 | int _nouveau_fifo_channel_map(struct nouveau_object *, u64 *, u32 *); | ||
43 | u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u64); | 46 | u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u64); |
44 | void _nouveau_fifo_channel_wr32(struct nouveau_object *, u64, u32); | 47 | void _nouveau_fifo_channel_wr32(struct nouveau_object *, u64, u32); |
48 | int _nouveau_fifo_channel_ntfy(struct nouveau_object *, u32, struct nvkm_event **); | ||
45 | 49 | ||
46 | struct nouveau_fifo_base { | 50 | struct nouveau_fifo_base { |
47 | struct nouveau_gpuobj base; | 51 | struct nouveau_gpuobj base; |
@@ -65,8 +69,8 @@ struct nouveau_fifo_base { | |||
65 | struct nouveau_fifo { | 69 | struct nouveau_fifo { |
66 | struct nouveau_engine base; | 70 | struct nouveau_engine base; |
67 | 71 | ||
68 | struct nouveau_event *cevent; /* channel creation event */ | 72 | struct nvkm_event cevent; /* channel creation event */ |
69 | struct nouveau_event *uevent; /* async user trigger */ | 73 | struct nvkm_event uevent; /* async user trigger */ |
70 | 74 | ||
71 | struct nouveau_object **channel; | 75 | struct nouveau_object **channel; |
72 | spinlock_t lock; | 76 | spinlock_t lock; |
@@ -112,6 +116,9 @@ extern struct nouveau_oclass *nve0_fifo_oclass; | |||
112 | extern struct nouveau_oclass *gk20a_fifo_oclass; | 116 | extern struct nouveau_oclass *gk20a_fifo_oclass; |
113 | extern struct nouveau_oclass *nv108_fifo_oclass; | 117 | extern struct nouveau_oclass *nv108_fifo_oclass; |
114 | 118 | ||
119 | int nouveau_fifo_uevent_ctor(void *, u32, struct nvkm_notify *); | ||
120 | void nouveau_fifo_uevent(struct nouveau_fifo *); | ||
121 | |||
115 | void nv04_fifo_intr(struct nouveau_subdev *); | 122 | void nv04_fifo_intr(struct nouveau_subdev *); |
116 | int nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *); | 123 | int nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *); |
117 | 124 | ||
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/drivers/gpu/drm/nouveau/core/include/engine/graph.h index 8c1d4772da0c..d5055570d01b 100644 --- a/drivers/gpu/drm/nouveau/core/include/engine/graph.h +++ b/drivers/gpu/drm/nouveau/core/include/engine/graph.h | |||
@@ -70,6 +70,7 @@ extern struct nouveau_oclass *nvd9_graph_oclass; | |||
70 | extern struct nouveau_oclass *nve4_graph_oclass; | 70 | extern struct nouveau_oclass *nve4_graph_oclass; |
71 | extern struct nouveau_oclass *gk20a_graph_oclass; | 71 | extern struct nouveau_oclass *gk20a_graph_oclass; |
72 | extern struct nouveau_oclass *nvf0_graph_oclass; | 72 | extern struct nouveau_oclass *nvf0_graph_oclass; |
73 | extern struct nouveau_oclass *gk110b_graph_oclass; | ||
73 | extern struct nouveau_oclass *nv108_graph_oclass; | 74 | extern struct nouveau_oclass *nv108_graph_oclass; |
74 | extern struct nouveau_oclass *gm107_graph_oclass; | 75 | extern struct nouveau_oclass *gm107_graph_oclass; |
75 | 76 | ||
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h b/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h index 49b0024910fe..88cc812baaa3 100644 --- a/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h +++ b/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h | |||
@@ -4,7 +4,6 @@ | |||
4 | #include <core/device.h> | 4 | #include <core/device.h> |
5 | #include <core/engine.h> | 5 | #include <core/engine.h> |
6 | #include <core/engctx.h> | 6 | #include <core/engctx.h> |
7 | #include <core/class.h> | ||
8 | 7 | ||
9 | struct nouveau_perfdom; | 8 | struct nouveau_perfdom; |
10 | struct nouveau_perfctr; | 9 | struct nouveau_perfctr; |
diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/class.h b/drivers/gpu/drm/nouveau/core/include/nvif/class.h new file mode 120000 index 000000000000..f1ac4859edd4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/nvif/class.h | |||
@@ -0,0 +1 @@ | |||
../../../nvif/class.h \ No newline at end of file | |||
diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/event.h b/drivers/gpu/drm/nouveau/core/include/nvif/event.h new file mode 120000 index 000000000000..1b798538a725 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/nvif/event.h | |||
@@ -0,0 +1 @@ | |||
../../../nvif/event.h \ No newline at end of file | |||
diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/core/include/nvif/ioctl.h new file mode 120000 index 000000000000..8569c86907c5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/nvif/ioctl.h | |||
@@ -0,0 +1 @@ | |||
../../../nvif/ioctl.h \ No newline at end of file | |||
diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/unpack.h b/drivers/gpu/drm/nouveau/core/include/nvif/unpack.h new file mode 120000 index 000000000000..69d99292bca4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/nvif/unpack.h | |||
@@ -0,0 +1 @@ | |||
../../../nvif/unpack.h \ No newline at end of file | |||
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h index 9faa98e67ad8..be037fac534c 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h | |||
@@ -20,6 +20,9 @@ struct nouveau_bar { | |||
20 | u32 flags, struct nouveau_vma *); | 20 | u32 flags, struct nouveau_vma *); |
21 | void (*unmap)(struct nouveau_bar *, struct nouveau_vma *); | 21 | void (*unmap)(struct nouveau_bar *, struct nouveau_vma *); |
22 | void (*flush)(struct nouveau_bar *); | 22 | void (*flush)(struct nouveau_bar *); |
23 | |||
24 | /* whether the BAR supports to be ioremapped WC or should be uncached */ | ||
25 | bool iomap_uncached; | ||
23 | }; | 26 | }; |
24 | 27 | ||
25 | static inline struct nouveau_bar * | 28 | static inline struct nouveau_bar * |
@@ -30,5 +33,6 @@ nouveau_bar(void *obj) | |||
30 | 33 | ||
31 | extern struct nouveau_oclass nv50_bar_oclass; | 34 | extern struct nouveau_oclass nv50_bar_oclass; |
32 | extern struct nouveau_oclass nvc0_bar_oclass; | 35 | extern struct nouveau_oclass nvc0_bar_oclass; |
36 | extern struct nouveau_oclass gk20a_bar_oclass; | ||
33 | 37 | ||
34 | #endif | 38 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h index c01e29c9f89a..a5ca00dd2f61 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h | |||
@@ -71,8 +71,15 @@ struct nouveau_clock { | |||
71 | struct list_head states; | 71 | struct list_head states; |
72 | int state_nr; | 72 | int state_nr; |
73 | 73 | ||
74 | struct work_struct work; | ||
75 | wait_queue_head_t wait; | ||
76 | atomic_t waiting; | ||
77 | |||
78 | struct nvkm_notify pwrsrc_ntfy; | ||
79 | int pwrsrc; | ||
74 | int pstate; /* current */ | 80 | int pstate; /* current */ |
75 | int ustate; /* user-requested (-1 disabled, -2 perfmon) */ | 81 | int ustate_ac; /* user-requested (-1 disabled, -2 perfmon) */ |
82 | int ustate_dc; /* user-requested (-1 disabled, -2 perfmon) */ | ||
76 | int astate; /* perfmon adjustment (base) */ | 83 | int astate; /* perfmon adjustment (base) */ |
77 | int tstate; /* thermal adjustment (max-) */ | 84 | int tstate; /* thermal adjustment (max-) */ |
78 | int dstate; /* display adjustment (min+) */ | 85 | int dstate; /* display adjustment (min+) */ |
@@ -108,8 +115,9 @@ struct nouveau_clocks { | |||
108 | int mdiv; | 115 | int mdiv; |
109 | }; | 116 | }; |
110 | 117 | ||
111 | #define nouveau_clock_create(p,e,o,i,r,d) \ | 118 | #define nouveau_clock_create(p,e,o,i,r,s,n,d) \ |
112 | nouveau_clock_create_((p), (e), (o), (i), (r), sizeof(**d), (void **)d) | 119 | nouveau_clock_create_((p), (e), (o), (i), (r), (s), (n), sizeof(**d), \ |
120 | (void **)d) | ||
113 | #define nouveau_clock_destroy(p) ({ \ | 121 | #define nouveau_clock_destroy(p) ({ \ |
114 | struct nouveau_clock *clk = (p); \ | 122 | struct nouveau_clock *clk = (p); \ |
115 | _nouveau_clock_dtor(nv_object(clk)); \ | 123 | _nouveau_clock_dtor(nv_object(clk)); \ |
@@ -118,15 +126,18 @@ struct nouveau_clocks { | |||
118 | struct nouveau_clock *clk = (p); \ | 126 | struct nouveau_clock *clk = (p); \ |
119 | _nouveau_clock_init(nv_object(clk)); \ | 127 | _nouveau_clock_init(nv_object(clk)); \ |
120 | }) | 128 | }) |
121 | #define nouveau_clock_fini(p,s) \ | 129 | #define nouveau_clock_fini(p,s) ({ \ |
122 | nouveau_subdev_fini(&(p)->base, (s)) | 130 | struct nouveau_clock *clk = (p); \ |
131 | _nouveau_clock_fini(nv_object(clk), (s)); \ | ||
132 | }) | ||
123 | 133 | ||
124 | int nouveau_clock_create_(struct nouveau_object *, struct nouveau_object *, | 134 | int nouveau_clock_create_(struct nouveau_object *, struct nouveau_object *, |
125 | struct nouveau_oclass *, | 135 | struct nouveau_oclass *, |
126 | struct nouveau_clocks *, bool, int, void **); | 136 | struct nouveau_clocks *, struct nouveau_pstate *, |
137 | int, bool, int, void **); | ||
127 | void _nouveau_clock_dtor(struct nouveau_object *); | 138 | void _nouveau_clock_dtor(struct nouveau_object *); |
128 | int _nouveau_clock_init(struct nouveau_object *); | 139 | int _nouveau_clock_init(struct nouveau_object *); |
129 | #define _nouveau_clock_fini _nouveau_subdev_fini | 140 | int _nouveau_clock_fini(struct nouveau_object *, bool); |
130 | 141 | ||
131 | extern struct nouveau_oclass nv04_clock_oclass; | 142 | extern struct nouveau_oclass nv04_clock_oclass; |
132 | extern struct nouveau_oclass nv40_clock_oclass; | 143 | extern struct nouveau_oclass nv40_clock_oclass; |
@@ -136,6 +147,7 @@ extern struct nouveau_oclass *nvaa_clock_oclass; | |||
136 | extern struct nouveau_oclass nva3_clock_oclass; | 147 | extern struct nouveau_oclass nva3_clock_oclass; |
137 | extern struct nouveau_oclass nvc0_clock_oclass; | 148 | extern struct nouveau_oclass nvc0_clock_oclass; |
138 | extern struct nouveau_oclass nve0_clock_oclass; | 149 | extern struct nouveau_oclass nve0_clock_oclass; |
150 | extern struct nouveau_oclass gk20a_clock_oclass; | ||
139 | 151 | ||
140 | int nv04_clock_pll_set(struct nouveau_clock *, u32 type, u32 freq); | 152 | int nv04_clock_pll_set(struct nouveau_clock *, u32 type, u32 freq); |
141 | int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *, | 153 | int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *, |
@@ -145,7 +157,7 @@ int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1, | |||
145 | int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *, | 157 | int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *, |
146 | int clk, struct nouveau_pll_vals *); | 158 | int clk, struct nouveau_pll_vals *); |
147 | 159 | ||
148 | int nouveau_clock_ustate(struct nouveau_clock *, int req); | 160 | int nouveau_clock_ustate(struct nouveau_clock *, int req, int pwr); |
149 | int nouveau_clock_astate(struct nouveau_clock *, int req, int rel); | 161 | int nouveau_clock_astate(struct nouveau_clock *, int req, int rel); |
150 | int nouveau_clock_dstate(struct nouveau_clock *, int req, int rel); | 162 | int nouveau_clock_dstate(struct nouveau_clock *, int req, int rel); |
151 | int nouveau_clock_tstate(struct nouveau_clock *, int req, int rel); | 163 | int nouveau_clock_tstate(struct nouveau_clock *, int req, int rel); |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h index 612d82ab683d..b73733d21cc7 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h | |||
@@ -8,16 +8,22 @@ | |||
8 | #include <subdev/bios.h> | 8 | #include <subdev/bios.h> |
9 | #include <subdev/bios/gpio.h> | 9 | #include <subdev/bios/gpio.h> |
10 | 10 | ||
11 | enum nvkm_gpio_event { | 11 | struct nvkm_gpio_ntfy_req { |
12 | NVKM_GPIO_HI = 1, | 12 | #define NVKM_GPIO_HI 0x01 |
13 | NVKM_GPIO_LO = 2, | 13 | #define NVKM_GPIO_LO 0x02 |
14 | NVKM_GPIO_TOGGLED = (NVKM_GPIO_HI | NVKM_GPIO_LO), | 14 | #define NVKM_GPIO_TOGGLED 0x03 |
15 | u8 mask; | ||
16 | u8 line; | ||
17 | }; | ||
18 | |||
19 | struct nvkm_gpio_ntfy_rep { | ||
20 | u8 mask; | ||
15 | }; | 21 | }; |
16 | 22 | ||
17 | struct nouveau_gpio { | 23 | struct nouveau_gpio { |
18 | struct nouveau_subdev base; | 24 | struct nouveau_subdev base; |
19 | 25 | ||
20 | struct nouveau_event *events; | 26 | struct nvkm_event event; |
21 | 27 | ||
22 | void (*reset)(struct nouveau_gpio *, u8 func); | 28 | void (*reset)(struct nouveau_gpio *, u8 func); |
23 | int (*find)(struct nouveau_gpio *, int idx, u8 tag, u8 line, | 29 | int (*find)(struct nouveau_gpio *, int idx, u8 tag, u8 line, |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h index 825f7bb46b67..1b937c2c25ae 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h | |||
@@ -14,15 +14,18 @@ | |||
14 | #define NV_I2C_TYPE_EXTDDC(e) (0x0005 | (e) << 8) | 14 | #define NV_I2C_TYPE_EXTDDC(e) (0x0005 | (e) << 8) |
15 | #define NV_I2C_TYPE_EXTAUX(e) (0x0006 | (e) << 8) | 15 | #define NV_I2C_TYPE_EXTAUX(e) (0x0006 | (e) << 8) |
16 | 16 | ||
17 | enum nvkm_i2c_event { | 17 | struct nvkm_i2c_ntfy_req { |
18 | NVKM_I2C_PLUG = 1, | 18 | #define NVKM_I2C_PLUG 0x01 |
19 | NVKM_I2C_UNPLUG = 2, | 19 | #define NVKM_I2C_UNPLUG 0x02 |
20 | NVKM_I2C_IRQ = 4, | 20 | #define NVKM_I2C_IRQ 0x04 |
21 | NVKM_I2C_DONE = 8, | 21 | #define NVKM_I2C_DONE 0x08 |
22 | NVKM_I2C_ANY = (NVKM_I2C_PLUG | | 22 | #define NVKM_I2C_ANY 0x0f |
23 | NVKM_I2C_UNPLUG | | 23 | u8 mask; |
24 | NVKM_I2C_IRQ | | 24 | u8 port; |
25 | NVKM_I2C_DONE), | 25 | }; |
26 | |||
27 | struct nvkm_i2c_ntfy_rep { | ||
28 | u8 mask; | ||
26 | }; | 29 | }; |
27 | 30 | ||
28 | struct nouveau_i2c_port { | 31 | struct nouveau_i2c_port { |
@@ -56,7 +59,7 @@ struct nouveau_i2c_board_info { | |||
56 | 59 | ||
57 | struct nouveau_i2c { | 60 | struct nouveau_i2c { |
58 | struct nouveau_subdev base; | 61 | struct nouveau_subdev base; |
59 | struct nouveau_event *ntfy; | 62 | struct nvkm_event event; |
60 | 63 | ||
61 | struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index); | 64 | struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index); |
62 | struct nouveau_i2c_port *(*find_type)(struct nouveau_i2c *, u16 type); | 65 | struct nouveau_i2c_port *(*find_type)(struct nouveau_i2c *, u16 type); |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ltc.h b/drivers/gpu/drm/nouveau/core/include/subdev/ltc.h new file mode 100644 index 000000000000..b909a7363f6b --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/ltc.h | |||
@@ -0,0 +1,35 @@ | |||
1 | #ifndef __NOUVEAU_LTC_H__ | ||
2 | #define __NOUVEAU_LTC_H__ | ||
3 | |||
4 | #include <core/subdev.h> | ||
5 | #include <core/device.h> | ||
6 | |||
7 | #define NOUVEAU_LTC_MAX_ZBC_CNT 16 | ||
8 | |||
9 | struct nouveau_mm_node; | ||
10 | |||
11 | struct nouveau_ltc { | ||
12 | struct nouveau_subdev base; | ||
13 | |||
14 | int (*tags_alloc)(struct nouveau_ltc *, u32 count, | ||
15 | struct nouveau_mm_node **); | ||
16 | void (*tags_free)(struct nouveau_ltc *, struct nouveau_mm_node **); | ||
17 | void (*tags_clear)(struct nouveau_ltc *, u32 first, u32 count); | ||
18 | |||
19 | int zbc_min; | ||
20 | int zbc_max; | ||
21 | int (*zbc_color_get)(struct nouveau_ltc *, int index, const u32[4]); | ||
22 | int (*zbc_depth_get)(struct nouveau_ltc *, int index, const u32); | ||
23 | }; | ||
24 | |||
25 | static inline struct nouveau_ltc * | ||
26 | nouveau_ltc(void *obj) | ||
27 | { | ||
28 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_LTC]; | ||
29 | } | ||
30 | |||
31 | extern struct nouveau_oclass *gf100_ltc_oclass; | ||
32 | extern struct nouveau_oclass *gk104_ltc_oclass; | ||
33 | extern struct nouveau_oclass *gm107_ltc_oclass; | ||
34 | |||
35 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h b/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h deleted file mode 100644 index c9c1950b7743..000000000000 --- a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h +++ /dev/null | |||
@@ -1,41 +0,0 @@ | |||
1 | #ifndef __NOUVEAU_LTCG_H__ | ||
2 | #define __NOUVEAU_LTCG_H__ | ||
3 | |||
4 | #include <core/subdev.h> | ||
5 | #include <core/device.h> | ||
6 | |||
7 | struct nouveau_mm_node; | ||
8 | |||
9 | struct nouveau_ltcg { | ||
10 | struct nouveau_subdev base; | ||
11 | |||
12 | int (*tags_alloc)(struct nouveau_ltcg *, u32 count, | ||
13 | struct nouveau_mm_node **); | ||
14 | void (*tags_free)(struct nouveau_ltcg *, struct nouveau_mm_node **); | ||
15 | void (*tags_clear)(struct nouveau_ltcg *, u32 first, u32 count); | ||
16 | }; | ||
17 | |||
18 | static inline struct nouveau_ltcg * | ||
19 | nouveau_ltcg(void *obj) | ||
20 | { | ||
21 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_LTCG]; | ||
22 | } | ||
23 | |||
24 | #define nouveau_ltcg_create(p,e,o,d) \ | ||
25 | nouveau_subdev_create_((p), (e), (o), 0, "PLTCG", "level2", \ | ||
26 | sizeof(**d), (void **)d) | ||
27 | #define nouveau_ltcg_destroy(p) \ | ||
28 | nouveau_subdev_destroy(&(p)->base) | ||
29 | #define nouveau_ltcg_init(p) \ | ||
30 | nouveau_subdev_init(&(p)->base) | ||
31 | #define nouveau_ltcg_fini(p,s) \ | ||
32 | nouveau_subdev_fini(&(p)->base, (s)) | ||
33 | |||
34 | #define _nouveau_ltcg_dtor _nouveau_subdev_dtor | ||
35 | #define _nouveau_ltcg_init _nouveau_subdev_init | ||
36 | #define _nouveau_ltcg_fini _nouveau_subdev_fini | ||
37 | |||
38 | extern struct nouveau_oclass *gf100_ltcg_oclass; | ||
39 | extern struct nouveau_oclass *gm107_ltcg_oclass; | ||
40 | |||
41 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h index 72b176831be6..568e4dfc5e9e 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h | |||
@@ -4,15 +4,11 @@ | |||
4 | #include <core/subdev.h> | 4 | #include <core/subdev.h> |
5 | #include <core/device.h> | 5 | #include <core/device.h> |
6 | 6 | ||
7 | struct nouveau_mc_intr { | ||
8 | u32 stat; | ||
9 | u32 unit; | ||
10 | }; | ||
11 | |||
12 | struct nouveau_mc { | 7 | struct nouveau_mc { |
13 | struct nouveau_subdev base; | 8 | struct nouveau_subdev base; |
14 | bool use_msi; | 9 | bool use_msi; |
15 | unsigned int irq; | 10 | unsigned int irq; |
11 | void (*unk260)(struct nouveau_mc *, u32); | ||
16 | }; | 12 | }; |
17 | 13 | ||
18 | static inline struct nouveau_mc * | 14 | static inline struct nouveau_mc * |
@@ -21,30 +17,6 @@ nouveau_mc(void *obj) | |||
21 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC]; | 17 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC]; |
22 | } | 18 | } |
23 | 19 | ||
24 | #define nouveau_mc_create(p,e,o,d) \ | ||
25 | nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d) | ||
26 | #define nouveau_mc_destroy(p) ({ \ | ||
27 | struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \ | ||
28 | }) | ||
29 | #define nouveau_mc_init(p) ({ \ | ||
30 | struct nouveau_mc *pmc = (p); _nouveau_mc_init(nv_object(pmc)); \ | ||
31 | }) | ||
32 | #define nouveau_mc_fini(p,s) ({ \ | ||
33 | struct nouveau_mc *pmc = (p); _nouveau_mc_fini(nv_object(pmc), (s)); \ | ||
34 | }) | ||
35 | |||
36 | int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *, | ||
37 | struct nouveau_oclass *, int, void **); | ||
38 | void _nouveau_mc_dtor(struct nouveau_object *); | ||
39 | int _nouveau_mc_init(struct nouveau_object *); | ||
40 | int _nouveau_mc_fini(struct nouveau_object *, bool); | ||
41 | |||
42 | struct nouveau_mc_oclass { | ||
43 | struct nouveau_oclass base; | ||
44 | const struct nouveau_mc_intr *intr; | ||
45 | void (*msi_rearm)(struct nouveau_mc *); | ||
46 | }; | ||
47 | |||
48 | extern struct nouveau_oclass *nv04_mc_oclass; | 20 | extern struct nouveau_oclass *nv04_mc_oclass; |
49 | extern struct nouveau_oclass *nv40_mc_oclass; | 21 | extern struct nouveau_oclass *nv40_mc_oclass; |
50 | extern struct nouveau_oclass *nv44_mc_oclass; | 22 | extern struct nouveau_oclass *nv44_mc_oclass; |
@@ -54,5 +26,6 @@ extern struct nouveau_oclass *nv94_mc_oclass; | |||
54 | extern struct nouveau_oclass *nv98_mc_oclass; | 26 | extern struct nouveau_oclass *nv98_mc_oclass; |
55 | extern struct nouveau_oclass *nvc0_mc_oclass; | 27 | extern struct nouveau_oclass *nvc0_mc_oclass; |
56 | extern struct nouveau_oclass *nvc3_mc_oclass; | 28 | extern struct nouveau_oclass *nvc3_mc_oclass; |
29 | extern struct nouveau_oclass *gk20a_mc_oclass; | ||
57 | 30 | ||
58 | #endif | 31 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h index c5c92cbed33f..f73feec151db 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h | |||
@@ -8,18 +8,6 @@ struct nouveau_pwr { | |||
8 | struct nouveau_subdev base; | 8 | struct nouveau_subdev base; |
9 | 9 | ||
10 | struct { | 10 | struct { |
11 | u32 limit; | ||
12 | u32 *data; | ||
13 | u32 size; | ||
14 | } code; | ||
15 | |||
16 | struct { | ||
17 | u32 limit; | ||
18 | u32 *data; | ||
19 | u32 size; | ||
20 | } data; | ||
21 | |||
22 | struct { | ||
23 | u32 base; | 11 | u32 base; |
24 | u32 size; | 12 | u32 size; |
25 | } send; | 13 | } send; |
@@ -35,7 +23,8 @@ struct nouveau_pwr { | |||
35 | u32 data[2]; | 23 | u32 data[2]; |
36 | } recv; | 24 | } recv; |
37 | 25 | ||
38 | int (*message)(struct nouveau_pwr *, u32[2], u32, u32, u32, u32); | 26 | int (*message)(struct nouveau_pwr *, u32[2], u32, u32, u32, u32); |
27 | void (*pgob)(struct nouveau_pwr *, bool); | ||
39 | }; | 28 | }; |
40 | 29 | ||
41 | static inline struct nouveau_pwr * | 30 | static inline struct nouveau_pwr * |
@@ -44,29 +33,11 @@ nouveau_pwr(void *obj) | |||
44 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_PWR]; | 33 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_PWR]; |
45 | } | 34 | } |
46 | 35 | ||
47 | #define nouveau_pwr_create(p, e, o, d) \ | 36 | extern struct nouveau_oclass *nva3_pwr_oclass; |
48 | nouveau_pwr_create_((p), (e), (o), sizeof(**d), (void **)d) | 37 | extern struct nouveau_oclass *nvc0_pwr_oclass; |
49 | #define nouveau_pwr_destroy(p) \ | 38 | extern struct nouveau_oclass *nvd0_pwr_oclass; |
50 | nouveau_subdev_destroy(&(p)->base) | 39 | extern struct nouveau_oclass *gk104_pwr_oclass; |
51 | #define nouveau_pwr_init(p) ({ \ | 40 | extern struct nouveau_oclass *nv108_pwr_oclass; |
52 | struct nouveau_pwr *ppwr = (p); \ | ||
53 | _nouveau_pwr_init(nv_object(ppwr)); \ | ||
54 | }) | ||
55 | #define nouveau_pwr_fini(p,s) ({ \ | ||
56 | struct nouveau_pwr *ppwr = (p); \ | ||
57 | _nouveau_pwr_fini(nv_object(ppwr), (s)); \ | ||
58 | }) | ||
59 | |||
60 | int nouveau_pwr_create_(struct nouveau_object *, struct nouveau_object *, | ||
61 | struct nouveau_oclass *, int, void **); | ||
62 | #define _nouveau_pwr_dtor _nouveau_subdev_dtor | ||
63 | int _nouveau_pwr_init(struct nouveau_object *); | ||
64 | int _nouveau_pwr_fini(struct nouveau_object *, bool); | ||
65 | |||
66 | extern struct nouveau_oclass nva3_pwr_oclass; | ||
67 | extern struct nouveau_oclass nvc0_pwr_oclass; | ||
68 | extern struct nouveau_oclass nvd0_pwr_oclass; | ||
69 | extern struct nouveau_oclass nv108_pwr_oclass; | ||
70 | 41 | ||
71 | /* interface to MEMX process running on PPWR */ | 42 | /* interface to MEMX process running on PPWR */ |
72 | struct nouveau_memx; | 43 | struct nouveau_memx; |
diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h index d0ced94ca54c..ccfa21d72ddc 100644 --- a/drivers/gpu/drm/nouveau/core/os.h +++ b/drivers/gpu/drm/nouveau/core/os.h | |||
@@ -21,6 +21,8 @@ | |||
21 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
22 | #include <linux/log2.h> | 22 | #include <linux/log2.h> |
23 | #include <linux/pm_runtime.h> | 23 | #include <linux/pm_runtime.h> |
24 | #include <linux/power_supply.h> | ||
25 | #include <linux/clk.h> | ||
24 | 26 | ||
25 | #include <asm/unaligned.h> | 27 | #include <asm/unaligned.h> |
26 | 28 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/bar/gk20a.c new file mode 100644 index 000000000000..bf877af9d3bd --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bar/gk20a.c | |||
@@ -0,0 +1,54 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
20 | * DEALINGS IN THE SOFTWARE. | ||
21 | */ | ||
22 | |||
23 | #include <subdev/bar.h> | ||
24 | |||
25 | #include "priv.h" | ||
26 | |||
27 | int | ||
28 | gk20a_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | ||
29 | struct nouveau_oclass *oclass, void *data, u32 size, | ||
30 | struct nouveau_object **pobject) | ||
31 | { | ||
32 | struct nouveau_bar *bar; | ||
33 | int ret; | ||
34 | |||
35 | ret = nvc0_bar_ctor(parent, engine, oclass, data, size, pobject); | ||
36 | if (ret) | ||
37 | return ret; | ||
38 | |||
39 | bar = (struct nouveau_bar *)*pobject; | ||
40 | bar->iomap_uncached = true; | ||
41 | |||
42 | return 0; | ||
43 | } | ||
44 | |||
45 | struct nouveau_oclass | ||
46 | gk20a_bar_oclass = { | ||
47 | .handle = NV_SUBDEV(BAR, 0xea), | ||
48 | .ofuncs = &(struct nouveau_ofuncs) { | ||
49 | .ctor = gk20a_bar_ctor, | ||
50 | .dtor = nvc0_bar_dtor, | ||
51 | .init = nvc0_bar_init, | ||
52 | .fini = _nouveau_bar_fini, | ||
53 | }, | ||
54 | }; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c index ca8139b9ab27..0a44459844e3 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c | |||
@@ -133,7 +133,7 @@ nvc0_bar_init_vm(struct nvc0_bar_priv *priv, struct nvc0_bar_priv_vm *bar_vm, | |||
133 | return 0; | 133 | return 0; |
134 | } | 134 | } |
135 | 135 | ||
136 | static int | 136 | int |
137 | nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | 137 | nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine, |
138 | struct nouveau_oclass *oclass, void *data, u32 size, | 138 | struct nouveau_oclass *oclass, void *data, u32 size, |
139 | struct nouveau_object **pobject) | 139 | struct nouveau_object **pobject) |
@@ -169,7 +169,7 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
169 | return 0; | 169 | return 0; |
170 | } | 170 | } |
171 | 171 | ||
172 | static void | 172 | void |
173 | nvc0_bar_dtor(struct nouveau_object *object) | 173 | nvc0_bar_dtor(struct nouveau_object *object) |
174 | { | 174 | { |
175 | struct nvc0_bar_priv *priv = (void *)object; | 175 | struct nvc0_bar_priv *priv = (void *)object; |
@@ -188,7 +188,7 @@ nvc0_bar_dtor(struct nouveau_object *object) | |||
188 | nouveau_bar_destroy(&priv->base); | 188 | nouveau_bar_destroy(&priv->base); |
189 | } | 189 | } |
190 | 190 | ||
191 | static int | 191 | int |
192 | nvc0_bar_init(struct nouveau_object *object) | 192 | nvc0_bar_init(struct nouveau_object *object) |
193 | { | 193 | { |
194 | struct nvc0_bar_priv *priv = (void *)object; | 194 | struct nvc0_bar_priv *priv = (void *)object; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h index ffad8f337ead..3ee8b1476d00 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h +++ b/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h | |||
@@ -23,4 +23,10 @@ int nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *, | |||
23 | 23 | ||
24 | void nv84_bar_flush(struct nouveau_bar *); | 24 | void nv84_bar_flush(struct nouveau_bar *); |
25 | 25 | ||
26 | int nvc0_bar_ctor(struct nouveau_object *, struct nouveau_object *, | ||
27 | struct nouveau_oclass *, void *, u32, | ||
28 | struct nouveau_object **); | ||
29 | void nvc0_bar_dtor(struct nouveau_object *); | ||
30 | int nvc0_bar_init(struct nouveau_object *); | ||
31 | |||
26 | #endif | 32 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c index 22351f594d2a..a276a711294a 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c | |||
@@ -90,16 +90,20 @@ nouveau_cstate_prog(struct nouveau_clock *clk, | |||
90 | cstate = &pstate->base; | 90 | cstate = &pstate->base; |
91 | } | 91 | } |
92 | 92 | ||
93 | ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, +1); | 93 | if (ptherm) { |
94 | if (ret && ret != -ENODEV) { | 94 | ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, +1); |
95 | nv_error(clk, "failed to raise fan speed: %d\n", ret); | 95 | if (ret && ret != -ENODEV) { |
96 | return ret; | 96 | nv_error(clk, "failed to raise fan speed: %d\n", ret); |
97 | return ret; | ||
98 | } | ||
97 | } | 99 | } |
98 | 100 | ||
99 | ret = volt->set_id(volt, cstate->voltage, +1); | 101 | if (volt) { |
100 | if (ret && ret != -ENODEV) { | 102 | ret = volt->set_id(volt, cstate->voltage, +1); |
101 | nv_error(clk, "failed to raise voltage: %d\n", ret); | 103 | if (ret && ret != -ENODEV) { |
102 | return ret; | 104 | nv_error(clk, "failed to raise voltage: %d\n", ret); |
105 | return ret; | ||
106 | } | ||
103 | } | 107 | } |
104 | 108 | ||
105 | ret = clk->calc(clk, cstate); | 109 | ret = clk->calc(clk, cstate); |
@@ -108,13 +112,17 @@ nouveau_cstate_prog(struct nouveau_clock *clk, | |||
108 | clk->tidy(clk); | 112 | clk->tidy(clk); |
109 | } | 113 | } |
110 | 114 | ||
111 | ret = volt->set_id(volt, cstate->voltage, -1); | 115 | if (volt) { |
112 | if (ret && ret != -ENODEV) | 116 | ret = volt->set_id(volt, cstate->voltage, -1); |
113 | nv_error(clk, "failed to lower voltage: %d\n", ret); | 117 | if (ret && ret != -ENODEV) |
118 | nv_error(clk, "failed to lower voltage: %d\n", ret); | ||
119 | } | ||
114 | 120 | ||
115 | ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, -1); | 121 | if (ptherm) { |
116 | if (ret && ret != -ENODEV) | 122 | ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, -1); |
117 | nv_error(clk, "failed to lower fan speed: %d\n", ret); | 123 | if (ret && ret != -ENODEV) |
124 | nv_error(clk, "failed to lower fan speed: %d\n", ret); | ||
125 | } | ||
118 | 126 | ||
119 | return 0; | 127 | return 0; |
120 | } | 128 | } |
@@ -194,16 +202,23 @@ nouveau_pstate_prog(struct nouveau_clock *clk, int pstatei) | |||
194 | return nouveau_cstate_prog(clk, pstate, 0); | 202 | return nouveau_cstate_prog(clk, pstate, 0); |
195 | } | 203 | } |
196 | 204 | ||
197 | static int | 205 | static void |
198 | nouveau_pstate_calc(struct nouveau_clock *clk) | 206 | nouveau_pstate_work(struct work_struct *work) |
199 | { | 207 | { |
200 | int pstate, ret = 0; | 208 | struct nouveau_clock *clk = container_of(work, typeof(*clk), work); |
209 | int pstate; | ||
201 | 210 | ||
202 | nv_trace(clk, "P %d U %d A %d T %d D %d\n", clk->pstate, | 211 | if (!atomic_xchg(&clk->waiting, 0)) |
203 | clk->ustate, clk->astate, clk->tstate, clk->dstate); | 212 | return; |
213 | clk->pwrsrc = power_supply_is_system_supplied(); | ||
204 | 214 | ||
205 | if (clk->state_nr && clk->ustate != -1) { | 215 | nv_trace(clk, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n", |
206 | pstate = (clk->ustate < 0) ? clk->astate : clk->ustate; | 216 | clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc, |
217 | clk->astate, clk->tstate, clk->dstate); | ||
218 | |||
219 | pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc; | ||
220 | if (clk->state_nr && pstate != -1) { | ||
221 | pstate = (pstate < 0) ? clk->astate : pstate; | ||
207 | pstate = min(pstate, clk->state_nr - 1 - clk->tstate); | 222 | pstate = min(pstate, clk->state_nr - 1 - clk->tstate); |
208 | pstate = max(pstate, clk->dstate); | 223 | pstate = max(pstate, clk->dstate); |
209 | } else { | 224 | } else { |
@@ -211,9 +226,26 @@ nouveau_pstate_calc(struct nouveau_clock *clk) | |||
211 | } | 226 | } |
212 | 227 | ||
213 | nv_trace(clk, "-> %d\n", pstate); | 228 | nv_trace(clk, "-> %d\n", pstate); |
214 | if (pstate != clk->pstate) | 229 | if (pstate != clk->pstate) { |
215 | ret = nouveau_pstate_prog(clk, pstate); | 230 | int ret = nouveau_pstate_prog(clk, pstate); |
216 | return ret; | 231 | if (ret) { |
232 | nv_error(clk, "error setting pstate %d: %d\n", | ||
233 | pstate, ret); | ||
234 | } | ||
235 | } | ||
236 | |||
237 | wake_up_all(&clk->wait); | ||
238 | nvkm_notify_get(&clk->pwrsrc_ntfy); | ||
239 | } | ||
240 | |||
241 | static int | ||
242 | nouveau_pstate_calc(struct nouveau_clock *clk, bool wait) | ||
243 | { | ||
244 | atomic_set(&clk->waiting, 1); | ||
245 | schedule_work(&clk->work); | ||
246 | if (wait) | ||
247 | wait_event(clk->wait, !atomic_read(&clk->waiting)); | ||
248 | return 0; | ||
217 | } | 249 | } |
218 | 250 | ||
219 | static void | 251 | static void |
@@ -361,17 +393,40 @@ nouveau_clock_ustate_update(struct nouveau_clock *clk, int req) | |||
361 | req = i; | 393 | req = i; |
362 | } | 394 | } |
363 | 395 | ||
364 | clk->ustate = req; | 396 | return req + 2; |
365 | return 0; | 397 | } |
398 | |||
399 | static int | ||
400 | nouveau_clock_nstate(struct nouveau_clock *clk, const char *mode, int arglen) | ||
401 | { | ||
402 | int ret = 1; | ||
403 | |||
404 | if (strncasecmpz(mode, "disabled", arglen)) { | ||
405 | char save = mode[arglen]; | ||
406 | long v; | ||
407 | |||
408 | ((char *)mode)[arglen] = '\0'; | ||
409 | if (!kstrtol(mode, 0, &v)) { | ||
410 | ret = nouveau_clock_ustate_update(clk, v); | ||
411 | if (ret < 0) | ||
412 | ret = 1; | ||
413 | } | ||
414 | ((char *)mode)[arglen] = save; | ||
415 | } | ||
416 | |||
417 | return ret - 2; | ||
366 | } | 418 | } |
367 | 419 | ||
368 | int | 420 | int |
369 | nouveau_clock_ustate(struct nouveau_clock *clk, int req) | 421 | nouveau_clock_ustate(struct nouveau_clock *clk, int req, int pwr) |
370 | { | 422 | { |
371 | int ret = nouveau_clock_ustate_update(clk, req); | 423 | int ret = nouveau_clock_ustate_update(clk, req); |
372 | if (ret) | 424 | if (ret >= 0) { |
373 | return ret; | 425 | if (ret -= 2, pwr) clk->ustate_ac = ret; |
374 | return nouveau_pstate_calc(clk); | 426 | else clk->ustate_dc = ret; |
427 | return nouveau_pstate_calc(clk, true); | ||
428 | } | ||
429 | return ret; | ||
375 | } | 430 | } |
376 | 431 | ||
377 | int | 432 | int |
@@ -381,7 +436,7 @@ nouveau_clock_astate(struct nouveau_clock *clk, int req, int rel) | |||
381 | if ( rel) clk->astate += rel; | 436 | if ( rel) clk->astate += rel; |
382 | clk->astate = min(clk->astate, clk->state_nr - 1); | 437 | clk->astate = min(clk->astate, clk->state_nr - 1); |
383 | clk->astate = max(clk->astate, 0); | 438 | clk->astate = max(clk->astate, 0); |
384 | return nouveau_pstate_calc(clk); | 439 | return nouveau_pstate_calc(clk, true); |
385 | } | 440 | } |
386 | 441 | ||
387 | int | 442 | int |
@@ -391,7 +446,7 @@ nouveau_clock_tstate(struct nouveau_clock *clk, int req, int rel) | |||
391 | if ( rel) clk->tstate += rel; | 446 | if ( rel) clk->tstate += rel; |
392 | clk->tstate = min(clk->tstate, 0); | 447 | clk->tstate = min(clk->tstate, 0); |
393 | clk->tstate = max(clk->tstate, -(clk->state_nr - 1)); | 448 | clk->tstate = max(clk->tstate, -(clk->state_nr - 1)); |
394 | return nouveau_pstate_calc(clk); | 449 | return nouveau_pstate_calc(clk, true); |
395 | } | 450 | } |
396 | 451 | ||
397 | int | 452 | int |
@@ -401,12 +456,30 @@ nouveau_clock_dstate(struct nouveau_clock *clk, int req, int rel) | |||
401 | if ( rel) clk->dstate += rel; | 456 | if ( rel) clk->dstate += rel; |
402 | clk->dstate = min(clk->dstate, clk->state_nr - 1); | 457 | clk->dstate = min(clk->dstate, clk->state_nr - 1); |
403 | clk->dstate = max(clk->dstate, 0); | 458 | clk->dstate = max(clk->dstate, 0); |
404 | return nouveau_pstate_calc(clk); | 459 | return nouveau_pstate_calc(clk, true); |
460 | } | ||
461 | |||
462 | static int | ||
463 | nouveau_clock_pwrsrc(struct nvkm_notify *notify) | ||
464 | { | ||
465 | struct nouveau_clock *clk = | ||
466 | container_of(notify, typeof(*clk), pwrsrc_ntfy); | ||
467 | nouveau_pstate_calc(clk, false); | ||
468 | return NVKM_NOTIFY_DROP; | ||
405 | } | 469 | } |
406 | 470 | ||
407 | /****************************************************************************** | 471 | /****************************************************************************** |
408 | * subdev base class implementation | 472 | * subdev base class implementation |
409 | *****************************************************************************/ | 473 | *****************************************************************************/ |
474 | |||
475 | int | ||
476 | _nouveau_clock_fini(struct nouveau_object *object, bool suspend) | ||
477 | { | ||
478 | struct nouveau_clock *clk = (void *)object; | ||
479 | nvkm_notify_put(&clk->pwrsrc_ntfy); | ||
480 | return nouveau_subdev_fini(&clk->base, suspend); | ||
481 | } | ||
482 | |||
410 | int | 483 | int |
411 | _nouveau_clock_init(struct nouveau_object *object) | 484 | _nouveau_clock_init(struct nouveau_object *object) |
412 | { | 485 | { |
@@ -414,6 +487,10 @@ _nouveau_clock_init(struct nouveau_object *object) | |||
414 | struct nouveau_clocks *clock = clk->domains; | 487 | struct nouveau_clocks *clock = clk->domains; |
415 | int ret; | 488 | int ret; |
416 | 489 | ||
490 | ret = nouveau_subdev_init(&clk->base); | ||
491 | if (ret) | ||
492 | return ret; | ||
493 | |||
417 | memset(&clk->bstate, 0x00, sizeof(clk->bstate)); | 494 | memset(&clk->bstate, 0x00, sizeof(clk->bstate)); |
418 | INIT_LIST_HEAD(&clk->bstate.list); | 495 | INIT_LIST_HEAD(&clk->bstate.list); |
419 | clk->bstate.pstate = 0xff; | 496 | clk->bstate.pstate = 0xff; |
@@ -434,7 +511,7 @@ _nouveau_clock_init(struct nouveau_object *object) | |||
434 | clk->tstate = 0; | 511 | clk->tstate = 0; |
435 | clk->dstate = 0; | 512 | clk->dstate = 0; |
436 | clk->pstate = -1; | 513 | clk->pstate = -1; |
437 | nouveau_pstate_calc(clk); | 514 | nouveau_pstate_calc(clk, true); |
438 | return 0; | 515 | return 0; |
439 | } | 516 | } |
440 | 517 | ||
@@ -444,6 +521,8 @@ _nouveau_clock_dtor(struct nouveau_object *object) | |||
444 | struct nouveau_clock *clk = (void *)object; | 521 | struct nouveau_clock *clk = (void *)object; |
445 | struct nouveau_pstate *pstate, *temp; | 522 | struct nouveau_pstate *pstate, *temp; |
446 | 523 | ||
524 | nvkm_notify_fini(&clk->pwrsrc_ntfy); | ||
525 | |||
447 | list_for_each_entry_safe(pstate, temp, &clk->states, head) { | 526 | list_for_each_entry_safe(pstate, temp, &clk->states, head) { |
448 | nouveau_pstate_del(pstate); | 527 | nouveau_pstate_del(pstate); |
449 | } | 528 | } |
@@ -456,6 +535,7 @@ nouveau_clock_create_(struct nouveau_object *parent, | |||
456 | struct nouveau_object *engine, | 535 | struct nouveau_object *engine, |
457 | struct nouveau_oclass *oclass, | 536 | struct nouveau_oclass *oclass, |
458 | struct nouveau_clocks *clocks, | 537 | struct nouveau_clocks *clocks, |
538 | struct nouveau_pstate *pstates, int nb_pstates, | ||
459 | bool allow_reclock, | 539 | bool allow_reclock, |
460 | int length, void **object) | 540 | int length, void **object) |
461 | { | 541 | { |
@@ -472,29 +552,46 @@ nouveau_clock_create_(struct nouveau_object *parent, | |||
472 | 552 | ||
473 | INIT_LIST_HEAD(&clk->states); | 553 | INIT_LIST_HEAD(&clk->states); |
474 | clk->domains = clocks; | 554 | clk->domains = clocks; |
475 | clk->ustate = -1; | 555 | clk->ustate_ac = -1; |
556 | clk->ustate_dc = -1; | ||
557 | |||
558 | INIT_WORK(&clk->work, nouveau_pstate_work); | ||
559 | init_waitqueue_head(&clk->wait); | ||
560 | atomic_set(&clk->waiting, 0); | ||
476 | 561 | ||
477 | idx = 0; | 562 | /* If no pstates are provided, try and fetch them from the BIOS */ |
478 | do { | 563 | if (!pstates) { |
479 | ret = nouveau_pstate_new(clk, idx++); | 564 | idx = 0; |
480 | } while (ret == 0); | 565 | do { |
566 | ret = nouveau_pstate_new(clk, idx++); | ||
567 | } while (ret == 0); | ||
568 | } else { | ||
569 | for (idx = 0; idx < nb_pstates; idx++) | ||
570 | list_add_tail(&pstates[idx].head, &clk->states); | ||
571 | clk->state_nr = nb_pstates; | ||
572 | } | ||
481 | 573 | ||
482 | clk->allow_reclock = allow_reclock; | 574 | clk->allow_reclock = allow_reclock; |
483 | 575 | ||
576 | ret = nvkm_notify_init(&device->event, nouveau_clock_pwrsrc, true, | ||
577 | NULL, 0, 0, &clk->pwrsrc_ntfy); | ||
578 | if (ret) | ||
579 | return ret; | ||
580 | |||
484 | mode = nouveau_stropt(device->cfgopt, "NvClkMode", &arglen); | 581 | mode = nouveau_stropt(device->cfgopt, "NvClkMode", &arglen); |
485 | if (mode) { | 582 | if (mode) { |
486 | if (!strncasecmpz(mode, "disabled", arglen)) { | 583 | clk->ustate_ac = nouveau_clock_nstate(clk, mode, arglen); |
487 | clk->ustate = -1; | 584 | clk->ustate_dc = nouveau_clock_nstate(clk, mode, arglen); |
488 | } else { | ||
489 | char save = mode[arglen]; | ||
490 | long v; | ||
491 | |||
492 | ((char *)mode)[arglen] = '\0'; | ||
493 | if (!kstrtol(mode, 0, &v)) | ||
494 | nouveau_clock_ustate_update(clk, v); | ||
495 | ((char *)mode)[arglen] = save; | ||
496 | } | ||
497 | } | 585 | } |
498 | 586 | ||
587 | mode = nouveau_stropt(device->cfgopt, "NvClkModeAC", &arglen); | ||
588 | if (mode) | ||
589 | clk->ustate_ac = nouveau_clock_nstate(clk, mode, arglen); | ||
590 | |||
591 | mode = nouveau_stropt(device->cfgopt, "NvClkModeDC", &arglen); | ||
592 | if (mode) | ||
593 | clk->ustate_dc = nouveau_clock_nstate(clk, mode, arglen); | ||
594 | |||
595 | |||
499 | return 0; | 596 | return 0; |
500 | } | 597 | } |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c new file mode 100644 index 000000000000..425a8d5e9129 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c | |||
@@ -0,0 +1,665 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
20 | * DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #define MHZ (1000 * 1000) | ||
27 | |||
28 | #define MASK(w) ((1 << w) - 1) | ||
29 | |||
30 | #define SYS_GPCPLL_CFG_BASE 0x00137000 | ||
31 | #define GPC_BCASE_GPCPLL_CFG_BASE 0x00132800 | ||
32 | |||
33 | #define GPCPLL_CFG (SYS_GPCPLL_CFG_BASE + 0) | ||
34 | #define GPCPLL_CFG_ENABLE BIT(0) | ||
35 | #define GPCPLL_CFG_IDDQ BIT(1) | ||
36 | #define GPCPLL_CFG_LOCK_DET_OFF BIT(4) | ||
37 | #define GPCPLL_CFG_LOCK BIT(17) | ||
38 | |||
39 | #define GPCPLL_COEFF (SYS_GPCPLL_CFG_BASE + 4) | ||
40 | #define GPCPLL_COEFF_M_SHIFT 0 | ||
41 | #define GPCPLL_COEFF_M_WIDTH 8 | ||
42 | #define GPCPLL_COEFF_N_SHIFT 8 | ||
43 | #define GPCPLL_COEFF_N_WIDTH 8 | ||
44 | #define GPCPLL_COEFF_P_SHIFT 16 | ||
45 | #define GPCPLL_COEFF_P_WIDTH 6 | ||
46 | |||
47 | #define GPCPLL_CFG2 (SYS_GPCPLL_CFG_BASE + 0xc) | ||
48 | #define GPCPLL_CFG2_SETUP2_SHIFT 16 | ||
49 | #define GPCPLL_CFG2_PLL_STEPA_SHIFT 24 | ||
50 | |||
51 | #define GPCPLL_CFG3 (SYS_GPCPLL_CFG_BASE + 0x18) | ||
52 | #define GPCPLL_CFG3_PLL_STEPB_SHIFT 16 | ||
53 | |||
54 | #define GPCPLL_NDIV_SLOWDOWN (SYS_GPCPLL_CFG_BASE + 0x1c) | ||
55 | #define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT 0 | ||
56 | #define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT 8 | ||
57 | #define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT 16 | ||
58 | #define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT 22 | ||
59 | #define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT 31 | ||
60 | |||
61 | #define SEL_VCO (SYS_GPCPLL_CFG_BASE + 0x100) | ||
62 | #define SEL_VCO_GPC2CLK_OUT_SHIFT 0 | ||
63 | |||
64 | #define GPC2CLK_OUT (SYS_GPCPLL_CFG_BASE + 0x250) | ||
65 | #define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH 1 | ||
66 | #define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT 31 | ||
67 | #define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1 | ||
68 | #define GPC2CLK_OUT_VCODIV_WIDTH 6 | ||
69 | #define GPC2CLK_OUT_VCODIV_SHIFT 8 | ||
70 | #define GPC2CLK_OUT_VCODIV1 0 | ||
71 | #define GPC2CLK_OUT_VCODIV_MASK (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \ | ||
72 | GPC2CLK_OUT_VCODIV_SHIFT) | ||
73 | #define GPC2CLK_OUT_BYPDIV_WIDTH 6 | ||
74 | #define GPC2CLK_OUT_BYPDIV_SHIFT 0 | ||
75 | #define GPC2CLK_OUT_BYPDIV31 0x3c | ||
76 | #define GPC2CLK_OUT_INIT_MASK ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \ | ||
77 | GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\ | ||
78 | | (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\ | ||
79 | | (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT)) | ||
80 | #define GPC2CLK_OUT_INIT_VAL ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \ | ||
81 | GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \ | ||
82 | | (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \ | ||
83 | | (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT)) | ||
84 | |||
85 | #define GPC_BCAST_NDIV_SLOWDOWN_DEBUG (GPC_BCASE_GPCPLL_CFG_BASE + 0xa0) | ||
86 | #define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT 24 | ||
87 | #define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \ | ||
88 | (0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT) | ||
89 | |||
90 | #include <subdev/clock.h> | ||
91 | #include <subdev/timer.h> | ||
92 | |||
93 | #ifdef __KERNEL__ | ||
94 | #include <nouveau_platform.h> | ||
95 | #endif | ||
96 | |||
97 | static const u8 pl_to_div[] = { | ||
98 | /* PL: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */ | ||
99 | /* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32, | ||
100 | }; | ||
101 | |||
102 | /* All frequencies in Mhz */ | ||
103 | struct gk20a_clk_pllg_params { | ||
104 | u32 min_vco, max_vco; | ||
105 | u32 min_u, max_u; | ||
106 | u32 min_m, max_m; | ||
107 | u32 min_n, max_n; | ||
108 | u32 min_pl, max_pl; | ||
109 | }; | ||
110 | |||
111 | static const struct gk20a_clk_pllg_params gk20a_pllg_params = { | ||
112 | .min_vco = 1000, .max_vco = 1700, | ||
113 | .min_u = 12, .max_u = 38, | ||
114 | .min_m = 1, .max_m = 255, | ||
115 | .min_n = 8, .max_n = 255, | ||
116 | .min_pl = 1, .max_pl = 32, | ||
117 | }; | ||
118 | |||
119 | struct gk20a_clock_priv { | ||
120 | struct nouveau_clock base; | ||
121 | const struct gk20a_clk_pllg_params *params; | ||
122 | u32 m, n, pl; | ||
123 | u32 parent_rate; | ||
124 | }; | ||
125 | #define to_gk20a_clock(base) container_of(base, struct gk20a_clock_priv, base) | ||
126 | |||
127 | static void | ||
128 | gk20a_pllg_read_mnp(struct gk20a_clock_priv *priv) | ||
129 | { | ||
130 | u32 val; | ||
131 | |||
132 | val = nv_rd32(priv, GPCPLL_COEFF); | ||
133 | priv->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH); | ||
134 | priv->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH); | ||
135 | priv->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH); | ||
136 | } | ||
137 | |||
138 | static u32 | ||
139 | gk20a_pllg_calc_rate(struct gk20a_clock_priv *priv) | ||
140 | { | ||
141 | u32 rate; | ||
142 | u32 divider; | ||
143 | |||
144 | rate = priv->parent_rate * priv->n; | ||
145 | divider = priv->m * pl_to_div[priv->pl]; | ||
146 | do_div(rate, divider); | ||
147 | |||
148 | return rate / 2; | ||
149 | } | ||
150 | |||
151 | static int | ||
152 | gk20a_pllg_calc_mnp(struct gk20a_clock_priv *priv, unsigned long rate) | ||
153 | { | ||
154 | u32 target_clk_f, ref_clk_f, target_freq; | ||
155 | u32 min_vco_f, max_vco_f; | ||
156 | u32 low_pl, high_pl, best_pl; | ||
157 | u32 target_vco_f, vco_f; | ||
158 | u32 best_m, best_n; | ||
159 | u32 u_f; | ||
160 | u32 m, n, n2; | ||
161 | u32 delta, lwv, best_delta = ~0; | ||
162 | u32 pl; | ||
163 | |||
164 | target_clk_f = rate * 2 / MHZ; | ||
165 | ref_clk_f = priv->parent_rate / MHZ; | ||
166 | |||
167 | max_vco_f = priv->params->max_vco; | ||
168 | min_vco_f = priv->params->min_vco; | ||
169 | best_m = priv->params->max_m; | ||
170 | best_n = priv->params->min_n; | ||
171 | best_pl = priv->params->min_pl; | ||
172 | |||
173 | target_vco_f = target_clk_f + target_clk_f / 50; | ||
174 | if (max_vco_f < target_vco_f) | ||
175 | max_vco_f = target_vco_f; | ||
176 | |||
177 | /* min_pl <= high_pl <= max_pl */ | ||
178 | high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f; | ||
179 | high_pl = min(high_pl, priv->params->max_pl); | ||
180 | high_pl = max(high_pl, priv->params->min_pl); | ||
181 | |||
182 | /* min_pl <= low_pl <= max_pl */ | ||
183 | low_pl = min_vco_f / target_vco_f; | ||
184 | low_pl = min(low_pl, priv->params->max_pl); | ||
185 | low_pl = max(low_pl, priv->params->min_pl); | ||
186 | |||
187 | /* Find Indices of high_pl and low_pl */ | ||
188 | for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) { | ||
189 | if (pl_to_div[pl] >= low_pl) { | ||
190 | low_pl = pl; | ||
191 | break; | ||
192 | } | ||
193 | } | ||
194 | for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) { | ||
195 | if (pl_to_div[pl] >= high_pl) { | ||
196 | high_pl = pl; | ||
197 | break; | ||
198 | } | ||
199 | } | ||
200 | |||
201 | nv_debug(priv, "low_PL %d(div%d), high_PL %d(div%d)", low_pl, | ||
202 | pl_to_div[low_pl], high_pl, pl_to_div[high_pl]); | ||
203 | |||
204 | /* Select lowest possible VCO */ | ||
205 | for (pl = low_pl; pl <= high_pl; pl++) { | ||
206 | target_vco_f = target_clk_f * pl_to_div[pl]; | ||
207 | for (m = priv->params->min_m; m <= priv->params->max_m; m++) { | ||
208 | u_f = ref_clk_f / m; | ||
209 | |||
210 | if (u_f < priv->params->min_u) | ||
211 | break; | ||
212 | if (u_f > priv->params->max_u) | ||
213 | continue; | ||
214 | |||
215 | n = (target_vco_f * m) / ref_clk_f; | ||
216 | n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f; | ||
217 | |||
218 | if (n > priv->params->max_n) | ||
219 | break; | ||
220 | |||
221 | for (; n <= n2; n++) { | ||
222 | if (n < priv->params->min_n) | ||
223 | continue; | ||
224 | if (n > priv->params->max_n) | ||
225 | break; | ||
226 | |||
227 | vco_f = ref_clk_f * n / m; | ||
228 | |||
229 | if (vco_f >= min_vco_f && vco_f <= max_vco_f) { | ||
230 | lwv = (vco_f + (pl_to_div[pl] / 2)) | ||
231 | / pl_to_div[pl]; | ||
232 | delta = abs(lwv - target_clk_f); | ||
233 | |||
234 | if (delta < best_delta) { | ||
235 | best_delta = delta; | ||
236 | best_m = m; | ||
237 | best_n = n; | ||
238 | best_pl = pl; | ||
239 | |||
240 | if (best_delta == 0) | ||
241 | goto found_match; | ||
242 | } | ||
243 | } | ||
244 | } | ||
245 | } | ||
246 | } | ||
247 | |||
248 | found_match: | ||
249 | WARN_ON(best_delta == ~0); | ||
250 | |||
251 | if (best_delta != 0) | ||
252 | nv_debug(priv, "no best match for target @ %dMHz on gpc_pll", | ||
253 | target_clk_f); | ||
254 | |||
255 | priv->m = best_m; | ||
256 | priv->n = best_n; | ||
257 | priv->pl = best_pl; | ||
258 | |||
259 | target_freq = gk20a_pllg_calc_rate(priv) / MHZ; | ||
260 | |||
261 | nv_debug(priv, "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n", | ||
262 | target_freq, priv->m, priv->n, priv->pl, pl_to_div[priv->pl]); | ||
263 | |||
264 | return 0; | ||
265 | } | ||
266 | |||
267 | static int | ||
268 | gk20a_pllg_slide(struct gk20a_clock_priv *priv, u32 n) | ||
269 | { | ||
270 | u32 val; | ||
271 | int ramp_timeout; | ||
272 | |||
273 | /* get old coefficients */ | ||
274 | val = nv_rd32(priv, GPCPLL_COEFF); | ||
275 | /* do nothing if NDIV is the same */ | ||
276 | if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH))) | ||
277 | return 0; | ||
278 | |||
279 | /* setup */ | ||
280 | nv_mask(priv, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT, | ||
281 | 0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT); | ||
282 | nv_mask(priv, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT, | ||
283 | 0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT); | ||
284 | |||
285 | /* pll slowdown mode */ | ||
286 | nv_mask(priv, GPCPLL_NDIV_SLOWDOWN, | ||
287 | BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT), | ||
288 | BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT)); | ||
289 | |||
290 | /* new ndiv ready for ramp */ | ||
291 | val = nv_rd32(priv, GPCPLL_COEFF); | ||
292 | val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT); | ||
293 | val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT; | ||
294 | udelay(1); | ||
295 | nv_wr32(priv, GPCPLL_COEFF, val); | ||
296 | |||
297 | /* dynamic ramp to new ndiv */ | ||
298 | val = nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN); | ||
299 | val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT; | ||
300 | udelay(1); | ||
301 | nv_wr32(priv, GPCPLL_NDIV_SLOWDOWN, val); | ||
302 | |||
303 | for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) { | ||
304 | udelay(1); | ||
305 | val = nv_rd32(priv, GPC_BCAST_NDIV_SLOWDOWN_DEBUG); | ||
306 | if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) | ||
307 | break; | ||
308 | } | ||
309 | |||
310 | /* exit slowdown mode */ | ||
311 | nv_mask(priv, GPCPLL_NDIV_SLOWDOWN, | ||
312 | BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) | | ||
313 | BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0); | ||
314 | nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN); | ||
315 | |||
316 | if (ramp_timeout <= 0) { | ||
317 | nv_error(priv, "gpcpll dynamic ramp timeout\n"); | ||
318 | return -ETIMEDOUT; | ||
319 | } | ||
320 | |||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | static void | ||
325 | _gk20a_pllg_enable(struct gk20a_clock_priv *priv) | ||
326 | { | ||
327 | nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE); | ||
328 | nv_rd32(priv, GPCPLL_CFG); | ||
329 | } | ||
330 | |||
331 | static void | ||
332 | _gk20a_pllg_disable(struct gk20a_clock_priv *priv) | ||
333 | { | ||
334 | nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0); | ||
335 | nv_rd32(priv, GPCPLL_CFG); | ||
336 | } | ||
337 | |||
338 | static int | ||
339 | _gk20a_pllg_program_mnp(struct gk20a_clock_priv *priv, bool allow_slide) | ||
340 | { | ||
341 | u32 val, cfg; | ||
342 | u32 m_old, pl_old, n_lo; | ||
343 | |||
344 | /* get old coefficients */ | ||
345 | val = nv_rd32(priv, GPCPLL_COEFF); | ||
346 | m_old = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH); | ||
347 | pl_old = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH); | ||
348 | |||
349 | /* do NDIV slide if there is no change in M and PL */ | ||
350 | cfg = nv_rd32(priv, GPCPLL_CFG); | ||
351 | if (allow_slide && priv->m == m_old && priv->pl == pl_old && | ||
352 | (cfg & GPCPLL_CFG_ENABLE)) { | ||
353 | return gk20a_pllg_slide(priv, priv->n); | ||
354 | } | ||
355 | |||
356 | /* slide down to NDIV_LO */ | ||
357 | n_lo = DIV_ROUND_UP(m_old * priv->params->min_vco, | ||
358 | priv->parent_rate / MHZ); | ||
359 | if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) { | ||
360 | int ret = gk20a_pllg_slide(priv, n_lo); | ||
361 | |||
362 | if (ret) | ||
363 | return ret; | ||
364 | } | ||
365 | |||
366 | /* split FO-to-bypass jump in halfs by setting out divider 1:2 */ | ||
367 | nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, | ||
368 | 0x2 << GPC2CLK_OUT_VCODIV_SHIFT); | ||
369 | |||
370 | /* put PLL in bypass before programming it */ | ||
371 | val = nv_rd32(priv, SEL_VCO); | ||
372 | val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT)); | ||
373 | udelay(2); | ||
374 | nv_wr32(priv, SEL_VCO, val); | ||
375 | |||
376 | /* get out from IDDQ */ | ||
377 | val = nv_rd32(priv, GPCPLL_CFG); | ||
378 | if (val & GPCPLL_CFG_IDDQ) { | ||
379 | val &= ~GPCPLL_CFG_IDDQ; | ||
380 | nv_wr32(priv, GPCPLL_CFG, val); | ||
381 | nv_rd32(priv, GPCPLL_CFG); | ||
382 | udelay(2); | ||
383 | } | ||
384 | |||
385 | _gk20a_pllg_disable(priv); | ||
386 | |||
387 | nv_debug(priv, "%s: m=%d n=%d pl=%d\n", __func__, priv->m, priv->n, | ||
388 | priv->pl); | ||
389 | |||
390 | n_lo = DIV_ROUND_UP(priv->m * priv->params->min_vco, | ||
391 | priv->parent_rate / MHZ); | ||
392 | val = priv->m << GPCPLL_COEFF_M_SHIFT; | ||
393 | val |= (allow_slide ? n_lo : priv->n) << GPCPLL_COEFF_N_SHIFT; | ||
394 | val |= priv->pl << GPCPLL_COEFF_P_SHIFT; | ||
395 | nv_wr32(priv, GPCPLL_COEFF, val); | ||
396 | |||
397 | _gk20a_pllg_enable(priv); | ||
398 | |||
399 | val = nv_rd32(priv, GPCPLL_CFG); | ||
400 | if (val & GPCPLL_CFG_LOCK_DET_OFF) { | ||
401 | val &= ~GPCPLL_CFG_LOCK_DET_OFF; | ||
402 | nv_wr32(priv, GPCPLL_CFG, val); | ||
403 | } | ||
404 | |||
405 | if (!nouveau_timer_wait_eq(priv, 300000, GPCPLL_CFG, GPCPLL_CFG_LOCK, | ||
406 | GPCPLL_CFG_LOCK)) { | ||
407 | nv_error(priv, "%s: timeout waiting for pllg lock\n", __func__); | ||
408 | return -ETIMEDOUT; | ||
409 | } | ||
410 | |||
411 | /* switch to VCO mode */ | ||
412 | nv_mask(priv, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT)); | ||
413 | |||
414 | /* restore out divider 1:1 */ | ||
415 | val = nv_rd32(priv, GPC2CLK_OUT); | ||
416 | val &= ~GPC2CLK_OUT_VCODIV_MASK; | ||
417 | udelay(2); | ||
418 | nv_wr32(priv, GPC2CLK_OUT, val); | ||
419 | |||
420 | /* slide up to new NDIV */ | ||
421 | return allow_slide ? gk20a_pllg_slide(priv, priv->n) : 0; | ||
422 | } | ||
423 | |||
424 | static int | ||
425 | gk20a_pllg_program_mnp(struct gk20a_clock_priv *priv) | ||
426 | { | ||
427 | int err; | ||
428 | |||
429 | err = _gk20a_pllg_program_mnp(priv, true); | ||
430 | if (err) | ||
431 | err = _gk20a_pllg_program_mnp(priv, false); | ||
432 | |||
433 | return err; | ||
434 | } | ||
435 | |||
436 | static void | ||
437 | gk20a_pllg_disable(struct gk20a_clock_priv *priv) | ||
438 | { | ||
439 | u32 val; | ||
440 | |||
441 | /* slide to VCO min */ | ||
442 | val = nv_rd32(priv, GPCPLL_CFG); | ||
443 | if (val & GPCPLL_CFG_ENABLE) { | ||
444 | u32 coeff, m, n_lo; | ||
445 | |||
446 | coeff = nv_rd32(priv, GPCPLL_COEFF); | ||
447 | m = (coeff >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH); | ||
448 | n_lo = DIV_ROUND_UP(m * priv->params->min_vco, | ||
449 | priv->parent_rate / MHZ); | ||
450 | gk20a_pllg_slide(priv, n_lo); | ||
451 | } | ||
452 | |||
453 | /* put PLL in bypass before disabling it */ | ||
454 | nv_mask(priv, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0); | ||
455 | |||
456 | _gk20a_pllg_disable(priv); | ||
457 | } | ||
458 | |||
459 | #define GK20A_CLK_GPC_MDIV 1000 | ||
460 | |||
461 | static struct nouveau_clocks | ||
462 | gk20a_domains[] = { | ||
463 | { nv_clk_src_crystal, 0xff }, | ||
464 | { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV }, | ||
465 | { nv_clk_src_max } | ||
466 | }; | ||
467 | |||
468 | static struct nouveau_pstate | ||
469 | gk20a_pstates[] = { | ||
470 | { | ||
471 | .base = { | ||
472 | .domain[nv_clk_src_gpc] = 72000, | ||
473 | }, | ||
474 | }, | ||
475 | { | ||
476 | .base = { | ||
477 | .domain[nv_clk_src_gpc] = 108000, | ||
478 | }, | ||
479 | }, | ||
480 | { | ||
481 | .base = { | ||
482 | .domain[nv_clk_src_gpc] = 180000, | ||
483 | }, | ||
484 | }, | ||
485 | { | ||
486 | .base = { | ||
487 | .domain[nv_clk_src_gpc] = 252000, | ||
488 | }, | ||
489 | }, | ||
490 | { | ||
491 | .base = { | ||
492 | .domain[nv_clk_src_gpc] = 324000, | ||
493 | }, | ||
494 | }, | ||
495 | { | ||
496 | .base = { | ||
497 | .domain[nv_clk_src_gpc] = 396000, | ||
498 | }, | ||
499 | }, | ||
500 | { | ||
501 | .base = { | ||
502 | .domain[nv_clk_src_gpc] = 468000, | ||
503 | }, | ||
504 | }, | ||
505 | { | ||
506 | .base = { | ||
507 | .domain[nv_clk_src_gpc] = 540000, | ||
508 | }, | ||
509 | }, | ||
510 | { | ||
511 | .base = { | ||
512 | .domain[nv_clk_src_gpc] = 612000, | ||
513 | }, | ||
514 | }, | ||
515 | { | ||
516 | .base = { | ||
517 | .domain[nv_clk_src_gpc] = 648000, | ||
518 | }, | ||
519 | }, | ||
520 | { | ||
521 | .base = { | ||
522 | .domain[nv_clk_src_gpc] = 684000, | ||
523 | }, | ||
524 | }, | ||
525 | { | ||
526 | .base = { | ||
527 | .domain[nv_clk_src_gpc] = 708000, | ||
528 | }, | ||
529 | }, | ||
530 | { | ||
531 | .base = { | ||
532 | .domain[nv_clk_src_gpc] = 756000, | ||
533 | }, | ||
534 | }, | ||
535 | { | ||
536 | .base = { | ||
537 | .domain[nv_clk_src_gpc] = 804000, | ||
538 | }, | ||
539 | }, | ||
540 | { | ||
541 | .base = { | ||
542 | .domain[nv_clk_src_gpc] = 852000, | ||
543 | }, | ||
544 | }, | ||
545 | }; | ||
546 | |||
547 | static int | ||
548 | gk20a_clock_read(struct nouveau_clock *clk, enum nv_clk_src src) | ||
549 | { | ||
550 | struct gk20a_clock_priv *priv = (void *)clk; | ||
551 | |||
552 | switch (src) { | ||
553 | case nv_clk_src_crystal: | ||
554 | return nv_device(clk)->crystal; | ||
555 | case nv_clk_src_gpc: | ||
556 | gk20a_pllg_read_mnp(priv); | ||
557 | return gk20a_pllg_calc_rate(priv) / GK20A_CLK_GPC_MDIV; | ||
558 | default: | ||
559 | nv_error(clk, "invalid clock source %d\n", src); | ||
560 | return -EINVAL; | ||
561 | } | ||
562 | } | ||
563 | |||
564 | static int | ||
565 | gk20a_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate) | ||
566 | { | ||
567 | struct gk20a_clock_priv *priv = (void *)clk; | ||
568 | |||
569 | return gk20a_pllg_calc_mnp(priv, cstate->domain[nv_clk_src_gpc] * | ||
570 | GK20A_CLK_GPC_MDIV); | ||
571 | } | ||
572 | |||
573 | static int | ||
574 | gk20a_clock_prog(struct nouveau_clock *clk) | ||
575 | { | ||
576 | struct gk20a_clock_priv *priv = (void *)clk; | ||
577 | |||
578 | return gk20a_pllg_program_mnp(priv); | ||
579 | } | ||
580 | |||
581 | static void | ||
582 | gk20a_clock_tidy(struct nouveau_clock *clk) | ||
583 | { | ||
584 | } | ||
585 | |||
586 | static int | ||
587 | gk20a_clock_fini(struct nouveau_object *object, bool suspend) | ||
588 | { | ||
589 | struct gk20a_clock_priv *priv = (void *)object; | ||
590 | int ret; | ||
591 | |||
592 | ret = nouveau_clock_fini(&priv->base, false); | ||
593 | |||
594 | gk20a_pllg_disable(priv); | ||
595 | |||
596 | return ret; | ||
597 | } | ||
598 | |||
599 | static int | ||
600 | gk20a_clock_init(struct nouveau_object *object) | ||
601 | { | ||
602 | struct gk20a_clock_priv *priv = (void *)object; | ||
603 | int ret; | ||
604 | |||
605 | nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL); | ||
606 | |||
607 | ret = nouveau_clock_init(&priv->base); | ||
608 | if (ret) | ||
609 | return ret; | ||
610 | |||
611 | ret = gk20a_clock_prog(&priv->base); | ||
612 | if (ret) { | ||
613 | nv_error(priv, "cannot initialize clock\n"); | ||
614 | return ret; | ||
615 | } | ||
616 | |||
617 | return 0; | ||
618 | } | ||
619 | |||
620 | static int | ||
621 | gk20a_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | ||
622 | struct nouveau_oclass *oclass, void *data, u32 size, | ||
623 | struct nouveau_object **pobject) | ||
624 | { | ||
625 | struct gk20a_clock_priv *priv; | ||
626 | struct nouveau_platform_device *plat; | ||
627 | int ret; | ||
628 | int i; | ||
629 | |||
630 | /* Finish initializing the pstates */ | ||
631 | for (i = 0; i < ARRAY_SIZE(gk20a_pstates); i++) { | ||
632 | INIT_LIST_HEAD(&gk20a_pstates[i].list); | ||
633 | gk20a_pstates[i].pstate = i + 1; | ||
634 | } | ||
635 | |||
636 | ret = nouveau_clock_create(parent, engine, oclass, gk20a_domains, | ||
637 | gk20a_pstates, ARRAY_SIZE(gk20a_pstates), true, &priv); | ||
638 | *pobject = nv_object(priv); | ||
639 | if (ret) | ||
640 | return ret; | ||
641 | |||
642 | priv->params = &gk20a_pllg_params; | ||
643 | |||
644 | plat = nv_device_to_platform(nv_device(parent)); | ||
645 | priv->parent_rate = clk_get_rate(plat->gpu->clk); | ||
646 | nv_info(priv, "parent clock rate: %d Mhz\n", priv->parent_rate / MHZ); | ||
647 | |||
648 | priv->base.read = gk20a_clock_read; | ||
649 | priv->base.calc = gk20a_clock_calc; | ||
650 | priv->base.prog = gk20a_clock_prog; | ||
651 | priv->base.tidy = gk20a_clock_tidy; | ||
652 | |||
653 | return 0; | ||
654 | } | ||
655 | |||
656 | struct nouveau_oclass | ||
657 | gk20a_clock_oclass = { | ||
658 | .handle = NV_SUBDEV(CLOCK, 0xea), | ||
659 | .ofuncs = &(struct nouveau_ofuncs) { | ||
660 | .ctor = gk20a_clock_ctor, | ||
661 | .dtor = _nouveau_subdev_dtor, | ||
662 | .init = gk20a_clock_init, | ||
663 | .fini = gk20a_clock_fini, | ||
664 | }, | ||
665 | }; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c index eb2d4425a49e..4c48232686be 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c | |||
@@ -82,8 +82,8 @@ nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
82 | struct nv04_clock_priv *priv; | 82 | struct nv04_clock_priv *priv; |
83 | int ret; | 83 | int ret; |
84 | 84 | ||
85 | ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, false, | 85 | ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, NULL, 0, |
86 | &priv); | 86 | false, &priv); |
87 | *pobject = nv_object(priv); | 87 | *pobject = nv_object(priv); |
88 | if (ret) | 88 | if (ret) |
89 | return ret; | 89 | return ret; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c index 8a9e16839791..08368fe97029 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c | |||
@@ -213,8 +213,8 @@ nv40_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
213 | struct nv40_clock_priv *priv; | 213 | struct nv40_clock_priv *priv; |
214 | int ret; | 214 | int ret; |
215 | 215 | ||
216 | ret = nouveau_clock_create(parent, engine, oclass, nv40_domain, true, | 216 | ret = nouveau_clock_create(parent, engine, oclass, nv40_domain, NULL, 0, |
217 | &priv); | 217 | true, &priv); |
218 | *pobject = nv_object(priv); | 218 | *pobject = nv_object(priv); |
219 | if (ret) | 219 | if (ret) |
220 | return ret; | 220 | return ret; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c index 8c132772ba9e..5070ebc260f8 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c | |||
@@ -507,7 +507,7 @@ nv50_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
507 | int ret; | 507 | int ret; |
508 | 508 | ||
509 | ret = nouveau_clock_create(parent, engine, oclass, pclass->domains, | 509 | ret = nouveau_clock_create(parent, engine, oclass, pclass->domains, |
510 | false, &priv); | 510 | NULL, 0, false, &priv); |
511 | *pobject = nv_object(priv); | 511 | *pobject = nv_object(priv); |
512 | if (ret) | 512 | if (ret) |
513 | return ret; | 513 | return ret; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c index 9fb58354a80b..087012b18956 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c | |||
@@ -302,8 +302,8 @@ nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
302 | struct nva3_clock_priv *priv; | 302 | struct nva3_clock_priv *priv; |
303 | int ret; | 303 | int ret; |
304 | 304 | ||
305 | ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, false, | 305 | ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, NULL, 0, |
306 | &priv); | 306 | false, &priv); |
307 | *pobject = nv_object(priv); | 307 | *pobject = nv_object(priv); |
308 | if (ret) | 308 | if (ret) |
309 | return ret; | 309 | return ret; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c index 6a65fc9e9663..74e19731b1b7 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c | |||
@@ -421,8 +421,8 @@ nvaa_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
421 | struct nvaa_clock_priv *priv; | 421 | struct nvaa_clock_priv *priv; |
422 | int ret; | 422 | int ret; |
423 | 423 | ||
424 | ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, true, | 424 | ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, NULL, |
425 | &priv); | 425 | 0, true, &priv); |
426 | *pobject = nv_object(priv); | 426 | *pobject = nv_object(priv); |
427 | if (ret) | 427 | if (ret) |
428 | return ret; | 428 | return ret; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c index dbf8517f54da..1234abaab2db 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c | |||
@@ -437,8 +437,8 @@ nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
437 | struct nvc0_clock_priv *priv; | 437 | struct nvc0_clock_priv *priv; |
438 | int ret; | 438 | int ret; |
439 | 439 | ||
440 | ret = nouveau_clock_create(parent, engine, oclass, nvc0_domain, false, | 440 | ret = nouveau_clock_create(parent, engine, oclass, nvc0_domain, NULL, 0, |
441 | &priv); | 441 | false, &priv); |
442 | *pobject = nv_object(priv); | 442 | *pobject = nv_object(priv); |
443 | if (ret) | 443 | if (ret) |
444 | return ret; | 444 | return ret; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c index 0e62a3240144..7eccad57512e 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c | |||
@@ -475,8 +475,8 @@ nve0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
475 | struct nve0_clock_priv *priv; | 475 | struct nve0_clock_priv *priv; |
476 | int ret; | 476 | int ret; |
477 | 477 | ||
478 | ret = nouveau_clock_create(parent, engine, oclass, nve0_domain, true, | 478 | ret = nouveau_clock_create(parent, engine, oclass, nve0_domain, NULL, 0, |
479 | &priv); | 479 | true, &priv); |
480 | *pobject = nv_object(priv); | 480 | *pobject = nv_object(priv); |
481 | if (ret) | 481 | if (ret) |
482 | return ret; | 482 | return ret; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c index 1fc55c1e91a1..4150b0d10af8 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c | |||
@@ -250,9 +250,11 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
250 | 250 | ||
251 | priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO); | 251 | priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO); |
252 | if (priv->r100c08_page) { | 252 | if (priv->r100c08_page) { |
253 | priv->r100c08 = nv_device_map_page(device, priv->r100c08_page); | 253 | priv->r100c08 = dma_map_page(nv_device_base(device), |
254 | if (!priv->r100c08) | 254 | priv->r100c08_page, 0, PAGE_SIZE, |
255 | nv_warn(priv, "failed 0x100c08 page map\n"); | 255 | DMA_BIDIRECTIONAL); |
256 | if (dma_mapping_error(nv_device_base(device), priv->r100c08)) | ||
257 | return -EFAULT; | ||
256 | } else { | 258 | } else { |
257 | nv_warn(priv, "failed 0x100c08 page alloc\n"); | 259 | nv_warn(priv, "failed 0x100c08 page alloc\n"); |
258 | } | 260 | } |
@@ -268,7 +270,8 @@ nv50_fb_dtor(struct nouveau_object *object) | |||
268 | struct nv50_fb_priv *priv = (void *)object; | 270 | struct nv50_fb_priv *priv = (void *)object; |
269 | 271 | ||
270 | if (priv->r100c08_page) { | 272 | if (priv->r100c08_page) { |
271 | nv_device_unmap_page(device, priv->r100c08); | 273 | dma_unmap_page(nv_device_base(device), priv->r100c08, PAGE_SIZE, |
274 | DMA_BIDIRECTIONAL); | ||
272 | __free_page(priv->r100c08_page); | 275 | __free_page(priv->r100c08_page); |
273 | } | 276 | } |
274 | 277 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c index 0670ae33ee45..b19a2b3c1081 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c | |||
@@ -70,7 +70,8 @@ nvc0_fb_dtor(struct nouveau_object *object) | |||
70 | struct nvc0_fb_priv *priv = (void *)object; | 70 | struct nvc0_fb_priv *priv = (void *)object; |
71 | 71 | ||
72 | if (priv->r100c10_page) { | 72 | if (priv->r100c10_page) { |
73 | nv_device_unmap_page(device, priv->r100c10); | 73 | dma_unmap_page(nv_device_base(device), priv->r100c10, PAGE_SIZE, |
74 | DMA_BIDIRECTIONAL); | ||
74 | __free_page(priv->r100c10_page); | 75 | __free_page(priv->r100c10_page); |
75 | } | 76 | } |
76 | 77 | ||
@@ -93,8 +94,10 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
93 | 94 | ||
94 | priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO); | 95 | priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO); |
95 | if (priv->r100c10_page) { | 96 | if (priv->r100c10_page) { |
96 | priv->r100c10 = nv_device_map_page(device, priv->r100c10_page); | 97 | priv->r100c10 = dma_map_page(nv_device_base(device), |
97 | if (!priv->r100c10) | 98 | priv->r100c10_page, 0, PAGE_SIZE, |
99 | DMA_BIDIRECTIONAL); | ||
100 | if (dma_mapping_error(nv_device_base(device), priv->r100c10)) | ||
98 | return -EFAULT; | 101 | return -EFAULT; |
99 | } | 102 | } |
100 | 103 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c index 5a6a5027f749..946518572346 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c | |||
@@ -26,7 +26,7 @@ | |||
26 | #include <subdev/bios/pll.h> | 26 | #include <subdev/bios/pll.h> |
27 | #include <subdev/bios/rammap.h> | 27 | #include <subdev/bios/rammap.h> |
28 | #include <subdev/bios/timing.h> | 28 | #include <subdev/bios/timing.h> |
29 | #include <subdev/ltcg.h> | 29 | #include <subdev/ltc.h> |
30 | 30 | ||
31 | #include <subdev/clock.h> | 31 | #include <subdev/clock.h> |
32 | #include <subdev/clock/pll.h> | 32 | #include <subdev/clock/pll.h> |
@@ -425,7 +425,7 @@ extern const u8 nvc0_pte_storage_type_map[256]; | |||
425 | void | 425 | void |
426 | nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) | 426 | nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) |
427 | { | 427 | { |
428 | struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb); | 428 | struct nouveau_ltc *ltc = nouveau_ltc(pfb); |
429 | struct nouveau_mem *mem = *pmem; | 429 | struct nouveau_mem *mem = *pmem; |
430 | 430 | ||
431 | *pmem = NULL; | 431 | *pmem = NULL; |
@@ -434,7 +434,7 @@ nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) | |||
434 | 434 | ||
435 | mutex_lock(&pfb->base.mutex); | 435 | mutex_lock(&pfb->base.mutex); |
436 | if (mem->tag) | 436 | if (mem->tag) |
437 | ltcg->tags_free(ltcg, &mem->tag); | 437 | ltc->tags_free(ltc, &mem->tag); |
438 | __nv50_ram_put(pfb, mem); | 438 | __nv50_ram_put(pfb, mem); |
439 | mutex_unlock(&pfb->base.mutex); | 439 | mutex_unlock(&pfb->base.mutex); |
440 | 440 | ||
@@ -468,12 +468,12 @@ nvc0_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin, | |||
468 | 468 | ||
469 | mutex_lock(&pfb->base.mutex); | 469 | mutex_lock(&pfb->base.mutex); |
470 | if (comp) { | 470 | if (comp) { |
471 | struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb); | 471 | struct nouveau_ltc *ltc = nouveau_ltc(pfb); |
472 | 472 | ||
473 | /* compression only works with lpages */ | 473 | /* compression only works with lpages */ |
474 | if (align == (1 << (17 - 12))) { | 474 | if (align == (1 << (17 - 12))) { |
475 | int n = size >> 5; | 475 | int n = size >> 5; |
476 | ltcg->tags_alloc(ltcg, n, &mem->tag); | 476 | ltc->tags_alloc(ltc, n, &mem->tag); |
477 | } | 477 | } |
478 | 478 | ||
479 | if (unlikely(!mem->tag)) | 479 | if (unlikely(!mem->tag)) |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c index 45e0202f3151..b1e3ed7c8beb 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c | |||
@@ -106,39 +106,59 @@ nouveau_gpio_get(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line) | |||
106 | } | 106 | } |
107 | 107 | ||
108 | static void | 108 | static void |
109 | nouveau_gpio_intr_disable(struct nouveau_event *event, int type, int index) | 109 | nouveau_gpio_intr_fini(struct nvkm_event *event, int type, int index) |
110 | { | 110 | { |
111 | struct nouveau_gpio *gpio = nouveau_gpio(event->priv); | 111 | struct nouveau_gpio *gpio = container_of(event, typeof(*gpio), event); |
112 | const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass; | 112 | const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass; |
113 | impl->intr_mask(gpio, type, 1 << index, 0); | 113 | impl->intr_mask(gpio, type, 1 << index, 0); |
114 | } | 114 | } |
115 | 115 | ||
116 | static void | 116 | static void |
117 | nouveau_gpio_intr_enable(struct nouveau_event *event, int type, int index) | 117 | nouveau_gpio_intr_init(struct nvkm_event *event, int type, int index) |
118 | { | 118 | { |
119 | struct nouveau_gpio *gpio = nouveau_gpio(event->priv); | 119 | struct nouveau_gpio *gpio = container_of(event, typeof(*gpio), event); |
120 | const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass; | 120 | const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass; |
121 | impl->intr_mask(gpio, type, 1 << index, 1 << index); | 121 | impl->intr_mask(gpio, type, 1 << index, 1 << index); |
122 | } | 122 | } |
123 | 123 | ||
124 | static int | ||
125 | nouveau_gpio_intr_ctor(void *data, u32 size, struct nvkm_notify *notify) | ||
126 | { | ||
127 | struct nvkm_gpio_ntfy_req *req = data; | ||
128 | if (!WARN_ON(size != sizeof(*req))) { | ||
129 | notify->size = sizeof(struct nvkm_gpio_ntfy_rep); | ||
130 | notify->types = req->mask; | ||
131 | notify->index = req->line; | ||
132 | return 0; | ||
133 | } | ||
134 | return -EINVAL; | ||
135 | } | ||
136 | |||
124 | static void | 137 | static void |
125 | nouveau_gpio_intr(struct nouveau_subdev *subdev) | 138 | nouveau_gpio_intr(struct nouveau_subdev *subdev) |
126 | { | 139 | { |
127 | struct nouveau_gpio *gpio = nouveau_gpio(subdev); | 140 | struct nouveau_gpio *gpio = nouveau_gpio(subdev); |
128 | const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass; | 141 | const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass; |
129 | u32 hi, lo, e, i; | 142 | u32 hi, lo, i; |
130 | 143 | ||
131 | impl->intr_stat(gpio, &hi, &lo); | 144 | impl->intr_stat(gpio, &hi, &lo); |
132 | 145 | ||
133 | for (i = 0; e = 0, (hi | lo) && i < impl->lines; i++) { | 146 | for (i = 0; (hi | lo) && i < impl->lines; i++) { |
134 | if (hi & (1 << i)) | 147 | struct nvkm_gpio_ntfy_rep rep = { |
135 | e |= NVKM_GPIO_HI; | 148 | .mask = (NVKM_GPIO_HI * !!(hi & (1 << i))) | |
136 | if (lo & (1 << i)) | 149 | (NVKM_GPIO_LO * !!(lo & (1 << i))), |
137 | e |= NVKM_GPIO_LO; | 150 | }; |
138 | nouveau_event_trigger(gpio->events, e, i); | 151 | nvkm_event_send(&gpio->event, rep.mask, i, &rep, sizeof(rep)); |
139 | } | 152 | } |
140 | } | 153 | } |
141 | 154 | ||
155 | static const struct nvkm_event_func | ||
156 | nouveau_gpio_intr_func = { | ||
157 | .ctor = nouveau_gpio_intr_ctor, | ||
158 | .init = nouveau_gpio_intr_init, | ||
159 | .fini = nouveau_gpio_intr_fini, | ||
160 | }; | ||
161 | |||
142 | int | 162 | int |
143 | _nouveau_gpio_fini(struct nouveau_object *object, bool suspend) | 163 | _nouveau_gpio_fini(struct nouveau_object *object, bool suspend) |
144 | { | 164 | { |
@@ -183,7 +203,7 @@ void | |||
183 | _nouveau_gpio_dtor(struct nouveau_object *object) | 203 | _nouveau_gpio_dtor(struct nouveau_object *object) |
184 | { | 204 | { |
185 | struct nouveau_gpio *gpio = (void *)object; | 205 | struct nouveau_gpio *gpio = (void *)object; |
186 | nouveau_event_destroy(&gpio->events); | 206 | nvkm_event_fini(&gpio->event); |
187 | nouveau_subdev_destroy(&gpio->base); | 207 | nouveau_subdev_destroy(&gpio->base); |
188 | } | 208 | } |
189 | 209 | ||
@@ -208,13 +228,11 @@ nouveau_gpio_create_(struct nouveau_object *parent, | |||
208 | gpio->get = nouveau_gpio_get; | 228 | gpio->get = nouveau_gpio_get; |
209 | gpio->reset = impl->reset; | 229 | gpio->reset = impl->reset; |
210 | 230 | ||
211 | ret = nouveau_event_create(2, impl->lines, &gpio->events); | 231 | ret = nvkm_event_init(&nouveau_gpio_intr_func, 2, impl->lines, |
232 | &gpio->event); | ||
212 | if (ret) | 233 | if (ret) |
213 | return ret; | 234 | return ret; |
214 | 235 | ||
215 | gpio->events->priv = gpio; | ||
216 | gpio->events->enable = nouveau_gpio_intr_enable; | ||
217 | gpio->events->disable = nouveau_gpio_intr_disable; | ||
218 | nv_subdev(gpio)->intr = nouveau_gpio_intr; | 236 | nv_subdev(gpio)->intr = nouveau_gpio_intr; |
219 | return 0; | 237 | return 0; |
220 | } | 238 | } |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c index 09ba2cc851cf..a652cafde3d6 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c | |||
@@ -326,9 +326,9 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what, | |||
326 | } | 326 | } |
327 | 327 | ||
328 | static void | 328 | static void |
329 | nouveau_i2c_intr_disable(struct nouveau_event *event, int type, int index) | 329 | nouveau_i2c_intr_fini(struct nvkm_event *event, int type, int index) |
330 | { | 330 | { |
331 | struct nouveau_i2c *i2c = nouveau_i2c(event->priv); | 331 | struct nouveau_i2c *i2c = container_of(event, typeof(*i2c), event); |
332 | struct nouveau_i2c_port *port = i2c->find(i2c, index); | 332 | struct nouveau_i2c_port *port = i2c->find(i2c, index); |
333 | const struct nouveau_i2c_impl *impl = (void *)nv_object(i2c)->oclass; | 333 | const struct nouveau_i2c_impl *impl = (void *)nv_object(i2c)->oclass; |
334 | if (port && port->aux >= 0) | 334 | if (port && port->aux >= 0) |
@@ -336,15 +336,28 @@ nouveau_i2c_intr_disable(struct nouveau_event *event, int type, int index) | |||
336 | } | 336 | } |
337 | 337 | ||
338 | static void | 338 | static void |
339 | nouveau_i2c_intr_enable(struct nouveau_event *event, int type, int index) | 339 | nouveau_i2c_intr_init(struct nvkm_event *event, int type, int index) |
340 | { | 340 | { |
341 | struct nouveau_i2c *i2c = nouveau_i2c(event->priv); | 341 | struct nouveau_i2c *i2c = container_of(event, typeof(*i2c), event); |
342 | struct nouveau_i2c_port *port = i2c->find(i2c, index); | 342 | struct nouveau_i2c_port *port = i2c->find(i2c, index); |
343 | const struct nouveau_i2c_impl *impl = (void *)nv_object(i2c)->oclass; | 343 | const struct nouveau_i2c_impl *impl = (void *)nv_object(i2c)->oclass; |
344 | if (port && port->aux >= 0) | 344 | if (port && port->aux >= 0) |
345 | impl->aux_mask(i2c, type, 1 << port->aux, 1 << port->aux); | 345 | impl->aux_mask(i2c, type, 1 << port->aux, 1 << port->aux); |
346 | } | 346 | } |
347 | 347 | ||
348 | static int | ||
349 | nouveau_i2c_intr_ctor(void *data, u32 size, struct nvkm_notify *notify) | ||
350 | { | ||
351 | struct nvkm_i2c_ntfy_req *req = data; | ||
352 | if (!WARN_ON(size != sizeof(*req))) { | ||
353 | notify->size = sizeof(struct nvkm_i2c_ntfy_rep); | ||
354 | notify->types = req->mask; | ||
355 | notify->index = req->port; | ||
356 | return 0; | ||
357 | } | ||
358 | return -EINVAL; | ||
359 | } | ||
360 | |||
348 | static void | 361 | static void |
349 | nouveau_i2c_intr(struct nouveau_subdev *subdev) | 362 | nouveau_i2c_intr(struct nouveau_subdev *subdev) |
350 | { | 363 | { |
@@ -364,13 +377,26 @@ nouveau_i2c_intr(struct nouveau_subdev *subdev) | |||
364 | if (lo & (1 << port->aux)) e |= NVKM_I2C_UNPLUG; | 377 | if (lo & (1 << port->aux)) e |= NVKM_I2C_UNPLUG; |
365 | if (rq & (1 << port->aux)) e |= NVKM_I2C_IRQ; | 378 | if (rq & (1 << port->aux)) e |= NVKM_I2C_IRQ; |
366 | if (tx & (1 << port->aux)) e |= NVKM_I2C_DONE; | 379 | if (tx & (1 << port->aux)) e |= NVKM_I2C_DONE; |
367 | 380 | if (e) { | |
368 | nouveau_event_trigger(i2c->ntfy, e, port->index); | 381 | struct nvkm_i2c_ntfy_rep rep = { |
382 | .mask = e, | ||
383 | }; | ||
384 | nvkm_event_send(&i2c->event, rep.mask, | ||
385 | port->index, &rep, | ||
386 | sizeof(rep)); | ||
387 | } | ||
369 | } | 388 | } |
370 | } | 389 | } |
371 | } | 390 | } |
372 | } | 391 | } |
373 | 392 | ||
393 | static const struct nvkm_event_func | ||
394 | nouveau_i2c_intr_func = { | ||
395 | .ctor = nouveau_i2c_intr_ctor, | ||
396 | .init = nouveau_i2c_intr_init, | ||
397 | .fini = nouveau_i2c_intr_fini, | ||
398 | }; | ||
399 | |||
374 | int | 400 | int |
375 | _nouveau_i2c_fini(struct nouveau_object *object, bool suspend) | 401 | _nouveau_i2c_fini(struct nouveau_object *object, bool suspend) |
376 | { | 402 | { |
@@ -431,7 +457,7 @@ _nouveau_i2c_dtor(struct nouveau_object *object) | |||
431 | struct nouveau_i2c *i2c = (void *)object; | 457 | struct nouveau_i2c *i2c = (void *)object; |
432 | struct nouveau_i2c_port *port, *temp; | 458 | struct nouveau_i2c_port *port, *temp; |
433 | 459 | ||
434 | nouveau_event_destroy(&i2c->ntfy); | 460 | nvkm_event_fini(&i2c->event); |
435 | 461 | ||
436 | list_for_each_entry_safe(port, temp, &i2c->ports, head) { | 462 | list_for_each_entry_safe(port, temp, &i2c->ports, head) { |
437 | nouveau_object_ref(NULL, (struct nouveau_object **)&port); | 463 | nouveau_object_ref(NULL, (struct nouveau_object **)&port); |
@@ -547,13 +573,10 @@ nouveau_i2c_create_(struct nouveau_object *parent, | |||
547 | } | 573 | } |
548 | } | 574 | } |
549 | 575 | ||
550 | ret = nouveau_event_create(4, index, &i2c->ntfy); | 576 | ret = nvkm_event_init(&nouveau_i2c_intr_func, 4, index, &i2c->event); |
551 | if (ret) | 577 | if (ret) |
552 | return ret; | 578 | return ret; |
553 | 579 | ||
554 | i2c->ntfy->priv = i2c; | ||
555 | i2c->ntfy->enable = nouveau_i2c_intr_enable; | ||
556 | i2c->ntfy->disable = nouveau_i2c_intr_disable; | ||
557 | return 0; | 580 | return 0; |
558 | } | 581 | } |
559 | 582 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/base.c new file mode 100644 index 000000000000..32ed442c5913 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/base.c | |||
@@ -0,0 +1,126 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | ||
23 | */ | ||
24 | |||
25 | #include "priv.h" | ||
26 | |||
27 | static int | ||
28 | nvkm_ltc_tags_alloc(struct nouveau_ltc *ltc, u32 n, | ||
29 | struct nouveau_mm_node **pnode) | ||
30 | { | ||
31 | struct nvkm_ltc_priv *priv = (void *)ltc; | ||
32 | int ret; | ||
33 | |||
34 | ret = nouveau_mm_head(&priv->tags, 1, n, n, 1, pnode); | ||
35 | if (ret) | ||
36 | *pnode = NULL; | ||
37 | |||
38 | return ret; | ||
39 | } | ||
40 | |||
41 | static void | ||
42 | nvkm_ltc_tags_free(struct nouveau_ltc *ltc, struct nouveau_mm_node **pnode) | ||
43 | { | ||
44 | struct nvkm_ltc_priv *priv = (void *)ltc; | ||
45 | nouveau_mm_free(&priv->tags, pnode); | ||
46 | } | ||
47 | |||
48 | static void | ||
49 | nvkm_ltc_tags_clear(struct nouveau_ltc *ltc, u32 first, u32 count) | ||
50 | { | ||
51 | const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc); | ||
52 | struct nvkm_ltc_priv *priv = (void *)ltc; | ||
53 | const u32 limit = first + count - 1; | ||
54 | |||
55 | BUG_ON((first > limit) || (limit >= priv->num_tags)); | ||
56 | |||
57 | impl->cbc_clear(priv, first, limit); | ||
58 | impl->cbc_wait(priv); | ||
59 | } | ||
60 | |||
61 | static int | ||
62 | nvkm_ltc_zbc_color_get(struct nouveau_ltc *ltc, int index, const u32 color[4]) | ||
63 | { | ||
64 | const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc); | ||
65 | struct nvkm_ltc_priv *priv = (void *)ltc; | ||
66 | memcpy(priv->zbc_color[index], color, sizeof(priv->zbc_color[index])); | ||
67 | impl->zbc_clear_color(priv, index, color); | ||
68 | return index; | ||
69 | } | ||
70 | |||
71 | static int | ||
72 | nvkm_ltc_zbc_depth_get(struct nouveau_ltc *ltc, int index, const u32 depth) | ||
73 | { | ||
74 | const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc); | ||
75 | struct nvkm_ltc_priv *priv = (void *)ltc; | ||
76 | priv->zbc_depth[index] = depth; | ||
77 | impl->zbc_clear_depth(priv, index, depth); | ||
78 | return index; | ||
79 | } | ||
80 | |||
81 | int | ||
82 | _nvkm_ltc_init(struct nouveau_object *object) | ||
83 | { | ||
84 | const struct nvkm_ltc_impl *impl = (void *)nv_oclass(object); | ||
85 | struct nvkm_ltc_priv *priv = (void *)object; | ||
86 | int ret, i; | ||
87 | |||
88 | ret = nouveau_subdev_init(&priv->base.base); | ||
89 | if (ret) | ||
90 | return ret; | ||
91 | |||
92 | for (i = priv->base.zbc_min; i <= priv->base.zbc_max; i++) { | ||
93 | impl->zbc_clear_color(priv, i, priv->zbc_color[i]); | ||
94 | impl->zbc_clear_depth(priv, i, priv->zbc_depth[i]); | ||
95 | } | ||
96 | |||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | int | ||
101 | nvkm_ltc_create_(struct nouveau_object *parent, struct nouveau_object *engine, | ||
102 | struct nouveau_oclass *oclass, int length, void **pobject) | ||
103 | { | ||
104 | const struct nvkm_ltc_impl *impl = (void *)oclass; | ||
105 | struct nvkm_ltc_priv *priv; | ||
106 | int ret; | ||
107 | |||
108 | ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PLTCG", | ||
109 | "l2c", length, pobject); | ||
110 | priv = *pobject; | ||
111 | if (ret) | ||
112 | return ret; | ||
113 | |||
114 | memset(priv->zbc_color, 0x00, sizeof(priv->zbc_color)); | ||
115 | memset(priv->zbc_depth, 0x00, sizeof(priv->zbc_depth)); | ||
116 | |||
117 | priv->base.base.intr = impl->intr; | ||
118 | priv->base.tags_alloc = nvkm_ltc_tags_alloc; | ||
119 | priv->base.tags_free = nvkm_ltc_tags_free; | ||
120 | priv->base.tags_clear = nvkm_ltc_tags_clear; | ||
121 | priv->base.zbc_min = 1; /* reserve 0 for disabled */ | ||
122 | priv->base.zbc_max = min(impl->zbc, NOUVEAU_LTC_MAX_ZBC_CNT) - 1; | ||
123 | priv->base.zbc_color_get = nvkm_ltc_zbc_color_get; | ||
124 | priv->base.zbc_depth_get = nvkm_ltc_zbc_depth_get; | ||
125 | return 0; | ||
126 | } | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c index f2f3338a967a..9e00a1ede120 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.c +++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c | |||
@@ -25,10 +25,45 @@ | |||
25 | #include <subdev/fb.h> | 25 | #include <subdev/fb.h> |
26 | #include <subdev/timer.h> | 26 | #include <subdev/timer.h> |
27 | 27 | ||
28 | #include "gf100.h" | 28 | #include "priv.h" |
29 | |||
30 | void | ||
31 | gf100_ltc_cbc_clear(struct nvkm_ltc_priv *priv, u32 start, u32 limit) | ||
32 | { | ||
33 | nv_wr32(priv, 0x17e8cc, start); | ||
34 | nv_wr32(priv, 0x17e8d0, limit); | ||
35 | nv_wr32(priv, 0x17e8c8, 0x00000004); | ||
36 | } | ||
37 | |||
38 | void | ||
39 | gf100_ltc_cbc_wait(struct nvkm_ltc_priv *priv) | ||
40 | { | ||
41 | int c, s; | ||
42 | for (c = 0; c < priv->ltc_nr; c++) { | ||
43 | for (s = 0; s < priv->lts_nr; s++) | ||
44 | nv_wait(priv, 0x1410c8 + c * 0x2000 + s * 0x400, ~0, 0); | ||
45 | } | ||
46 | } | ||
47 | |||
48 | void | ||
49 | gf100_ltc_zbc_clear_color(struct nvkm_ltc_priv *priv, int i, const u32 color[4]) | ||
50 | { | ||
51 | nv_mask(priv, 0x17ea44, 0x0000000f, i); | ||
52 | nv_wr32(priv, 0x17ea48, color[0]); | ||
53 | nv_wr32(priv, 0x17ea4c, color[1]); | ||
54 | nv_wr32(priv, 0x17ea50, color[2]); | ||
55 | nv_wr32(priv, 0x17ea54, color[3]); | ||
56 | } | ||
57 | |||
58 | void | ||
59 | gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *priv, int i, const u32 depth) | ||
60 | { | ||
61 | nv_mask(priv, 0x17ea44, 0x0000000f, i); | ||
62 | nv_wr32(priv, 0x17ea58, depth); | ||
63 | } | ||
29 | 64 | ||
30 | static void | 65 | static void |
31 | gf100_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts) | 66 | gf100_ltc_lts_isr(struct nvkm_ltc_priv *priv, int ltc, int lts) |
32 | { | 67 | { |
33 | u32 base = 0x141000 + (ltc * 0x2000) + (lts * 0x400); | 68 | u32 base = 0x141000 + (ltc * 0x2000) + (lts * 0x400); |
34 | u32 stat = nv_rd32(priv, base + 0x020); | 69 | u32 stat = nv_rd32(priv, base + 0x020); |
@@ -39,17 +74,17 @@ gf100_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts) | |||
39 | } | 74 | } |
40 | } | 75 | } |
41 | 76 | ||
42 | static void | 77 | void |
43 | gf100_ltcg_intr(struct nouveau_subdev *subdev) | 78 | gf100_ltc_intr(struct nouveau_subdev *subdev) |
44 | { | 79 | { |
45 | struct gf100_ltcg_priv *priv = (void *)subdev; | 80 | struct nvkm_ltc_priv *priv = (void *)subdev; |
46 | u32 mask; | 81 | u32 mask; |
47 | 82 | ||
48 | mask = nv_rd32(priv, 0x00017c); | 83 | mask = nv_rd32(priv, 0x00017c); |
49 | while (mask) { | 84 | while (mask) { |
50 | u32 lts, ltc = __ffs(mask); | 85 | u32 lts, ltc = __ffs(mask); |
51 | for (lts = 0; lts < priv->lts_nr; lts++) | 86 | for (lts = 0; lts < priv->lts_nr; lts++) |
52 | gf100_ltcg_lts_isr(priv, ltc, lts); | 87 | gf100_ltc_lts_isr(priv, ltc, lts); |
53 | mask &= ~(1 << ltc); | 88 | mask &= ~(1 << ltc); |
54 | } | 89 | } |
55 | 90 | ||
@@ -59,52 +94,38 @@ gf100_ltcg_intr(struct nouveau_subdev *subdev) | |||
59 | nv_mask(priv, 0x000640, 0x02000000, 0x00000000); | 94 | nv_mask(priv, 0x000640, 0x02000000, 0x00000000); |
60 | } | 95 | } |
61 | 96 | ||
62 | int | 97 | static int |
63 | gf100_ltcg_tags_alloc(struct nouveau_ltcg *ltcg, u32 n, | 98 | gf100_ltc_init(struct nouveau_object *object) |
64 | struct nouveau_mm_node **pnode) | ||
65 | { | 99 | { |
66 | struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg; | 100 | struct nvkm_ltc_priv *priv = (void *)object; |
67 | int ret; | 101 | int ret; |
68 | 102 | ||
69 | ret = nouveau_mm_head(&priv->tags, 1, n, n, 1, pnode); | 103 | ret = nvkm_ltc_init(priv); |
70 | if (ret) | 104 | if (ret) |
71 | *pnode = NULL; | 105 | return ret; |
72 | 106 | ||
73 | return ret; | 107 | nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ |
108 | nv_wr32(priv, 0x17e8d8, priv->ltc_nr); | ||
109 | nv_wr32(priv, 0x17e8d4, priv->tag_base); | ||
110 | return 0; | ||
74 | } | 111 | } |
75 | 112 | ||
76 | void | 113 | void |
77 | gf100_ltcg_tags_free(struct nouveau_ltcg *ltcg, struct nouveau_mm_node **pnode) | 114 | gf100_ltc_dtor(struct nouveau_object *object) |
78 | { | ||
79 | struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg; | ||
80 | |||
81 | nouveau_mm_free(&priv->tags, pnode); | ||
82 | } | ||
83 | |||
84 | static void | ||
85 | gf100_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count) | ||
86 | { | 115 | { |
87 | struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg; | 116 | struct nouveau_fb *pfb = nouveau_fb(object); |
88 | u32 last = first + count - 1; | 117 | struct nvkm_ltc_priv *priv = (void *)object; |
89 | int p, i; | ||
90 | |||
91 | BUG_ON((first > last) || (last >= priv->num_tags)); | ||
92 | 118 | ||
93 | nv_wr32(priv, 0x17e8cc, first); | 119 | nouveau_mm_fini(&priv->tags); |
94 | nv_wr32(priv, 0x17e8d0, last); | 120 | nouveau_mm_free(&pfb->vram, &priv->tag_ram); |
95 | nv_wr32(priv, 0x17e8c8, 0x4); /* trigger clear */ | ||
96 | 121 | ||
97 | /* wait until it's finished with clearing */ | 122 | nvkm_ltc_destroy(priv); |
98 | for (p = 0; p < priv->ltc_nr; ++p) { | ||
99 | for (i = 0; i < priv->lts_nr; ++i) | ||
100 | nv_wait(priv, 0x1410c8 + p * 0x2000 + i * 0x400, ~0, 0); | ||
101 | } | ||
102 | } | 123 | } |
103 | 124 | ||
104 | /* TODO: Figure out tag memory details and drop the over-cautious allocation. | 125 | /* TODO: Figure out tag memory details and drop the over-cautious allocation. |
105 | */ | 126 | */ |
106 | int | 127 | int |
107 | gf100_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct gf100_ltcg_priv *priv) | 128 | gf100_ltc_init_tag_ram(struct nouveau_fb *pfb, struct nvkm_ltc_priv *priv) |
108 | { | 129 | { |
109 | u32 tag_size, tag_margin, tag_align; | 130 | u32 tag_size, tag_margin, tag_align; |
110 | int ret; | 131 | int ret; |
@@ -142,22 +163,22 @@ gf100_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct gf100_ltcg_priv *priv) | |||
142 | 163 | ||
143 | priv->tag_base = tag_base; | 164 | priv->tag_base = tag_base; |
144 | } | 165 | } |
145 | ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1); | ||
146 | 166 | ||
167 | ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1); | ||
147 | return ret; | 168 | return ret; |
148 | } | 169 | } |
149 | 170 | ||
150 | static int | 171 | int |
151 | gf100_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | 172 | gf100_ltc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, |
152 | struct nouveau_oclass *oclass, void *data, u32 size, | 173 | struct nouveau_oclass *oclass, void *data, u32 size, |
153 | struct nouveau_object **pobject) | 174 | struct nouveau_object **pobject) |
154 | { | 175 | { |
155 | struct gf100_ltcg_priv *priv; | ||
156 | struct nouveau_fb *pfb = nouveau_fb(parent); | 176 | struct nouveau_fb *pfb = nouveau_fb(parent); |
177 | struct nvkm_ltc_priv *priv; | ||
157 | u32 parts, mask; | 178 | u32 parts, mask; |
158 | int ret, i; | 179 | int ret, i; |
159 | 180 | ||
160 | ret = nouveau_ltcg_create(parent, engine, oclass, &priv); | 181 | ret = nvkm_ltc_create(parent, engine, oclass, &priv); |
161 | *pobject = nv_object(priv); | 182 | *pobject = nv_object(priv); |
162 | if (ret) | 183 | if (ret) |
163 | return ret; | 184 | return ret; |
@@ -170,57 +191,27 @@ gf100_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
170 | } | 191 | } |
171 | priv->lts_nr = nv_rd32(priv, 0x17e8dc) >> 28; | 192 | priv->lts_nr = nv_rd32(priv, 0x17e8dc) >> 28; |
172 | 193 | ||
173 | ret = gf100_ltcg_init_tag_ram(pfb, priv); | 194 | ret = gf100_ltc_init_tag_ram(pfb, priv); |
174 | if (ret) | 195 | if (ret) |
175 | return ret; | 196 | return ret; |
176 | 197 | ||
177 | priv->base.tags_alloc = gf100_ltcg_tags_alloc; | 198 | nv_subdev(priv)->intr = gf100_ltc_intr; |
178 | priv->base.tags_free = gf100_ltcg_tags_free; | ||
179 | priv->base.tags_clear = gf100_ltcg_tags_clear; | ||
180 | |||
181 | nv_subdev(priv)->intr = gf100_ltcg_intr; | ||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | void | ||
186 | gf100_ltcg_dtor(struct nouveau_object *object) | ||
187 | { | ||
188 | struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object; | ||
189 | struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg; | ||
190 | struct nouveau_fb *pfb = nouveau_fb(ltcg->base.base.parent); | ||
191 | |||
192 | nouveau_mm_fini(&priv->tags); | ||
193 | nouveau_mm_free(&pfb->vram, &priv->tag_ram); | ||
194 | |||
195 | nouveau_ltcg_destroy(ltcg); | ||
196 | } | ||
197 | |||
198 | static int | ||
199 | gf100_ltcg_init(struct nouveau_object *object) | ||
200 | { | ||
201 | struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object; | ||
202 | struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg; | ||
203 | int ret; | ||
204 | |||
205 | ret = nouveau_ltcg_init(ltcg); | ||
206 | if (ret) | ||
207 | return ret; | ||
208 | |||
209 | nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ | ||
210 | nv_wr32(priv, 0x17e8d8, priv->ltc_nr); | ||
211 | if (nv_device(ltcg)->card_type >= NV_E0) | ||
212 | nv_wr32(priv, 0x17e000, priv->ltc_nr); | ||
213 | nv_wr32(priv, 0x17e8d4, priv->tag_base); | ||
214 | return 0; | 199 | return 0; |
215 | } | 200 | } |
216 | 201 | ||
217 | struct nouveau_oclass * | 202 | struct nouveau_oclass * |
218 | gf100_ltcg_oclass = &(struct nouveau_oclass) { | 203 | gf100_ltc_oclass = &(struct nvkm_ltc_impl) { |
219 | .handle = NV_SUBDEV(LTCG, 0xc0), | 204 | .base.handle = NV_SUBDEV(LTC, 0xc0), |
220 | .ofuncs = &(struct nouveau_ofuncs) { | 205 | .base.ofuncs = &(struct nouveau_ofuncs) { |
221 | .ctor = gf100_ltcg_ctor, | 206 | .ctor = gf100_ltc_ctor, |
222 | .dtor = gf100_ltcg_dtor, | 207 | .dtor = gf100_ltc_dtor, |
223 | .init = gf100_ltcg_init, | 208 | .init = gf100_ltc_init, |
224 | .fini = _nouveau_ltcg_fini, | 209 | .fini = _nvkm_ltc_fini, |
225 | }, | 210 | }, |
226 | }; | 211 | .intr = gf100_ltc_intr, |
212 | .cbc_clear = gf100_ltc_cbc_clear, | ||
213 | .cbc_wait = gf100_ltc_cbc_wait, | ||
214 | .zbc = 16, | ||
215 | .zbc_clear_color = gf100_ltc_zbc_clear_color, | ||
216 | .zbc_clear_depth = gf100_ltc_zbc_clear_depth, | ||
217 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c new file mode 100644 index 000000000000..ea716569745d --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c | |||
@@ -0,0 +1,58 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "priv.h" | ||
26 | |||
27 | static int | ||
28 | gk104_ltc_init(struct nouveau_object *object) | ||
29 | { | ||
30 | struct nvkm_ltc_priv *priv = (void *)object; | ||
31 | int ret; | ||
32 | |||
33 | ret = nvkm_ltc_init(priv); | ||
34 | if (ret) | ||
35 | return ret; | ||
36 | |||
37 | nv_wr32(priv, 0x17e8d8, priv->ltc_nr); | ||
38 | nv_wr32(priv, 0x17e000, priv->ltc_nr); | ||
39 | nv_wr32(priv, 0x17e8d4, priv->tag_base); | ||
40 | return 0; | ||
41 | } | ||
42 | |||
43 | struct nouveau_oclass * | ||
44 | gk104_ltc_oclass = &(struct nvkm_ltc_impl) { | ||
45 | .base.handle = NV_SUBDEV(LTC, 0xe4), | ||
46 | .base.ofuncs = &(struct nouveau_ofuncs) { | ||
47 | .ctor = gf100_ltc_ctor, | ||
48 | .dtor = gf100_ltc_dtor, | ||
49 | .init = gk104_ltc_init, | ||
50 | .fini = _nvkm_ltc_fini, | ||
51 | }, | ||
52 | .intr = gf100_ltc_intr, | ||
53 | .cbc_clear = gf100_ltc_cbc_clear, | ||
54 | .cbc_wait = gf100_ltc_cbc_wait, | ||
55 | .zbc = 16, | ||
56 | .zbc_clear_color = gf100_ltc_zbc_clear_color, | ||
57 | .zbc_clear_depth = gf100_ltc_zbc_clear_depth, | ||
58 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gm107.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c index e79d0e81de40..4761b2e9af00 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gm107.c +++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c | |||
@@ -25,10 +25,45 @@ | |||
25 | #include <subdev/fb.h> | 25 | #include <subdev/fb.h> |
26 | #include <subdev/timer.h> | 26 | #include <subdev/timer.h> |
27 | 27 | ||
28 | #include "gf100.h" | 28 | #include "priv.h" |
29 | 29 | ||
30 | static void | 30 | static void |
31 | gm107_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts) | 31 | gm107_ltc_cbc_clear(struct nvkm_ltc_priv *priv, u32 start, u32 limit) |
32 | { | ||
33 | nv_wr32(priv, 0x17e270, start); | ||
34 | nv_wr32(priv, 0x17e274, limit); | ||
35 | nv_wr32(priv, 0x17e26c, 0x00000004); | ||
36 | } | ||
37 | |||
38 | static void | ||
39 | gm107_ltc_cbc_wait(struct nvkm_ltc_priv *priv) | ||
40 | { | ||
41 | int c, s; | ||
42 | for (c = 0; c < priv->ltc_nr; c++) { | ||
43 | for (s = 0; s < priv->lts_nr; s++) | ||
44 | nv_wait(priv, 0x14046c + c * 0x2000 + s * 0x200, ~0, 0); | ||
45 | } | ||
46 | } | ||
47 | |||
48 | static void | ||
49 | gm107_ltc_zbc_clear_color(struct nvkm_ltc_priv *priv, int i, const u32 color[4]) | ||
50 | { | ||
51 | nv_mask(priv, 0x17e338, 0x0000000f, i); | ||
52 | nv_wr32(priv, 0x17e33c, color[0]); | ||
53 | nv_wr32(priv, 0x17e340, color[1]); | ||
54 | nv_wr32(priv, 0x17e344, color[2]); | ||
55 | nv_wr32(priv, 0x17e348, color[3]); | ||
56 | } | ||
57 | |||
58 | static void | ||
59 | gm107_ltc_zbc_clear_depth(struct nvkm_ltc_priv *priv, int i, const u32 depth) | ||
60 | { | ||
61 | nv_mask(priv, 0x17e338, 0x0000000f, i); | ||
62 | nv_wr32(priv, 0x17e34c, depth); | ||
63 | } | ||
64 | |||
65 | static void | ||
66 | gm107_ltc_lts_isr(struct nvkm_ltc_priv *priv, int ltc, int lts) | ||
32 | { | 67 | { |
33 | u32 base = 0x140000 + (ltc * 0x2000) + (lts * 0x400); | 68 | u32 base = 0x140000 + (ltc * 0x2000) + (lts * 0x400); |
34 | u32 stat = nv_rd32(priv, base + 0x00c); | 69 | u32 stat = nv_rd32(priv, base + 0x00c); |
@@ -40,16 +75,16 @@ gm107_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts) | |||
40 | } | 75 | } |
41 | 76 | ||
42 | static void | 77 | static void |
43 | gm107_ltcg_intr(struct nouveau_subdev *subdev) | 78 | gm107_ltc_intr(struct nouveau_subdev *subdev) |
44 | { | 79 | { |
45 | struct gf100_ltcg_priv *priv = (void *)subdev; | 80 | struct nvkm_ltc_priv *priv = (void *)subdev; |
46 | u32 mask; | 81 | u32 mask; |
47 | 82 | ||
48 | mask = nv_rd32(priv, 0x00017c); | 83 | mask = nv_rd32(priv, 0x00017c); |
49 | while (mask) { | 84 | while (mask) { |
50 | u32 lts, ltc = __ffs(mask); | 85 | u32 lts, ltc = __ffs(mask); |
51 | for (lts = 0; lts < priv->lts_nr; lts++) | 86 | for (lts = 0; lts < priv->lts_nr; lts++) |
52 | gm107_ltcg_lts_isr(priv, ltc, lts); | 87 | gm107_ltc_lts_isr(priv, ltc, lts); |
53 | mask &= ~(1 << ltc); | 88 | mask &= ~(1 << ltc); |
54 | } | 89 | } |
55 | 90 | ||
@@ -59,37 +94,32 @@ gm107_ltcg_intr(struct nouveau_subdev *subdev) | |||
59 | nv_mask(priv, 0x000640, 0x02000000, 0x00000000); | 94 | nv_mask(priv, 0x000640, 0x02000000, 0x00000000); |
60 | } | 95 | } |
61 | 96 | ||
62 | static void | 97 | static int |
63 | gm107_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count) | 98 | gm107_ltc_init(struct nouveau_object *object) |
64 | { | 99 | { |
65 | struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg; | 100 | struct nvkm_ltc_priv *priv = (void *)object; |
66 | u32 last = first + count - 1; | 101 | int ret; |
67 | int p, i; | ||
68 | |||
69 | BUG_ON((first > last) || (last >= priv->num_tags)); | ||
70 | 102 | ||
71 | nv_wr32(priv, 0x17e270, first); | 103 | ret = nvkm_ltc_init(priv); |
72 | nv_wr32(priv, 0x17e274, last); | 104 | if (ret) |
73 | nv_wr32(priv, 0x17e26c, 0x4); /* trigger clear */ | 105 | return ret; |
74 | 106 | ||
75 | /* wait until it's finished with clearing */ | 107 | nv_wr32(priv, 0x17e27c, priv->ltc_nr); |
76 | for (p = 0; p < priv->ltc_nr; ++p) { | 108 | nv_wr32(priv, 0x17e278, priv->tag_base); |
77 | for (i = 0; i < priv->lts_nr; ++i) | 109 | return 0; |
78 | nv_wait(priv, 0x14046c + p * 0x2000 + i * 0x200, ~0, 0); | ||
79 | } | ||
80 | } | 110 | } |
81 | 111 | ||
82 | static int | 112 | static int |
83 | gm107_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | 113 | gm107_ltc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, |
84 | struct nouveau_oclass *oclass, void *data, u32 size, | 114 | struct nouveau_oclass *oclass, void *data, u32 size, |
85 | struct nouveau_object **pobject) | 115 | struct nouveau_object **pobject) |
86 | { | 116 | { |
87 | struct gf100_ltcg_priv *priv; | ||
88 | struct nouveau_fb *pfb = nouveau_fb(parent); | 117 | struct nouveau_fb *pfb = nouveau_fb(parent); |
118 | struct nvkm_ltc_priv *priv; | ||
89 | u32 parts, mask; | 119 | u32 parts, mask; |
90 | int ret, i; | 120 | int ret, i; |
91 | 121 | ||
92 | ret = nouveau_ltcg_create(parent, engine, oclass, &priv); | 122 | ret = nvkm_ltc_create(parent, engine, oclass, &priv); |
93 | *pobject = nv_object(priv); | 123 | *pobject = nv_object(priv); |
94 | if (ret) | 124 | if (ret) |
95 | return ret; | 125 | return ret; |
@@ -102,41 +132,26 @@ gm107_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
102 | } | 132 | } |
103 | priv->lts_nr = nv_rd32(priv, 0x17e280) >> 28; | 133 | priv->lts_nr = nv_rd32(priv, 0x17e280) >> 28; |
104 | 134 | ||
105 | ret = gf100_ltcg_init_tag_ram(pfb, priv); | 135 | ret = gf100_ltc_init_tag_ram(pfb, priv); |
106 | if (ret) | ||
107 | return ret; | ||
108 | |||
109 | priv->base.tags_alloc = gf100_ltcg_tags_alloc; | ||
110 | priv->base.tags_free = gf100_ltcg_tags_free; | ||
111 | priv->base.tags_clear = gm107_ltcg_tags_clear; | ||
112 | |||
113 | nv_subdev(priv)->intr = gm107_ltcg_intr; | ||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | static int | ||
118 | gm107_ltcg_init(struct nouveau_object *object) | ||
119 | { | ||
120 | struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object; | ||
121 | struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg; | ||
122 | int ret; | ||
123 | |||
124 | ret = nouveau_ltcg_init(ltcg); | ||
125 | if (ret) | 136 | if (ret) |
126 | return ret; | 137 | return ret; |
127 | 138 | ||
128 | nv_wr32(priv, 0x17e27c, priv->ltc_nr); | ||
129 | nv_wr32(priv, 0x17e278, priv->tag_base); | ||
130 | return 0; | 139 | return 0; |
131 | } | 140 | } |
132 | 141 | ||
133 | struct nouveau_oclass * | 142 | struct nouveau_oclass * |
134 | gm107_ltcg_oclass = &(struct nouveau_oclass) { | 143 | gm107_ltc_oclass = &(struct nvkm_ltc_impl) { |
135 | .handle = NV_SUBDEV(LTCG, 0xff), | 144 | .base.handle = NV_SUBDEV(LTC, 0xff), |
136 | .ofuncs = &(struct nouveau_ofuncs) { | 145 | .base.ofuncs = &(struct nouveau_ofuncs) { |
137 | .ctor = gm107_ltcg_ctor, | 146 | .ctor = gm107_ltc_ctor, |
138 | .dtor = gf100_ltcg_dtor, | 147 | .dtor = gf100_ltc_dtor, |
139 | .init = gm107_ltcg_init, | 148 | .init = gm107_ltc_init, |
140 | .fini = _nouveau_ltcg_fini, | 149 | .fini = _nvkm_ltc_fini, |
141 | }, | 150 | }, |
142 | }; | 151 | .intr = gm107_ltc_intr, |
152 | .cbc_clear = gm107_ltc_cbc_clear, | ||
153 | .cbc_wait = gm107_ltc_cbc_wait, | ||
154 | .zbc = 16, | ||
155 | .zbc_clear_color = gm107_ltc_zbc_clear_color, | ||
156 | .zbc_clear_depth = gm107_ltc_zbc_clear_depth, | ||
157 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h new file mode 100644 index 000000000000..594924f39126 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h | |||
@@ -0,0 +1,69 @@ | |||
1 | #ifndef __NVKM_LTC_PRIV_H__ | ||
2 | #define __NVKM_LTC_PRIV_H__ | ||
3 | |||
4 | #include <subdev/ltc.h> | ||
5 | #include <subdev/fb.h> | ||
6 | |||
7 | struct nvkm_ltc_priv { | ||
8 | struct nouveau_ltc base; | ||
9 | u32 ltc_nr; | ||
10 | u32 lts_nr; | ||
11 | |||
12 | u32 num_tags; | ||
13 | u32 tag_base; | ||
14 | struct nouveau_mm tags; | ||
15 | struct nouveau_mm_node *tag_ram; | ||
16 | |||
17 | u32 zbc_color[NOUVEAU_LTC_MAX_ZBC_CNT][4]; | ||
18 | u32 zbc_depth[NOUVEAU_LTC_MAX_ZBC_CNT]; | ||
19 | }; | ||
20 | |||
21 | #define nvkm_ltc_create(p,e,o,d) \ | ||
22 | nvkm_ltc_create_((p), (e), (o), sizeof(**d), (void **)d) | ||
23 | #define nvkm_ltc_destroy(p) ({ \ | ||
24 | struct nvkm_ltc_priv *_priv = (p); \ | ||
25 | _nvkm_ltc_dtor(nv_object(_priv)); \ | ||
26 | }) | ||
27 | #define nvkm_ltc_init(p) ({ \ | ||
28 | struct nvkm_ltc_priv *_priv = (p); \ | ||
29 | _nvkm_ltc_init(nv_object(_priv)); \ | ||
30 | }) | ||
31 | #define nvkm_ltc_fini(p,s) ({ \ | ||
32 | struct nvkm_ltc_priv *_priv = (p); \ | ||
33 | _nvkm_ltc_fini(nv_object(_priv), (s)); \ | ||
34 | }) | ||
35 | |||
36 | int nvkm_ltc_create_(struct nouveau_object *, struct nouveau_object *, | ||
37 | struct nouveau_oclass *, int, void **); | ||
38 | |||
39 | #define _nvkm_ltc_dtor _nouveau_subdev_dtor | ||
40 | int _nvkm_ltc_init(struct nouveau_object *); | ||
41 | #define _nvkm_ltc_fini _nouveau_subdev_fini | ||
42 | |||
43 | int gf100_ltc_ctor(struct nouveau_object *, struct nouveau_object *, | ||
44 | struct nouveau_oclass *, void *, u32, | ||
45 | struct nouveau_object **); | ||
46 | void gf100_ltc_dtor(struct nouveau_object *); | ||
47 | int gf100_ltc_init_tag_ram(struct nouveau_fb *, struct nvkm_ltc_priv *); | ||
48 | int gf100_ltc_tags_alloc(struct nouveau_ltc *, u32, struct nouveau_mm_node **); | ||
49 | void gf100_ltc_tags_free(struct nouveau_ltc *, struct nouveau_mm_node **); | ||
50 | |||
51 | struct nvkm_ltc_impl { | ||
52 | struct nouveau_oclass base; | ||
53 | void (*intr)(struct nouveau_subdev *); | ||
54 | |||
55 | void (*cbc_clear)(struct nvkm_ltc_priv *, u32 start, u32 limit); | ||
56 | void (*cbc_wait)(struct nvkm_ltc_priv *); | ||
57 | |||
58 | int zbc; | ||
59 | void (*zbc_clear_color)(struct nvkm_ltc_priv *, int, const u32[4]); | ||
60 | void (*zbc_clear_depth)(struct nvkm_ltc_priv *, int, const u32); | ||
61 | }; | ||
62 | |||
63 | void gf100_ltc_intr(struct nouveau_subdev *); | ||
64 | void gf100_ltc_cbc_clear(struct nvkm_ltc_priv *, u32, u32); | ||
65 | void gf100_ltc_cbc_wait(struct nvkm_ltc_priv *); | ||
66 | void gf100_ltc_zbc_clear_color(struct nvkm_ltc_priv *, int, const u32[4]); | ||
67 | void gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *, int, const u32); | ||
68 | |||
69 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h b/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h deleted file mode 100644 index 87b10b8412ea..000000000000 --- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h +++ /dev/null | |||
@@ -1,21 +0,0 @@ | |||
1 | #ifndef __NVKM_LTCG_PRIV_GF100_H__ | ||
2 | #define __NVKM_LTCG_PRIV_GF100_H__ | ||
3 | |||
4 | #include <subdev/ltcg.h> | ||
5 | |||
6 | struct gf100_ltcg_priv { | ||
7 | struct nouveau_ltcg base; | ||
8 | u32 ltc_nr; | ||
9 | u32 lts_nr; | ||
10 | u32 num_tags; | ||
11 | u32 tag_base; | ||
12 | struct nouveau_mm tags; | ||
13 | struct nouveau_mm_node *tag_ram; | ||
14 | }; | ||
15 | |||
16 | void gf100_ltcg_dtor(struct nouveau_object *); | ||
17 | int gf100_ltcg_init_tag_ram(struct nouveau_fb *, struct gf100_ltcg_priv *); | ||
18 | int gf100_ltcg_tags_alloc(struct nouveau_ltcg *, u32, struct nouveau_mm_node **); | ||
19 | void gf100_ltcg_tags_free(struct nouveau_ltcg *, struct nouveau_mm_node **); | ||
20 | |||
21 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c index 8a5555192fa5..ca7cee3a314a 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c | |||
@@ -22,9 +22,17 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <subdev/mc.h> | 25 | #include "priv.h" |
26 | #include <core/option.h> | 26 | #include <core/option.h> |
27 | 27 | ||
28 | static inline void | ||
29 | nouveau_mc_unk260(struct nouveau_mc *pmc, u32 data) | ||
30 | { | ||
31 | const struct nouveau_mc_oclass *impl = (void *)nv_oclass(pmc); | ||
32 | if (impl->unk260) | ||
33 | impl->unk260(pmc, data); | ||
34 | } | ||
35 | |||
28 | static inline u32 | 36 | static inline u32 |
29 | nouveau_mc_intr_mask(struct nouveau_mc *pmc) | 37 | nouveau_mc_intr_mask(struct nouveau_mc *pmc) |
30 | { | 38 | { |
@@ -114,6 +122,8 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine, | |||
114 | if (ret) | 122 | if (ret) |
115 | return ret; | 123 | return ret; |
116 | 124 | ||
125 | pmc->unk260 = nouveau_mc_unk260; | ||
126 | |||
117 | if (nv_device_is_pci(device)) | 127 | if (nv_device_is_pci(device)) |
118 | switch (device->pdev->device & 0x0ff0) { | 128 | switch (device->pdev->device & 0x0ff0) { |
119 | case 0x00f0: | 129 | case 0x00f0: |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/mc/gk20a.c new file mode 100644 index 000000000000..b8d6cb435d0a --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/gk20a.c | |||
@@ -0,0 +1,38 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "nv04.h" | ||
26 | |||
27 | struct nouveau_oclass * | ||
28 | gk20a_mc_oclass = &(struct nouveau_mc_oclass) { | ||
29 | .base.handle = NV_SUBDEV(MC, 0xea), | ||
30 | .base.ofuncs = &(struct nouveau_ofuncs) { | ||
31 | .ctor = nv04_mc_ctor, | ||
32 | .dtor = _nouveau_mc_dtor, | ||
33 | .init = nv50_mc_init, | ||
34 | .fini = _nouveau_mc_fini, | ||
35 | }, | ||
36 | .intr = nvc0_mc_intr, | ||
37 | .msi_rearm = nv40_mc_msi_rearm, | ||
38 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h index 81a408e7d034..4d9ea46c47c2 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h | |||
@@ -1,7 +1,7 @@ | |||
1 | #ifndef __NVKM_MC_NV04_H__ | 1 | #ifndef __NVKM_MC_NV04_H__ |
2 | #define __NVKM_MC_NV04_H__ | 2 | #define __NVKM_MC_NV04_H__ |
3 | 3 | ||
4 | #include <subdev/mc.h> | 4 | #include "priv.h" |
5 | 5 | ||
6 | struct nv04_mc_priv { | 6 | struct nv04_mc_priv { |
7 | struct nouveau_mc base; | 7 | struct nouveau_mc base; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c index f9c6a678b47d..15d41dc176ff 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c | |||
@@ -41,7 +41,7 @@ nvc0_mc_intr[] = { | |||
41 | { 0x00200000, NVDEV_SUBDEV_GPIO }, /* PMGR->GPIO */ | 41 | { 0x00200000, NVDEV_SUBDEV_GPIO }, /* PMGR->GPIO */ |
42 | { 0x00200000, NVDEV_SUBDEV_I2C }, /* PMGR->I2C/AUX */ | 42 | { 0x00200000, NVDEV_SUBDEV_I2C }, /* PMGR->I2C/AUX */ |
43 | { 0x01000000, NVDEV_SUBDEV_PWR }, | 43 | { 0x01000000, NVDEV_SUBDEV_PWR }, |
44 | { 0x02000000, NVDEV_SUBDEV_LTCG }, | 44 | { 0x02000000, NVDEV_SUBDEV_LTC }, |
45 | { 0x08000000, NVDEV_SUBDEV_FB }, | 45 | { 0x08000000, NVDEV_SUBDEV_FB }, |
46 | { 0x10000000, NVDEV_SUBDEV_BUS }, | 46 | { 0x10000000, NVDEV_SUBDEV_BUS }, |
47 | { 0x40000000, NVDEV_SUBDEV_IBUS }, | 47 | { 0x40000000, NVDEV_SUBDEV_IBUS }, |
@@ -56,6 +56,12 @@ nvc0_mc_msi_rearm(struct nouveau_mc *pmc) | |||
56 | nv_wr32(priv, 0x088704, 0x00000000); | 56 | nv_wr32(priv, 0x088704, 0x00000000); |
57 | } | 57 | } |
58 | 58 | ||
59 | void | ||
60 | nvc0_mc_unk260(struct nouveau_mc *pmc, u32 data) | ||
61 | { | ||
62 | nv_wr32(pmc, 0x000260, data); | ||
63 | } | ||
64 | |||
59 | struct nouveau_oclass * | 65 | struct nouveau_oclass * |
60 | nvc0_mc_oclass = &(struct nouveau_mc_oclass) { | 66 | nvc0_mc_oclass = &(struct nouveau_mc_oclass) { |
61 | .base.handle = NV_SUBDEV(MC, 0xc0), | 67 | .base.handle = NV_SUBDEV(MC, 0xc0), |
@@ -67,4 +73,5 @@ nvc0_mc_oclass = &(struct nouveau_mc_oclass) { | |||
67 | }, | 73 | }, |
68 | .intr = nvc0_mc_intr, | 74 | .intr = nvc0_mc_intr, |
69 | .msi_rearm = nvc0_mc_msi_rearm, | 75 | .msi_rearm = nvc0_mc_msi_rearm, |
76 | .unk260 = nvc0_mc_unk260, | ||
70 | }.base; | 77 | }.base; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c index 837e545aeb9f..68b5f61aadb5 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c | |||
@@ -35,4 +35,5 @@ nvc3_mc_oclass = &(struct nouveau_mc_oclass) { | |||
35 | }, | 35 | }, |
36 | .intr = nvc0_mc_intr, | 36 | .intr = nvc0_mc_intr, |
37 | .msi_rearm = nv40_mc_msi_rearm, | 37 | .msi_rearm = nv40_mc_msi_rearm, |
38 | .unk260 = nvc0_mc_unk260, | ||
38 | }.base; | 39 | }.base; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/core/subdev/mc/priv.h new file mode 100644 index 000000000000..911e66392587 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/priv.h | |||
@@ -0,0 +1,38 @@ | |||
1 | #ifndef __NVKM_MC_PRIV_H__ | ||
2 | #define __NVKM_MC_PRIV_H__ | ||
3 | |||
4 | #include <subdev/mc.h> | ||
5 | |||
6 | #define nouveau_mc_create(p,e,o,d) \ | ||
7 | nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d) | ||
8 | #define nouveau_mc_destroy(p) ({ \ | ||
9 | struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \ | ||
10 | }) | ||
11 | #define nouveau_mc_init(p) ({ \ | ||
12 | struct nouveau_mc *pmc = (p); _nouveau_mc_init(nv_object(pmc)); \ | ||
13 | }) | ||
14 | #define nouveau_mc_fini(p,s) ({ \ | ||
15 | struct nouveau_mc *pmc = (p); _nouveau_mc_fini(nv_object(pmc), (s)); \ | ||
16 | }) | ||
17 | |||
18 | int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *, | ||
19 | struct nouveau_oclass *, int, void **); | ||
20 | void _nouveau_mc_dtor(struct nouveau_object *); | ||
21 | int _nouveau_mc_init(struct nouveau_object *); | ||
22 | int _nouveau_mc_fini(struct nouveau_object *, bool); | ||
23 | |||
24 | struct nouveau_mc_intr { | ||
25 | u32 stat; | ||
26 | u32 unit; | ||
27 | }; | ||
28 | |||
29 | struct nouveau_mc_oclass { | ||
30 | struct nouveau_oclass base; | ||
31 | const struct nouveau_mc_intr *intr; | ||
32 | void (*msi_rearm)(struct nouveau_mc *); | ||
33 | void (*unk260)(struct nouveau_mc *, u32); | ||
34 | }; | ||
35 | |||
36 | void nvc0_mc_unk260(struct nouveau_mc *, u32); | ||
37 | |||
38 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c index d4fd3bc9c66f..69f1f34f6931 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c | |||
@@ -22,9 +22,18 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <subdev/pwr.h> | ||
26 | #include <subdev/timer.h> | 25 | #include <subdev/timer.h> |
27 | 26 | ||
27 | #include "priv.h" | ||
28 | |||
29 | static void | ||
30 | nouveau_pwr_pgob(struct nouveau_pwr *ppwr, bool enable) | ||
31 | { | ||
32 | const struct nvkm_pwr_impl *impl = (void *)nv_oclass(ppwr); | ||
33 | if (impl->pgob) | ||
34 | impl->pgob(ppwr, enable); | ||
35 | } | ||
36 | |||
28 | static int | 37 | static int |
29 | nouveau_pwr_send(struct nouveau_pwr *ppwr, u32 reply[2], | 38 | nouveau_pwr_send(struct nouveau_pwr *ppwr, u32 reply[2], |
30 | u32 process, u32 message, u32 data0, u32 data1) | 39 | u32 process, u32 message, u32 data0, u32 data1) |
@@ -177,6 +186,7 @@ _nouveau_pwr_fini(struct nouveau_object *object, bool suspend) | |||
177 | int | 186 | int |
178 | _nouveau_pwr_init(struct nouveau_object *object) | 187 | _nouveau_pwr_init(struct nouveau_object *object) |
179 | { | 188 | { |
189 | const struct nvkm_pwr_impl *impl = (void *)object->oclass; | ||
180 | struct nouveau_pwr *ppwr = (void *)object; | 190 | struct nouveau_pwr *ppwr = (void *)object; |
181 | int ret, i; | 191 | int ret, i; |
182 | 192 | ||
@@ -186,6 +196,7 @@ _nouveau_pwr_init(struct nouveau_object *object) | |||
186 | 196 | ||
187 | nv_subdev(ppwr)->intr = nouveau_pwr_intr; | 197 | nv_subdev(ppwr)->intr = nouveau_pwr_intr; |
188 | ppwr->message = nouveau_pwr_send; | 198 | ppwr->message = nouveau_pwr_send; |
199 | ppwr->pgob = nouveau_pwr_pgob; | ||
189 | 200 | ||
190 | /* prevent previous ucode from running, wait for idle, reset */ | 201 | /* prevent previous ucode from running, wait for idle, reset */ |
191 | nv_wr32(ppwr, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */ | 202 | nv_wr32(ppwr, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */ |
@@ -195,15 +206,15 @@ _nouveau_pwr_init(struct nouveau_object *object) | |||
195 | 206 | ||
196 | /* upload data segment */ | 207 | /* upload data segment */ |
197 | nv_wr32(ppwr, 0x10a1c0, 0x01000000); | 208 | nv_wr32(ppwr, 0x10a1c0, 0x01000000); |
198 | for (i = 0; i < ppwr->data.size / 4; i++) | 209 | for (i = 0; i < impl->data.size / 4; i++) |
199 | nv_wr32(ppwr, 0x10a1c4, ppwr->data.data[i]); | 210 | nv_wr32(ppwr, 0x10a1c4, impl->data.data[i]); |
200 | 211 | ||
201 | /* upload code segment */ | 212 | /* upload code segment */ |
202 | nv_wr32(ppwr, 0x10a180, 0x01000000); | 213 | nv_wr32(ppwr, 0x10a180, 0x01000000); |
203 | for (i = 0; i < ppwr->code.size / 4; i++) { | 214 | for (i = 0; i < impl->code.size / 4; i++) { |
204 | if ((i & 0x3f) == 0) | 215 | if ((i & 0x3f) == 0) |
205 | nv_wr32(ppwr, 0x10a188, i >> 6); | 216 | nv_wr32(ppwr, 0x10a188, i >> 6); |
206 | nv_wr32(ppwr, 0x10a184, ppwr->code.data[i]); | 217 | nv_wr32(ppwr, 0x10a184, impl->code.data[i]); |
207 | } | 218 | } |
208 | 219 | ||
209 | /* start it running */ | 220 | /* start it running */ |
@@ -245,3 +256,15 @@ nouveau_pwr_create_(struct nouveau_object *parent, | |||
245 | init_waitqueue_head(&ppwr->recv.wait); | 256 | init_waitqueue_head(&ppwr->recv.wait); |
246 | return 0; | 257 | return 0; |
247 | } | 258 | } |
259 | |||
260 | int | ||
261 | _nouveau_pwr_ctor(struct nouveau_object *parent, | ||
262 | struct nouveau_object *engine, | ||
263 | struct nouveau_oclass *oclass, void *data, u32 size, | ||
264 | struct nouveau_object **pobject) | ||
265 | { | ||
266 | struct nouveau_pwr *ppwr; | ||
267 | int ret = nouveau_pwr_create(parent, engine, oclass, &ppwr); | ||
268 | *pobject = nv_object(ppwr); | ||
269 | return ret; | ||
270 | } | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc index e2a63ac5422b..5668e045bac1 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc | |||
@@ -242,7 +242,7 @@ | |||
242 | */ push reg /* | 242 | */ push reg /* |
243 | */ pop $r13 /* | 243 | */ pop $r13 /* |
244 | */ pop $r14 /* | 244 | */ pop $r14 /* |
245 | */ call(wr32) /* | 245 | */ call(wr32) |
246 | #else | 246 | #else |
247 | #define nv_wr32(addr,reg) /* | 247 | #define nv_wr32(addr,reg) /* |
248 | */ sethi $r0 0x14000000 /* | 248 | */ sethi $r0 0x14000000 /* |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h index 39a5dc150a05..986495d533dd 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h | |||
@@ -46,8 +46,8 @@ uint32_t nv108_pwr_data[] = { | |||
46 | 0x00000000, | 46 | 0x00000000, |
47 | 0x00000000, | 47 | 0x00000000, |
48 | 0x584d454d, | 48 | 0x584d454d, |
49 | 0x0000046f, | 49 | 0x00000464, |
50 | 0x00000461, | 50 | 0x00000456, |
51 | 0x00000000, | 51 | 0x00000000, |
52 | 0x00000000, | 52 | 0x00000000, |
53 | 0x00000000, | 53 | 0x00000000, |
@@ -68,8 +68,8 @@ uint32_t nv108_pwr_data[] = { | |||
68 | 0x00000000, | 68 | 0x00000000, |
69 | 0x00000000, | 69 | 0x00000000, |
70 | 0x46524550, | 70 | 0x46524550, |
71 | 0x00000473, | 71 | 0x00000468, |
72 | 0x00000471, | 72 | 0x00000466, |
73 | 0x00000000, | 73 | 0x00000000, |
74 | 0x00000000, | 74 | 0x00000000, |
75 | 0x00000000, | 75 | 0x00000000, |
@@ -90,8 +90,8 @@ uint32_t nv108_pwr_data[] = { | |||
90 | 0x00000000, | 90 | 0x00000000, |
91 | 0x00000000, | 91 | 0x00000000, |
92 | 0x5f433249, | 92 | 0x5f433249, |
93 | 0x00000877, | 93 | 0x0000086c, |
94 | 0x0000071e, | 94 | 0x00000713, |
95 | 0x00000000, | 95 | 0x00000000, |
96 | 0x00000000, | 96 | 0x00000000, |
97 | 0x00000000, | 97 | 0x00000000, |
@@ -112,8 +112,8 @@ uint32_t nv108_pwr_data[] = { | |||
112 | 0x00000000, | 112 | 0x00000000, |
113 | 0x00000000, | 113 | 0x00000000, |
114 | 0x54534554, | 114 | 0x54534554, |
115 | 0x00000898, | 115 | 0x0000088d, |
116 | 0x00000879, | 116 | 0x0000086e, |
117 | 0x00000000, | 117 | 0x00000000, |
118 | 0x00000000, | 118 | 0x00000000, |
119 | 0x00000000, | 119 | 0x00000000, |
@@ -134,8 +134,8 @@ uint32_t nv108_pwr_data[] = { | |||
134 | 0x00000000, | 134 | 0x00000000, |
135 | 0x00000000, | 135 | 0x00000000, |
136 | 0x454c4449, | 136 | 0x454c4449, |
137 | 0x000008a3, | 137 | 0x00000898, |
138 | 0x000008a1, | 138 | 0x00000896, |
139 | 0x00000000, | 139 | 0x00000000, |
140 | 0x00000000, | 140 | 0x00000000, |
141 | 0x00000000, | 141 | 0x00000000, |
@@ -239,10 +239,10 @@ uint32_t nv108_pwr_data[] = { | |||
239 | 0x000003df, | 239 | 0x000003df, |
240 | 0x00040003, | 240 | 0x00040003, |
241 | 0x00000000, | 241 | 0x00000000, |
242 | 0x00000407, | 242 | 0x000003fc, |
243 | 0x00010004, | 243 | 0x00010004, |
244 | 0x00000000, | 244 | 0x00000000, |
245 | 0x00000421, | 245 | 0x00000416, |
246 | /* 0x03ac: memx_func_tail */ | 246 | /* 0x03ac: memx_func_tail */ |
247 | /* 0x03ac: memx_data_head */ | 247 | /* 0x03ac: memx_data_head */ |
248 | 0x00000000, | 248 | 0x00000000, |
@@ -1080,375 +1080,375 @@ uint32_t nv108_pwr_code[] = { | |||
1080 | 0x50f960f9, | 1080 | 0x50f960f9, |
1081 | 0xe0fcd0fc, | 1081 | 0xe0fcd0fc, |
1082 | 0x00002e7e, | 1082 | 0x00002e7e, |
1083 | 0x140003f1, | 1083 | 0xf40242b6, |
1084 | 0xa00506fd, | 1084 | 0x00f8e81b, |
1085 | 0xb604bd05, | 1085 | /* 0x03fc: memx_func_wait */ |
1086 | 0x1bf40242, | 1086 | 0x88cf2c08, |
1087 | /* 0x0407: memx_func_wait */ | ||
1088 | 0x0800f8dd, | ||
1089 | 0x0088cf2c, | ||
1090 | 0x98001e98, | ||
1091 | 0x1c98011d, | ||
1092 | 0x031b9802, | ||
1093 | 0x7e1010b6, | ||
1094 | 0xf8000071, | ||
1095 | /* 0x0421: memx_func_delay */ | ||
1096 | 0x001e9800, | 1087 | 0x001e9800, |
1097 | 0x7e0410b6, | 1088 | 0x98011d98, |
1098 | 0xf800005d, | 1089 | 0x1b98021c, |
1099 | /* 0x042d: memx_exec */ | 1090 | 0x1010b603, |
1100 | 0xf9e0f900, | 1091 | 0x0000717e, |
1101 | 0xb2c1b2d0, | 1092 | /* 0x0416: memx_func_delay */ |
1102 | /* 0x0435: memx_exec_next */ | 1093 | 0x1e9800f8, |
1103 | 0x001398b2, | 1094 | 0x0410b600, |
1104 | 0x950410b6, | ||
1105 | 0x30f01034, | ||
1106 | 0xde35980c, | ||
1107 | 0x12a655f9, | ||
1108 | 0xfced1ef4, | ||
1109 | 0x7ee0fcd0, | ||
1110 | 0xf800023f, | ||
1111 | /* 0x0455: memx_info */ | ||
1112 | 0x03ac4c00, | ||
1113 | 0x7e08004b, | ||
1114 | 0xf800023f, | ||
1115 | /* 0x0461: memx_recv */ | ||
1116 | 0x01d6b000, | ||
1117 | 0xb0c90bf4, | ||
1118 | 0x0bf400d6, | ||
1119 | /* 0x046f: memx_init */ | ||
1120 | 0xf800f8eb, | ||
1121 | /* 0x0471: perf_recv */ | ||
1122 | /* 0x0473: perf_init */ | ||
1123 | 0xf800f800, | ||
1124 | /* 0x0475: i2c_drive_scl */ | ||
1125 | 0x0036b000, | ||
1126 | 0x400d0bf4, | ||
1127 | 0x01f607e0, | ||
1128 | 0xf804bd00, | ||
1129 | /* 0x0485: i2c_drive_scl_lo */ | ||
1130 | 0x07e44000, | ||
1131 | 0xbd0001f6, | ||
1132 | /* 0x048f: i2c_drive_sda */ | ||
1133 | 0xb000f804, | ||
1134 | 0x0bf40036, | ||
1135 | 0x07e0400d, | ||
1136 | 0xbd0002f6, | ||
1137 | /* 0x049f: i2c_drive_sda_lo */ | ||
1138 | 0x4000f804, | ||
1139 | 0x02f607e4, | ||
1140 | 0xf804bd00, | ||
1141 | /* 0x04a9: i2c_sense_scl */ | ||
1142 | 0x0132f400, | ||
1143 | 0xcf07c443, | ||
1144 | 0x31fd0033, | ||
1145 | 0x060bf404, | ||
1146 | /* 0x04bb: i2c_sense_scl_done */ | ||
1147 | 0xf80131f4, | ||
1148 | /* 0x04bd: i2c_sense_sda */ | ||
1149 | 0x0132f400, | ||
1150 | 0xcf07c443, | ||
1151 | 0x32fd0033, | ||
1152 | 0x060bf404, | ||
1153 | /* 0x04cf: i2c_sense_sda_done */ | ||
1154 | 0xf80131f4, | ||
1155 | /* 0x04d1: i2c_raise_scl */ | ||
1156 | 0x4440f900, | ||
1157 | 0x01030898, | ||
1158 | 0x0004757e, | ||
1159 | /* 0x04dc: i2c_raise_scl_wait */ | ||
1160 | 0x7e03e84e, | ||
1161 | 0x7e00005d, | ||
1162 | 0xf40004a9, | ||
1163 | 0x42b60901, | ||
1164 | 0xef1bf401, | ||
1165 | /* 0x04f0: i2c_raise_scl_done */ | ||
1166 | 0x00f840fc, | ||
1167 | /* 0x04f4: i2c_start */ | ||
1168 | 0x0004a97e, | ||
1169 | 0x7e0d11f4, | ||
1170 | 0xf40004bd, | ||
1171 | 0x0ef40611, | ||
1172 | /* 0x0505: i2c_start_rep */ | ||
1173 | 0x7e00032e, | ||
1174 | 0x03000475, | ||
1175 | 0x048f7e01, | ||
1176 | 0x0076bb00, | ||
1177 | 0xf90465b6, | ||
1178 | 0x04659450, | ||
1179 | 0xbd0256bb, | ||
1180 | 0x0475fd50, | ||
1181 | 0xd17e50fc, | ||
1182 | 0x64b60004, | ||
1183 | 0x1d11f404, | ||
1184 | /* 0x0530: i2c_start_send */ | ||
1185 | 0x8f7e0003, | ||
1186 | 0x884e0004, | ||
1187 | 0x005d7e13, | ||
1188 | 0x7e000300, | ||
1189 | 0x4e000475, | ||
1190 | 0x5d7e1388, | ||
1191 | /* 0x054a: i2c_start_out */ | ||
1192 | 0x00f80000, | ||
1193 | /* 0x054c: i2c_stop */ | ||
1194 | 0x757e0003, | ||
1195 | 0x00030004, | ||
1196 | 0x00048f7e, | ||
1197 | 0x7e03e84e, | ||
1198 | 0x0300005d, | ||
1199 | 0x04757e01, | ||
1200 | 0x13884e00, | ||
1201 | 0x00005d7e, | 1095 | 0x00005d7e, |
1202 | 0x8f7e0103, | 1096 | /* 0x0422: memx_exec */ |
1203 | 0x884e0004, | 1097 | 0xe0f900f8, |
1204 | 0x005d7e13, | 1098 | 0xc1b2d0f9, |
1205 | /* 0x057b: i2c_bitw */ | 1099 | /* 0x042a: memx_exec_next */ |
1206 | 0x7e00f800, | 1100 | 0x1398b2b2, |
1207 | 0x4e00048f, | 1101 | 0x0410b600, |
1208 | 0x5d7e03e8, | 1102 | 0xf0103495, |
1209 | 0x76bb0000, | 1103 | 0x35980c30, |
1104 | 0xa655f9de, | ||
1105 | 0xed1ef412, | ||
1106 | 0xe0fcd0fc, | ||
1107 | 0x00023f7e, | ||
1108 | /* 0x044a: memx_info */ | ||
1109 | 0xac4c00f8, | ||
1110 | 0x08004b03, | ||
1111 | 0x00023f7e, | ||
1112 | /* 0x0456: memx_recv */ | ||
1113 | 0xd6b000f8, | ||
1114 | 0xc90bf401, | ||
1115 | 0xf400d6b0, | ||
1116 | 0x00f8eb0b, | ||
1117 | /* 0x0464: memx_init */ | ||
1118 | /* 0x0466: perf_recv */ | ||
1119 | 0x00f800f8, | ||
1120 | /* 0x0468: perf_init */ | ||
1121 | /* 0x046a: i2c_drive_scl */ | ||
1122 | 0x36b000f8, | ||
1123 | 0x0d0bf400, | ||
1124 | 0xf607e040, | ||
1125 | 0x04bd0001, | ||
1126 | /* 0x047a: i2c_drive_scl_lo */ | ||
1127 | 0xe44000f8, | ||
1128 | 0x0001f607, | ||
1129 | 0x00f804bd, | ||
1130 | /* 0x0484: i2c_drive_sda */ | ||
1131 | 0xf40036b0, | ||
1132 | 0xe0400d0b, | ||
1133 | 0x0002f607, | ||
1134 | 0x00f804bd, | ||
1135 | /* 0x0494: i2c_drive_sda_lo */ | ||
1136 | 0xf607e440, | ||
1137 | 0x04bd0002, | ||
1138 | /* 0x049e: i2c_sense_scl */ | ||
1139 | 0x32f400f8, | ||
1140 | 0x07c44301, | ||
1141 | 0xfd0033cf, | ||
1142 | 0x0bf40431, | ||
1143 | 0x0131f406, | ||
1144 | /* 0x04b0: i2c_sense_scl_done */ | ||
1145 | /* 0x04b2: i2c_sense_sda */ | ||
1146 | 0x32f400f8, | ||
1147 | 0x07c44301, | ||
1148 | 0xfd0033cf, | ||
1149 | 0x0bf40432, | ||
1150 | 0x0131f406, | ||
1151 | /* 0x04c4: i2c_sense_sda_done */ | ||
1152 | /* 0x04c6: i2c_raise_scl */ | ||
1153 | 0x40f900f8, | ||
1154 | 0x03089844, | ||
1155 | 0x046a7e01, | ||
1156 | /* 0x04d1: i2c_raise_scl_wait */ | ||
1157 | 0x03e84e00, | ||
1158 | 0x00005d7e, | ||
1159 | 0x00049e7e, | ||
1160 | 0xb60901f4, | ||
1161 | 0x1bf40142, | ||
1162 | /* 0x04e5: i2c_raise_scl_done */ | ||
1163 | 0xf840fcef, | ||
1164 | /* 0x04e9: i2c_start */ | ||
1165 | 0x049e7e00, | ||
1166 | 0x0d11f400, | ||
1167 | 0x0004b27e, | ||
1168 | 0xf40611f4, | ||
1169 | /* 0x04fa: i2c_start_rep */ | ||
1170 | 0x00032e0e, | ||
1171 | 0x00046a7e, | ||
1172 | 0x847e0103, | ||
1173 | 0x76bb0004, | ||
1210 | 0x0465b600, | 1174 | 0x0465b600, |
1211 | 0x659450f9, | 1175 | 0x659450f9, |
1212 | 0x0256bb04, | 1176 | 0x0256bb04, |
1213 | 0x75fd50bd, | 1177 | 0x75fd50bd, |
1214 | 0x7e50fc04, | 1178 | 0x7e50fc04, |
1215 | 0xb60004d1, | 1179 | 0xb60004c6, |
1216 | 0x11f40464, | 1180 | 0x11f40464, |
1217 | 0x13884e17, | 1181 | /* 0x0525: i2c_start_send */ |
1182 | 0x7e00031d, | ||
1183 | 0x4e000484, | ||
1184 | 0x5d7e1388, | ||
1185 | 0x00030000, | ||
1186 | 0x00046a7e, | ||
1187 | 0x7e13884e, | ||
1188 | /* 0x053f: i2c_start_out */ | ||
1189 | 0xf800005d, | ||
1190 | /* 0x0541: i2c_stop */ | ||
1191 | 0x7e000300, | ||
1192 | 0x0300046a, | ||
1193 | 0x04847e00, | ||
1194 | 0x03e84e00, | ||
1218 | 0x00005d7e, | 1195 | 0x00005d7e, |
1219 | 0x757e0003, | 1196 | 0x6a7e0103, |
1220 | 0x884e0004, | 1197 | 0x884e0004, |
1221 | 0x005d7e13, | 1198 | 0x005d7e13, |
1222 | /* 0x05b9: i2c_bitw_out */ | 1199 | 0x7e010300, |
1223 | /* 0x05bb: i2c_bitr */ | 1200 | 0x4e000484, |
1224 | 0x0300f800, | 1201 | 0x5d7e1388, |
1225 | 0x048f7e01, | 1202 | 0x00f80000, |
1226 | 0x03e84e00, | 1203 | /* 0x0570: i2c_bitw */ |
1227 | 0x00005d7e, | 1204 | 0x0004847e, |
1228 | 0xb60076bb, | 1205 | 0x7e03e84e, |
1229 | 0x50f90465, | 1206 | 0xbb00005d, |
1230 | 0xbb046594, | ||
1231 | 0x50bd0256, | ||
1232 | 0xfc0475fd, | ||
1233 | 0x04d17e50, | ||
1234 | 0x0464b600, | ||
1235 | 0x7e1a11f4, | ||
1236 | 0x030004bd, | ||
1237 | 0x04757e00, | ||
1238 | 0x13884e00, | ||
1239 | 0x00005d7e, | ||
1240 | 0xf4013cf0, | ||
1241 | /* 0x05fe: i2c_bitr_done */ | ||
1242 | 0x00f80131, | ||
1243 | /* 0x0600: i2c_get_byte */ | ||
1244 | 0x08040005, | ||
1245 | /* 0x0604: i2c_get_byte_next */ | ||
1246 | 0xbb0154b6, | ||
1247 | 0x65b60076, | 1207 | 0x65b60076, |
1248 | 0x9450f904, | 1208 | 0x9450f904, |
1249 | 0x56bb0465, | 1209 | 0x56bb0465, |
1250 | 0xfd50bd02, | 1210 | 0xfd50bd02, |
1251 | 0x50fc0475, | 1211 | 0x50fc0475, |
1252 | 0x0005bb7e, | 1212 | 0x0004c67e, |
1253 | 0xf40464b6, | 1213 | 0xf40464b6, |
1254 | 0x53fd2a11, | 1214 | 0x884e1711, |
1255 | 0x0142b605, | 1215 | 0x005d7e13, |
1256 | 0x03d81bf4, | 1216 | 0x7e000300, |
1257 | 0x0076bb01, | 1217 | 0x4e00046a, |
1218 | 0x5d7e1388, | ||
1219 | /* 0x05ae: i2c_bitw_out */ | ||
1220 | 0x00f80000, | ||
1221 | /* 0x05b0: i2c_bitr */ | ||
1222 | 0x847e0103, | ||
1223 | 0xe84e0004, | ||
1224 | 0x005d7e03, | ||
1225 | 0x0076bb00, | ||
1258 | 0xf90465b6, | 1226 | 0xf90465b6, |
1259 | 0x04659450, | 1227 | 0x04659450, |
1260 | 0xbd0256bb, | 1228 | 0xbd0256bb, |
1261 | 0x0475fd50, | 1229 | 0x0475fd50, |
1262 | 0x7b7e50fc, | 1230 | 0xc67e50fc, |
1263 | 0x64b60005, | 1231 | 0x64b60004, |
1264 | /* 0x064d: i2c_get_byte_done */ | 1232 | 0x1a11f404, |
1265 | /* 0x064f: i2c_put_byte */ | 1233 | 0x0004b27e, |
1266 | 0x0400f804, | 1234 | 0x6a7e0003, |
1267 | /* 0x0651: i2c_put_byte_next */ | 1235 | 0x884e0004, |
1268 | 0x0142b608, | 1236 | 0x005d7e13, |
1269 | 0xbb3854ff, | 1237 | 0x013cf000, |
1270 | 0x65b60076, | 1238 | /* 0x05f3: i2c_bitr_done */ |
1271 | 0x9450f904, | 1239 | 0xf80131f4, |
1272 | 0x56bb0465, | 1240 | /* 0x05f5: i2c_get_byte */ |
1273 | 0xfd50bd02, | 1241 | 0x04000500, |
1274 | 0x50fc0475, | 1242 | /* 0x05f9: i2c_get_byte_next */ |
1275 | 0x00057b7e, | 1243 | 0x0154b608, |
1276 | 0xf40464b6, | ||
1277 | 0x46b03411, | ||
1278 | 0xd81bf400, | ||
1279 | 0xb60076bb, | 1244 | 0xb60076bb, |
1280 | 0x50f90465, | 1245 | 0x50f90465, |
1281 | 0xbb046594, | 1246 | 0xbb046594, |
1282 | 0x50bd0256, | 1247 | 0x50bd0256, |
1283 | 0xfc0475fd, | 1248 | 0xfc0475fd, |
1284 | 0x05bb7e50, | 1249 | 0x05b07e50, |
1285 | 0x0464b600, | 1250 | 0x0464b600, |
1286 | 0xbb0f11f4, | 1251 | 0xfd2a11f4, |
1287 | 0x36b00076, | 1252 | 0x42b60553, |
1288 | 0x061bf401, | 1253 | 0xd81bf401, |
1289 | /* 0x06a7: i2c_put_byte_done */ | 1254 | 0x76bb0103, |
1290 | 0xf80132f4, | ||
1291 | /* 0x06a9: i2c_addr */ | ||
1292 | 0x0076bb00, | ||
1293 | 0xf90465b6, | ||
1294 | 0x04659450, | ||
1295 | 0xbd0256bb, | ||
1296 | 0x0475fd50, | ||
1297 | 0xf47e50fc, | ||
1298 | 0x64b60004, | ||
1299 | 0x2911f404, | ||
1300 | 0x012ec3e7, | ||
1301 | 0xfd0134b6, | ||
1302 | 0x76bb0553, | ||
1303 | 0x0465b600, | 1255 | 0x0465b600, |
1304 | 0x659450f9, | 1256 | 0x659450f9, |
1305 | 0x0256bb04, | 1257 | 0x0256bb04, |
1306 | 0x75fd50bd, | 1258 | 0x75fd50bd, |
1307 | 0x7e50fc04, | 1259 | 0x7e50fc04, |
1308 | 0xb600064f, | 1260 | 0xb6000570, |
1309 | /* 0x06ee: i2c_addr_done */ | 1261 | /* 0x0642: i2c_get_byte_done */ |
1310 | 0x00f80464, | 1262 | 0x00f80464, |
1311 | /* 0x06f0: i2c_acquire_addr */ | 1263 | /* 0x0644: i2c_put_byte */ |
1312 | 0xb6f8cec7, | 1264 | /* 0x0646: i2c_put_byte_next */ |
1313 | 0xe0b705e4, | 1265 | 0x42b60804, |
1314 | 0x00f8d014, | 1266 | 0x3854ff01, |
1315 | /* 0x06fc: i2c_acquire */ | ||
1316 | 0x0006f07e, | ||
1317 | 0x0000047e, | ||
1318 | 0x7e03d9f0, | ||
1319 | 0xf800002e, | ||
1320 | /* 0x070d: i2c_release */ | ||
1321 | 0x06f07e00, | ||
1322 | 0x00047e00, | ||
1323 | 0x03daf000, | ||
1324 | 0x00002e7e, | ||
1325 | /* 0x071e: i2c_recv */ | ||
1326 | 0x32f400f8, | ||
1327 | 0xf8c1c701, | ||
1328 | 0xb00214b6, | ||
1329 | 0x1ff52816, | ||
1330 | 0x13b80137, | ||
1331 | 0x98000bd4, | ||
1332 | 0x13b80032, | ||
1333 | 0x98000bac, | ||
1334 | 0x31f40031, | ||
1335 | 0xf9d0f902, | ||
1336 | 0xf1d0f9e0, | ||
1337 | 0xf1000067, | ||
1338 | 0x92100063, | ||
1339 | 0x76bb0167, | ||
1340 | 0x0465b600, | ||
1341 | 0x659450f9, | ||
1342 | 0x0256bb04, | ||
1343 | 0x75fd50bd, | ||
1344 | 0x7e50fc04, | ||
1345 | 0xb60006fc, | ||
1346 | 0xd0fc0464, | ||
1347 | 0xf500d6b0, | ||
1348 | 0x0500b01b, | ||
1349 | 0x0076bb00, | ||
1350 | 0xf90465b6, | ||
1351 | 0x04659450, | ||
1352 | 0xbd0256bb, | ||
1353 | 0x0475fd50, | ||
1354 | 0xa97e50fc, | ||
1355 | 0x64b60006, | ||
1356 | 0xcc11f504, | ||
1357 | 0xe0c5c700, | ||
1358 | 0xb60076bb, | 1267 | 0xb60076bb, |
1359 | 0x50f90465, | 1268 | 0x50f90465, |
1360 | 0xbb046594, | 1269 | 0xbb046594, |
1361 | 0x50bd0256, | 1270 | 0x50bd0256, |
1362 | 0xfc0475fd, | 1271 | 0xfc0475fd, |
1363 | 0x064f7e50, | 1272 | 0x05707e50, |
1364 | 0x0464b600, | 1273 | 0x0464b600, |
1365 | 0x00a911f5, | 1274 | 0xb03411f4, |
1366 | 0x76bb0105, | 1275 | 0x1bf40046, |
1276 | 0x0076bbd8, | ||
1277 | 0xf90465b6, | ||
1278 | 0x04659450, | ||
1279 | 0xbd0256bb, | ||
1280 | 0x0475fd50, | ||
1281 | 0xb07e50fc, | ||
1282 | 0x64b60005, | ||
1283 | 0x0f11f404, | ||
1284 | 0xb00076bb, | ||
1285 | 0x1bf40136, | ||
1286 | 0x0132f406, | ||
1287 | /* 0x069c: i2c_put_byte_done */ | ||
1288 | /* 0x069e: i2c_addr */ | ||
1289 | 0x76bb00f8, | ||
1367 | 0x0465b600, | 1290 | 0x0465b600, |
1368 | 0x659450f9, | 1291 | 0x659450f9, |
1369 | 0x0256bb04, | 1292 | 0x0256bb04, |
1370 | 0x75fd50bd, | 1293 | 0x75fd50bd, |
1371 | 0x7e50fc04, | 1294 | 0x7e50fc04, |
1372 | 0xb60006a9, | 1295 | 0xb60004e9, |
1373 | 0x11f50464, | 1296 | 0x11f40464, |
1374 | 0x76bb0087, | 1297 | 0x2ec3e729, |
1298 | 0x0134b601, | ||
1299 | 0xbb0553fd, | ||
1300 | 0x65b60076, | ||
1301 | 0x9450f904, | ||
1302 | 0x56bb0465, | ||
1303 | 0xfd50bd02, | ||
1304 | 0x50fc0475, | ||
1305 | 0x0006447e, | ||
1306 | /* 0x06e3: i2c_addr_done */ | ||
1307 | 0xf80464b6, | ||
1308 | /* 0x06e5: i2c_acquire_addr */ | ||
1309 | 0xf8cec700, | ||
1310 | 0xb705e4b6, | ||
1311 | 0xf8d014e0, | ||
1312 | /* 0x06f1: i2c_acquire */ | ||
1313 | 0x06e57e00, | ||
1314 | 0x00047e00, | ||
1315 | 0x03d9f000, | ||
1316 | 0x00002e7e, | ||
1317 | /* 0x0702: i2c_release */ | ||
1318 | 0xe57e00f8, | ||
1319 | 0x047e0006, | ||
1320 | 0xdaf00000, | ||
1321 | 0x002e7e03, | ||
1322 | /* 0x0713: i2c_recv */ | ||
1323 | 0xf400f800, | ||
1324 | 0xc1c70132, | ||
1325 | 0x0214b6f8, | ||
1326 | 0xf52816b0, | ||
1327 | 0xb801371f, | ||
1328 | 0x000bd413, | ||
1329 | 0xb8003298, | ||
1330 | 0x000bac13, | ||
1331 | 0xf4003198, | ||
1332 | 0xd0f90231, | ||
1333 | 0xd0f9e0f9, | ||
1334 | 0x000067f1, | ||
1335 | 0x100063f1, | ||
1336 | 0xbb016792, | ||
1337 | 0x65b60076, | ||
1338 | 0x9450f904, | ||
1339 | 0x56bb0465, | ||
1340 | 0xfd50bd02, | ||
1341 | 0x50fc0475, | ||
1342 | 0x0006f17e, | ||
1343 | 0xfc0464b6, | ||
1344 | 0x00d6b0d0, | ||
1345 | 0x00b01bf5, | ||
1346 | 0x76bb0005, | ||
1375 | 0x0465b600, | 1347 | 0x0465b600, |
1376 | 0x659450f9, | 1348 | 0x659450f9, |
1377 | 0x0256bb04, | 1349 | 0x0256bb04, |
1378 | 0x75fd50bd, | 1350 | 0x75fd50bd, |
1379 | 0x7e50fc04, | 1351 | 0x7e50fc04, |
1380 | 0xb6000600, | 1352 | 0xb600069e, |
1381 | 0x11f40464, | 1353 | 0x11f50464, |
1382 | 0xe05bcb67, | 1354 | 0xc5c700cc, |
1383 | 0xb60076bb, | 1355 | 0x0076bbe0, |
1384 | 0x50f90465, | 1356 | 0xf90465b6, |
1385 | 0xbb046594, | 1357 | 0x04659450, |
1386 | 0x50bd0256, | 1358 | 0xbd0256bb, |
1387 | 0xfc0475fd, | 1359 | 0x0475fd50, |
1388 | 0x054c7e50, | 1360 | 0x447e50fc, |
1389 | 0x0464b600, | 1361 | 0x64b60006, |
1390 | 0x74bd5bb2, | 1362 | 0xa911f504, |
1391 | /* 0x0823: i2c_recv_not_rd08 */ | 1363 | 0xbb010500, |
1392 | 0xb0410ef4, | 1364 | 0x65b60076, |
1393 | 0x1bf401d6, | 1365 | 0x9450f904, |
1394 | 0x7e00053b, | 1366 | 0x56bb0465, |
1395 | 0xf40006a9, | 1367 | 0xfd50bd02, |
1396 | 0xc5c73211, | 1368 | 0x50fc0475, |
1397 | 0x064f7ee0, | 1369 | 0x00069e7e, |
1398 | 0x2811f400, | 1370 | 0xf50464b6, |
1399 | 0xa97e0005, | 1371 | 0xbb008711, |
1372 | 0x65b60076, | ||
1373 | 0x9450f904, | ||
1374 | 0x56bb0465, | ||
1375 | 0xfd50bd02, | ||
1376 | 0x50fc0475, | ||
1377 | 0x0005f57e, | ||
1378 | 0xf40464b6, | ||
1379 | 0x5bcb6711, | ||
1380 | 0x0076bbe0, | ||
1381 | 0xf90465b6, | ||
1382 | 0x04659450, | ||
1383 | 0xbd0256bb, | ||
1384 | 0x0475fd50, | ||
1385 | 0x417e50fc, | ||
1386 | 0x64b60005, | ||
1387 | 0xbd5bb204, | ||
1388 | 0x410ef474, | ||
1389 | /* 0x0818: i2c_recv_not_rd08 */ | ||
1390 | 0xf401d6b0, | ||
1391 | 0x00053b1b, | ||
1392 | 0x00069e7e, | ||
1393 | 0xc73211f4, | ||
1394 | 0x447ee0c5, | ||
1400 | 0x11f40006, | 1395 | 0x11f40006, |
1401 | 0xe0b5c71f, | 1396 | 0x7e000528, |
1402 | 0x00064f7e, | 1397 | 0xf400069e, |
1403 | 0x7e1511f4, | 1398 | 0xb5c71f11, |
1404 | 0xbd00054c, | 1399 | 0x06447ee0, |
1405 | 0x08c5c774, | 1400 | 0x1511f400, |
1406 | 0xf4091bf4, | 1401 | 0x0005417e, |
1407 | 0x0ef40232, | 1402 | 0xc5c774bd, |
1408 | /* 0x0861: i2c_recv_not_wr08 */ | 1403 | 0x091bf408, |
1409 | /* 0x0861: i2c_recv_done */ | 1404 | 0xf40232f4, |
1410 | 0xf8cec703, | 1405 | /* 0x0856: i2c_recv_not_wr08 */ |
1411 | 0x00070d7e, | 1406 | /* 0x0856: i2c_recv_done */ |
1412 | 0xd0fce0fc, | 1407 | 0xcec7030e, |
1413 | 0xb20912f4, | 1408 | 0x07027ef8, |
1414 | 0x023f7e7c, | 1409 | 0xfce0fc00, |
1415 | /* 0x0875: i2c_recv_exit */ | 1410 | 0x0912f4d0, |
1416 | /* 0x0877: i2c_init */ | 1411 | 0x3f7e7cb2, |
1417 | 0xf800f800, | 1412 | /* 0x086a: i2c_recv_exit */ |
1418 | /* 0x0879: test_recv */ | 1413 | 0x00f80002, |
1419 | 0x04584100, | 1414 | /* 0x086c: i2c_init */ |
1420 | 0xb60011cf, | 1415 | /* 0x086e: test_recv */ |
1421 | 0x58400110, | 1416 | 0x584100f8, |
1422 | 0x0001f604, | ||
1423 | 0xe7f104bd, | ||
1424 | 0xe3f1d900, | ||
1425 | 0x967e134f, | ||
1426 | 0x00f80001, | ||
1427 | /* 0x0898: test_init */ | ||
1428 | 0x7e08004e, | ||
1429 | 0xf8000196, | ||
1430 | /* 0x08a1: idle_recv */ | ||
1431 | /* 0x08a3: idle */ | ||
1432 | 0xf400f800, | ||
1433 | 0x54410031, | ||
1434 | 0x0011cf04, | 1417 | 0x0011cf04, |
1435 | 0x400110b6, | 1418 | 0x400110b6, |
1436 | 0x01f60454, | 1419 | 0x01f60458, |
1437 | /* 0x08b7: idle_loop */ | 1420 | 0xf104bd00, |
1438 | 0x0104bd00, | 1421 | 0xf1d900e7, |
1439 | 0x0232f458, | 1422 | 0x7e134fe3, |
1440 | /* 0x08bc: idle_proc */ | 1423 | 0xf8000196, |
1441 | /* 0x08bc: idle_proc_exec */ | 1424 | /* 0x088d: test_init */ |
1442 | 0x1eb210f9, | 1425 | 0x08004e00, |
1443 | 0x0002487e, | 1426 | 0x0001967e, |
1444 | 0x11f410fc, | 1427 | /* 0x0896: idle_recv */ |
1445 | 0x0231f409, | 1428 | 0x00f800f8, |
1446 | /* 0x08cf: idle_proc_next */ | 1429 | /* 0x0898: idle */ |
1447 | 0xb6f00ef4, | 1430 | 0x410031f4, |
1448 | 0x1fa65810, | 1431 | 0x11cf0454, |
1449 | 0xf4e81bf4, | 1432 | 0x0110b600, |
1450 | 0x28f4e002, | 1433 | 0xf6045440, |
1451 | 0xc60ef400, | 1434 | 0x04bd0001, |
1435 | /* 0x08ac: idle_loop */ | ||
1436 | 0x32f45801, | ||
1437 | /* 0x08b1: idle_proc */ | ||
1438 | /* 0x08b1: idle_proc_exec */ | ||
1439 | 0xb210f902, | ||
1440 | 0x02487e1e, | ||
1441 | 0xf410fc00, | ||
1442 | 0x31f40911, | ||
1443 | 0xf00ef402, | ||
1444 | /* 0x08c4: idle_proc_next */ | ||
1445 | 0xa65810b6, | ||
1446 | 0xe81bf41f, | ||
1447 | 0xf4e002f4, | ||
1448 | 0x0ef40028, | ||
1449 | 0x000000c6, | ||
1450 | 0x00000000, | ||
1451 | 0x00000000, | ||
1452 | 0x00000000, | 1452 | 0x00000000, |
1453 | 0x00000000, | 1453 | 0x00000000, |
1454 | 0x00000000, | 1454 | 0x00000000, |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h index 254205cd5166..e087ce3041be 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h | |||
@@ -46,8 +46,8 @@ uint32_t nva3_pwr_data[] = { | |||
46 | 0x00000000, | 46 | 0x00000000, |
47 | 0x00000000, | 47 | 0x00000000, |
48 | 0x584d454d, | 48 | 0x584d454d, |
49 | 0x0000054e, | 49 | 0x00000542, |
50 | 0x00000540, | 50 | 0x00000534, |
51 | 0x00000000, | 51 | 0x00000000, |
52 | 0x00000000, | 52 | 0x00000000, |
53 | 0x00000000, | 53 | 0x00000000, |
@@ -68,8 +68,8 @@ uint32_t nva3_pwr_data[] = { | |||
68 | 0x00000000, | 68 | 0x00000000, |
69 | 0x00000000, | 69 | 0x00000000, |
70 | 0x46524550, | 70 | 0x46524550, |
71 | 0x00000552, | 71 | 0x00000546, |
72 | 0x00000550, | 72 | 0x00000544, |
73 | 0x00000000, | 73 | 0x00000000, |
74 | 0x00000000, | 74 | 0x00000000, |
75 | 0x00000000, | 75 | 0x00000000, |
@@ -90,8 +90,8 @@ uint32_t nva3_pwr_data[] = { | |||
90 | 0x00000000, | 90 | 0x00000000, |
91 | 0x00000000, | 91 | 0x00000000, |
92 | 0x5f433249, | 92 | 0x5f433249, |
93 | 0x00000982, | 93 | 0x00000976, |
94 | 0x00000825, | 94 | 0x00000819, |
95 | 0x00000000, | 95 | 0x00000000, |
96 | 0x00000000, | 96 | 0x00000000, |
97 | 0x00000000, | 97 | 0x00000000, |
@@ -112,8 +112,8 @@ uint32_t nva3_pwr_data[] = { | |||
112 | 0x00000000, | 112 | 0x00000000, |
113 | 0x00000000, | 113 | 0x00000000, |
114 | 0x54534554, | 114 | 0x54534554, |
115 | 0x000009ab, | 115 | 0x0000099f, |
116 | 0x00000984, | 116 | 0x00000978, |
117 | 0x00000000, | 117 | 0x00000000, |
118 | 0x00000000, | 118 | 0x00000000, |
119 | 0x00000000, | 119 | 0x00000000, |
@@ -134,8 +134,8 @@ uint32_t nva3_pwr_data[] = { | |||
134 | 0x00000000, | 134 | 0x00000000, |
135 | 0x00000000, | 135 | 0x00000000, |
136 | 0x454c4449, | 136 | 0x454c4449, |
137 | 0x000009b7, | 137 | 0x000009ab, |
138 | 0x000009b5, | 138 | 0x000009a9, |
139 | 0x00000000, | 139 | 0x00000000, |
140 | 0x00000000, | 140 | 0x00000000, |
141 | 0x00000000, | 141 | 0x00000000, |
@@ -239,10 +239,10 @@ uint32_t nva3_pwr_data[] = { | |||
239 | 0x000004b7, | 239 | 0x000004b7, |
240 | 0x00040003, | 240 | 0x00040003, |
241 | 0x00000000, | 241 | 0x00000000, |
242 | 0x000004df, | 242 | 0x000004d3, |
243 | 0x00010004, | 243 | 0x00010004, |
244 | 0x00000000, | 244 | 0x00000000, |
245 | 0x000004fc, | 245 | 0x000004f0, |
246 | /* 0x03ac: memx_func_tail */ | 246 | /* 0x03ac: memx_func_tail */ |
247 | /* 0x03ac: memx_data_head */ | 247 | /* 0x03ac: memx_data_head */ |
248 | 0x00000000, | 248 | 0x00000000, |
@@ -1198,13 +1198,10 @@ uint32_t nva3_pwr_code[] = { | |||
1198 | 0x0810b601, | 1198 | 0x0810b601, |
1199 | 0x50f960f9, | 1199 | 0x50f960f9, |
1200 | 0xe0fcd0fc, | 1200 | 0xe0fcd0fc, |
1201 | 0xf13f21f4, | 1201 | 0xb63f21f4, |
1202 | 0xfd140003, | ||
1203 | 0x05800506, | ||
1204 | 0xb604bd00, | ||
1205 | 0x1bf40242, | 1202 | 0x1bf40242, |
1206 | /* 0x04df: memx_func_wait */ | 1203 | /* 0x04d3: memx_func_wait */ |
1207 | 0xf000f8dd, | 1204 | 0xf000f8e9, |
1208 | 0x84b62c87, | 1205 | 0x84b62c87, |
1209 | 0x0088cf06, | 1206 | 0x0088cf06, |
1210 | 0x98001e98, | 1207 | 0x98001e98, |
@@ -1212,14 +1209,14 @@ uint32_t nva3_pwr_code[] = { | |||
1212 | 0x031b9802, | 1209 | 0x031b9802, |
1213 | 0xf41010b6, | 1210 | 0xf41010b6, |
1214 | 0x00f89c21, | 1211 | 0x00f89c21, |
1215 | /* 0x04fc: memx_func_delay */ | 1212 | /* 0x04f0: memx_func_delay */ |
1216 | 0xb6001e98, | 1213 | 0xb6001e98, |
1217 | 0x21f40410, | 1214 | 0x21f40410, |
1218 | /* 0x0507: memx_exec */ | 1215 | /* 0x04fb: memx_exec */ |
1219 | 0xf900f87f, | 1216 | 0xf900f87f, |
1220 | 0xb9d0f9e0, | 1217 | 0xb9d0f9e0, |
1221 | 0xb2b902c1, | 1218 | 0xb2b902c1, |
1222 | /* 0x0511: memx_exec_next */ | 1219 | /* 0x0505: memx_exec_next */ |
1223 | 0x00139802, | 1220 | 0x00139802, |
1224 | 0x950410b6, | 1221 | 0x950410b6, |
1225 | 0x30f01034, | 1222 | 0x30f01034, |
@@ -1228,112 +1225,112 @@ uint32_t nva3_pwr_code[] = { | |||
1228 | 0xec1ef406, | 1225 | 0xec1ef406, |
1229 | 0xe0fcd0fc, | 1226 | 0xe0fcd0fc, |
1230 | 0x02b921f5, | 1227 | 0x02b921f5, |
1231 | /* 0x0532: memx_info */ | 1228 | /* 0x0526: memx_info */ |
1232 | 0xc7f100f8, | 1229 | 0xc7f100f8, |
1233 | 0xb7f103ac, | 1230 | 0xb7f103ac, |
1234 | 0x21f50800, | 1231 | 0x21f50800, |
1235 | 0x00f802b9, | 1232 | 0x00f802b9, |
1236 | /* 0x0540: memx_recv */ | 1233 | /* 0x0534: memx_recv */ |
1237 | 0xf401d6b0, | 1234 | 0xf401d6b0, |
1238 | 0xd6b0c40b, | 1235 | 0xd6b0c40b, |
1239 | 0xe90bf400, | 1236 | 0xe90bf400, |
1240 | /* 0x054e: memx_init */ | 1237 | /* 0x0542: memx_init */ |
1241 | 0x00f800f8, | 1238 | 0x00f800f8, |
1242 | /* 0x0550: perf_recv */ | 1239 | /* 0x0544: perf_recv */ |
1243 | /* 0x0552: perf_init */ | 1240 | /* 0x0546: perf_init */ |
1244 | 0x00f800f8, | 1241 | 0x00f800f8, |
1245 | /* 0x0554: i2c_drive_scl */ | 1242 | /* 0x0548: i2c_drive_scl */ |
1246 | 0xf40036b0, | 1243 | 0xf40036b0, |
1247 | 0x07f1110b, | 1244 | 0x07f1110b, |
1248 | 0x04b607e0, | 1245 | 0x04b607e0, |
1249 | 0x0001d006, | 1246 | 0x0001d006, |
1250 | 0x00f804bd, | 1247 | 0x00f804bd, |
1251 | /* 0x0568: i2c_drive_scl_lo */ | 1248 | /* 0x055c: i2c_drive_scl_lo */ |
1252 | 0x07e407f1, | 1249 | 0x07e407f1, |
1253 | 0xd00604b6, | 1250 | 0xd00604b6, |
1254 | 0x04bd0001, | 1251 | 0x04bd0001, |
1255 | /* 0x0576: i2c_drive_sda */ | 1252 | /* 0x056a: i2c_drive_sda */ |
1256 | 0x36b000f8, | 1253 | 0x36b000f8, |
1257 | 0x110bf400, | 1254 | 0x110bf400, |
1258 | 0x07e007f1, | 1255 | 0x07e007f1, |
1259 | 0xd00604b6, | 1256 | 0xd00604b6, |
1260 | 0x04bd0002, | 1257 | 0x04bd0002, |
1261 | /* 0x058a: i2c_drive_sda_lo */ | 1258 | /* 0x057e: i2c_drive_sda_lo */ |
1262 | 0x07f100f8, | 1259 | 0x07f100f8, |
1263 | 0x04b607e4, | 1260 | 0x04b607e4, |
1264 | 0x0002d006, | 1261 | 0x0002d006, |
1265 | 0x00f804bd, | 1262 | 0x00f804bd, |
1266 | /* 0x0598: i2c_sense_scl */ | 1263 | /* 0x058c: i2c_sense_scl */ |
1267 | 0xf10132f4, | 1264 | 0xf10132f4, |
1268 | 0xb607c437, | 1265 | 0xb607c437, |
1269 | 0x33cf0634, | 1266 | 0x33cf0634, |
1270 | 0x0431fd00, | 1267 | 0x0431fd00, |
1271 | 0xf4060bf4, | 1268 | 0xf4060bf4, |
1272 | /* 0x05ae: i2c_sense_scl_done */ | 1269 | /* 0x05a2: i2c_sense_scl_done */ |
1273 | 0x00f80131, | 1270 | 0x00f80131, |
1274 | /* 0x05b0: i2c_sense_sda */ | 1271 | /* 0x05a4: i2c_sense_sda */ |
1275 | 0xf10132f4, | 1272 | 0xf10132f4, |
1276 | 0xb607c437, | 1273 | 0xb607c437, |
1277 | 0x33cf0634, | 1274 | 0x33cf0634, |
1278 | 0x0432fd00, | 1275 | 0x0432fd00, |
1279 | 0xf4060bf4, | 1276 | 0xf4060bf4, |
1280 | /* 0x05c6: i2c_sense_sda_done */ | 1277 | /* 0x05ba: i2c_sense_sda_done */ |
1281 | 0x00f80131, | 1278 | 0x00f80131, |
1282 | /* 0x05c8: i2c_raise_scl */ | 1279 | /* 0x05bc: i2c_raise_scl */ |
1283 | 0x47f140f9, | 1280 | 0x47f140f9, |
1284 | 0x37f00898, | 1281 | 0x37f00898, |
1285 | 0x5421f501, | 1282 | 0x4821f501, |
1286 | /* 0x05d5: i2c_raise_scl_wait */ | 1283 | /* 0x05c9: i2c_raise_scl_wait */ |
1287 | 0xe8e7f105, | 1284 | 0xe8e7f105, |
1288 | 0x7f21f403, | 1285 | 0x7f21f403, |
1289 | 0x059821f5, | 1286 | 0x058c21f5, |
1290 | 0xb60901f4, | 1287 | 0xb60901f4, |
1291 | 0x1bf40142, | 1288 | 0x1bf40142, |
1292 | /* 0x05e9: i2c_raise_scl_done */ | 1289 | /* 0x05dd: i2c_raise_scl_done */ |
1293 | 0xf840fcef, | 1290 | 0xf840fcef, |
1294 | /* 0x05ed: i2c_start */ | 1291 | /* 0x05e1: i2c_start */ |
1295 | 0x9821f500, | 1292 | 0x8c21f500, |
1296 | 0x0d11f405, | 1293 | 0x0d11f405, |
1297 | 0x05b021f5, | 1294 | 0x05a421f5, |
1298 | 0xf40611f4, | 1295 | 0xf40611f4, |
1299 | /* 0x05fe: i2c_start_rep */ | 1296 | /* 0x05f2: i2c_start_rep */ |
1300 | 0x37f0300e, | 1297 | 0x37f0300e, |
1301 | 0x5421f500, | 1298 | 0x4821f500, |
1302 | 0x0137f005, | 1299 | 0x0137f005, |
1303 | 0x057621f5, | 1300 | 0x056a21f5, |
1304 | 0xb60076bb, | 1301 | 0xb60076bb, |
1305 | 0x50f90465, | 1302 | 0x50f90465, |
1306 | 0xbb046594, | 1303 | 0xbb046594, |
1307 | 0x50bd0256, | 1304 | 0x50bd0256, |
1308 | 0xfc0475fd, | 1305 | 0xfc0475fd, |
1309 | 0xc821f550, | 1306 | 0xbc21f550, |
1310 | 0x0464b605, | 1307 | 0x0464b605, |
1311 | /* 0x062b: i2c_start_send */ | 1308 | /* 0x061f: i2c_start_send */ |
1312 | 0xf01f11f4, | 1309 | 0xf01f11f4, |
1313 | 0x21f50037, | 1310 | 0x21f50037, |
1314 | 0xe7f10576, | 1311 | 0xe7f1056a, |
1315 | 0x21f41388, | 1312 | 0x21f41388, |
1316 | 0x0037f07f, | 1313 | 0x0037f07f, |
1317 | 0x055421f5, | 1314 | 0x054821f5, |
1318 | 0x1388e7f1, | 1315 | 0x1388e7f1, |
1319 | /* 0x0647: i2c_start_out */ | 1316 | /* 0x063b: i2c_start_out */ |
1320 | 0xf87f21f4, | 1317 | 0xf87f21f4, |
1321 | /* 0x0649: i2c_stop */ | 1318 | /* 0x063d: i2c_stop */ |
1322 | 0x0037f000, | 1319 | 0x0037f000, |
1323 | 0x055421f5, | 1320 | 0x054821f5, |
1324 | 0xf50037f0, | 1321 | 0xf50037f0, |
1325 | 0xf1057621, | 1322 | 0xf1056a21, |
1326 | 0xf403e8e7, | 1323 | 0xf403e8e7, |
1327 | 0x37f07f21, | 1324 | 0x37f07f21, |
1328 | 0x5421f501, | 1325 | 0x4821f501, |
1329 | 0x88e7f105, | 1326 | 0x88e7f105, |
1330 | 0x7f21f413, | 1327 | 0x7f21f413, |
1331 | 0xf50137f0, | 1328 | 0xf50137f0, |
1332 | 0xf1057621, | 1329 | 0xf1056a21, |
1333 | 0xf41388e7, | 1330 | 0xf41388e7, |
1334 | 0x00f87f21, | 1331 | 0x00f87f21, |
1335 | /* 0x067c: i2c_bitw */ | 1332 | /* 0x0670: i2c_bitw */ |
1336 | 0x057621f5, | 1333 | 0x056a21f5, |
1337 | 0x03e8e7f1, | 1334 | 0x03e8e7f1, |
1338 | 0xbb7f21f4, | 1335 | 0xbb7f21f4, |
1339 | 0x65b60076, | 1336 | 0x65b60076, |
@@ -1341,18 +1338,18 @@ uint32_t nva3_pwr_code[] = { | |||
1341 | 0x56bb0465, | 1338 | 0x56bb0465, |
1342 | 0xfd50bd02, | 1339 | 0xfd50bd02, |
1343 | 0x50fc0475, | 1340 | 0x50fc0475, |
1344 | 0x05c821f5, | 1341 | 0x05bc21f5, |
1345 | 0xf40464b6, | 1342 | 0xf40464b6, |
1346 | 0xe7f11811, | 1343 | 0xe7f11811, |
1347 | 0x21f41388, | 1344 | 0x21f41388, |
1348 | 0x0037f07f, | 1345 | 0x0037f07f, |
1349 | 0x055421f5, | 1346 | 0x054821f5, |
1350 | 0x1388e7f1, | 1347 | 0x1388e7f1, |
1351 | /* 0x06bb: i2c_bitw_out */ | 1348 | /* 0x06af: i2c_bitw_out */ |
1352 | 0xf87f21f4, | 1349 | 0xf87f21f4, |
1353 | /* 0x06bd: i2c_bitr */ | 1350 | /* 0x06b1: i2c_bitr */ |
1354 | 0x0137f000, | 1351 | 0x0137f000, |
1355 | 0x057621f5, | 1352 | 0x056a21f5, |
1356 | 0x03e8e7f1, | 1353 | 0x03e8e7f1, |
1357 | 0xbb7f21f4, | 1354 | 0xbb7f21f4, |
1358 | 0x65b60076, | 1355 | 0x65b60076, |
@@ -1360,19 +1357,19 @@ uint32_t nva3_pwr_code[] = { | |||
1360 | 0x56bb0465, | 1357 | 0x56bb0465, |
1361 | 0xfd50bd02, | 1358 | 0xfd50bd02, |
1362 | 0x50fc0475, | 1359 | 0x50fc0475, |
1363 | 0x05c821f5, | 1360 | 0x05bc21f5, |
1364 | 0xf40464b6, | 1361 | 0xf40464b6, |
1365 | 0x21f51b11, | 1362 | 0x21f51b11, |
1366 | 0x37f005b0, | 1363 | 0x37f005a4, |
1367 | 0x5421f500, | 1364 | 0x4821f500, |
1368 | 0x88e7f105, | 1365 | 0x88e7f105, |
1369 | 0x7f21f413, | 1366 | 0x7f21f413, |
1370 | 0xf4013cf0, | 1367 | 0xf4013cf0, |
1371 | /* 0x0702: i2c_bitr_done */ | 1368 | /* 0x06f6: i2c_bitr_done */ |
1372 | 0x00f80131, | 1369 | 0x00f80131, |
1373 | /* 0x0704: i2c_get_byte */ | 1370 | /* 0x06f8: i2c_get_byte */ |
1374 | 0xf00057f0, | 1371 | 0xf00057f0, |
1375 | /* 0x070a: i2c_get_byte_next */ | 1372 | /* 0x06fe: i2c_get_byte_next */ |
1376 | 0x54b60847, | 1373 | 0x54b60847, |
1377 | 0x0076bb01, | 1374 | 0x0076bb01, |
1378 | 0xf90465b6, | 1375 | 0xf90465b6, |
@@ -1380,7 +1377,7 @@ uint32_t nva3_pwr_code[] = { | |||
1380 | 0xbd0256bb, | 1377 | 0xbd0256bb, |
1381 | 0x0475fd50, | 1378 | 0x0475fd50, |
1382 | 0x21f550fc, | 1379 | 0x21f550fc, |
1383 | 0x64b606bd, | 1380 | 0x64b606b1, |
1384 | 0x2b11f404, | 1381 | 0x2b11f404, |
1385 | 0xb60553fd, | 1382 | 0xb60553fd, |
1386 | 0x1bf40142, | 1383 | 0x1bf40142, |
@@ -1390,12 +1387,12 @@ uint32_t nva3_pwr_code[] = { | |||
1390 | 0xbb046594, | 1387 | 0xbb046594, |
1391 | 0x50bd0256, | 1388 | 0x50bd0256, |
1392 | 0xfc0475fd, | 1389 | 0xfc0475fd, |
1393 | 0x7c21f550, | 1390 | 0x7021f550, |
1394 | 0x0464b606, | 1391 | 0x0464b606, |
1395 | /* 0x0754: i2c_get_byte_done */ | 1392 | /* 0x0748: i2c_get_byte_done */ |
1396 | /* 0x0756: i2c_put_byte */ | 1393 | /* 0x074a: i2c_put_byte */ |
1397 | 0x47f000f8, | 1394 | 0x47f000f8, |
1398 | /* 0x0759: i2c_put_byte_next */ | 1395 | /* 0x074d: i2c_put_byte_next */ |
1399 | 0x0142b608, | 1396 | 0x0142b608, |
1400 | 0xbb3854ff, | 1397 | 0xbb3854ff, |
1401 | 0x65b60076, | 1398 | 0x65b60076, |
@@ -1403,7 +1400,7 @@ uint32_t nva3_pwr_code[] = { | |||
1403 | 0x56bb0465, | 1400 | 0x56bb0465, |
1404 | 0xfd50bd02, | 1401 | 0xfd50bd02, |
1405 | 0x50fc0475, | 1402 | 0x50fc0475, |
1406 | 0x067c21f5, | 1403 | 0x067021f5, |
1407 | 0xf40464b6, | 1404 | 0xf40464b6, |
1408 | 0x46b03411, | 1405 | 0x46b03411, |
1409 | 0xd81bf400, | 1406 | 0xd81bf400, |
@@ -1412,21 +1409,21 @@ uint32_t nva3_pwr_code[] = { | |||
1412 | 0xbb046594, | 1409 | 0xbb046594, |
1413 | 0x50bd0256, | 1410 | 0x50bd0256, |
1414 | 0xfc0475fd, | 1411 | 0xfc0475fd, |
1415 | 0xbd21f550, | 1412 | 0xb121f550, |
1416 | 0x0464b606, | 1413 | 0x0464b606, |
1417 | 0xbb0f11f4, | 1414 | 0xbb0f11f4, |
1418 | 0x36b00076, | 1415 | 0x36b00076, |
1419 | 0x061bf401, | 1416 | 0x061bf401, |
1420 | /* 0x07af: i2c_put_byte_done */ | 1417 | /* 0x07a3: i2c_put_byte_done */ |
1421 | 0xf80132f4, | 1418 | 0xf80132f4, |
1422 | /* 0x07b1: i2c_addr */ | 1419 | /* 0x07a5: i2c_addr */ |
1423 | 0x0076bb00, | 1420 | 0x0076bb00, |
1424 | 0xf90465b6, | 1421 | 0xf90465b6, |
1425 | 0x04659450, | 1422 | 0x04659450, |
1426 | 0xbd0256bb, | 1423 | 0xbd0256bb, |
1427 | 0x0475fd50, | 1424 | 0x0475fd50, |
1428 | 0x21f550fc, | 1425 | 0x21f550fc, |
1429 | 0x64b605ed, | 1426 | 0x64b605e1, |
1430 | 0x2911f404, | 1427 | 0x2911f404, |
1431 | 0x012ec3e7, | 1428 | 0x012ec3e7, |
1432 | 0xfd0134b6, | 1429 | 0xfd0134b6, |
@@ -1436,24 +1433,24 @@ uint32_t nva3_pwr_code[] = { | |||
1436 | 0x0256bb04, | 1433 | 0x0256bb04, |
1437 | 0x75fd50bd, | 1434 | 0x75fd50bd, |
1438 | 0xf550fc04, | 1435 | 0xf550fc04, |
1439 | 0xb6075621, | 1436 | 0xb6074a21, |
1440 | /* 0x07f6: i2c_addr_done */ | 1437 | /* 0x07ea: i2c_addr_done */ |
1441 | 0x00f80464, | 1438 | 0x00f80464, |
1442 | /* 0x07f8: i2c_acquire_addr */ | 1439 | /* 0x07ec: i2c_acquire_addr */ |
1443 | 0xb6f8cec7, | 1440 | 0xb6f8cec7, |
1444 | 0xe0b702e4, | 1441 | 0xe0b702e4, |
1445 | 0xee980bfc, | 1442 | 0xee980bfc, |
1446 | /* 0x0807: i2c_acquire */ | 1443 | /* 0x07fb: i2c_acquire */ |
1447 | 0xf500f800, | 1444 | 0xf500f800, |
1448 | 0xf407f821, | 1445 | 0xf407ec21, |
1449 | 0xd9f00421, | 1446 | 0xd9f00421, |
1450 | 0x3f21f403, | 1447 | 0x3f21f403, |
1451 | /* 0x0816: i2c_release */ | 1448 | /* 0x080a: i2c_release */ |
1452 | 0x21f500f8, | 1449 | 0x21f500f8, |
1453 | 0x21f407f8, | 1450 | 0x21f407ec, |
1454 | 0x03daf004, | 1451 | 0x03daf004, |
1455 | 0xf83f21f4, | 1452 | 0xf83f21f4, |
1456 | /* 0x0825: i2c_recv */ | 1453 | /* 0x0819: i2c_recv */ |
1457 | 0x0132f400, | 1454 | 0x0132f400, |
1458 | 0xb6f8c1c7, | 1455 | 0xb6f8c1c7, |
1459 | 0x16b00214, | 1456 | 0x16b00214, |
@@ -1472,7 +1469,7 @@ uint32_t nva3_pwr_code[] = { | |||
1472 | 0x56bb0465, | 1469 | 0x56bb0465, |
1473 | 0xfd50bd02, | 1470 | 0xfd50bd02, |
1474 | 0x50fc0475, | 1471 | 0x50fc0475, |
1475 | 0x080721f5, | 1472 | 0x07fb21f5, |
1476 | 0xfc0464b6, | 1473 | 0xfc0464b6, |
1477 | 0x00d6b0d0, | 1474 | 0x00d6b0d0, |
1478 | 0x00b31bf5, | 1475 | 0x00b31bf5, |
@@ -1482,7 +1479,7 @@ uint32_t nva3_pwr_code[] = { | |||
1482 | 0x56bb0465, | 1479 | 0x56bb0465, |
1483 | 0xfd50bd02, | 1480 | 0xfd50bd02, |
1484 | 0x50fc0475, | 1481 | 0x50fc0475, |
1485 | 0x07b121f5, | 1482 | 0x07a521f5, |
1486 | 0xf50464b6, | 1483 | 0xf50464b6, |
1487 | 0xc700d011, | 1484 | 0xc700d011, |
1488 | 0x76bbe0c5, | 1485 | 0x76bbe0c5, |
@@ -1491,7 +1488,7 @@ uint32_t nva3_pwr_code[] = { | |||
1491 | 0x0256bb04, | 1488 | 0x0256bb04, |
1492 | 0x75fd50bd, | 1489 | 0x75fd50bd, |
1493 | 0xf550fc04, | 1490 | 0xf550fc04, |
1494 | 0xb6075621, | 1491 | 0xb6074a21, |
1495 | 0x11f50464, | 1492 | 0x11f50464, |
1496 | 0x57f000ad, | 1493 | 0x57f000ad, |
1497 | 0x0076bb01, | 1494 | 0x0076bb01, |
@@ -1500,7 +1497,7 @@ uint32_t nva3_pwr_code[] = { | |||
1500 | 0xbd0256bb, | 1497 | 0xbd0256bb, |
1501 | 0x0475fd50, | 1498 | 0x0475fd50, |
1502 | 0x21f550fc, | 1499 | 0x21f550fc, |
1503 | 0x64b607b1, | 1500 | 0x64b607a5, |
1504 | 0x8a11f504, | 1501 | 0x8a11f504, |
1505 | 0x0076bb00, | 1502 | 0x0076bb00, |
1506 | 0xf90465b6, | 1503 | 0xf90465b6, |
@@ -1508,7 +1505,7 @@ uint32_t nva3_pwr_code[] = { | |||
1508 | 0xbd0256bb, | 1505 | 0xbd0256bb, |
1509 | 0x0475fd50, | 1506 | 0x0475fd50, |
1510 | 0x21f550fc, | 1507 | 0x21f550fc, |
1511 | 0x64b60704, | 1508 | 0x64b606f8, |
1512 | 0x6a11f404, | 1509 | 0x6a11f404, |
1513 | 0xbbe05bcb, | 1510 | 0xbbe05bcb, |
1514 | 0x65b60076, | 1511 | 0x65b60076, |
@@ -1516,38 +1513,38 @@ uint32_t nva3_pwr_code[] = { | |||
1516 | 0x56bb0465, | 1513 | 0x56bb0465, |
1517 | 0xfd50bd02, | 1514 | 0xfd50bd02, |
1518 | 0x50fc0475, | 1515 | 0x50fc0475, |
1519 | 0x064921f5, | 1516 | 0x063d21f5, |
1520 | 0xb90464b6, | 1517 | 0xb90464b6, |
1521 | 0x74bd025b, | 1518 | 0x74bd025b, |
1522 | /* 0x092b: i2c_recv_not_rd08 */ | 1519 | /* 0x091f: i2c_recv_not_rd08 */ |
1523 | 0xb0430ef4, | 1520 | 0xb0430ef4, |
1524 | 0x1bf401d6, | 1521 | 0x1bf401d6, |
1525 | 0x0057f03d, | 1522 | 0x0057f03d, |
1526 | 0x07b121f5, | 1523 | 0x07a521f5, |
1527 | 0xc73311f4, | 1524 | 0xc73311f4, |
1528 | 0x21f5e0c5, | 1525 | 0x21f5e0c5, |
1529 | 0x11f40756, | 1526 | 0x11f4074a, |
1530 | 0x0057f029, | 1527 | 0x0057f029, |
1531 | 0x07b121f5, | 1528 | 0x07a521f5, |
1532 | 0xc71f11f4, | 1529 | 0xc71f11f4, |
1533 | 0x21f5e0b5, | 1530 | 0x21f5e0b5, |
1534 | 0x11f40756, | 1531 | 0x11f4074a, |
1535 | 0x4921f515, | 1532 | 0x3d21f515, |
1536 | 0xc774bd06, | 1533 | 0xc774bd06, |
1537 | 0x1bf408c5, | 1534 | 0x1bf408c5, |
1538 | 0x0232f409, | 1535 | 0x0232f409, |
1539 | /* 0x096b: i2c_recv_not_wr08 */ | 1536 | /* 0x095f: i2c_recv_not_wr08 */ |
1540 | /* 0x096b: i2c_recv_done */ | 1537 | /* 0x095f: i2c_recv_done */ |
1541 | 0xc7030ef4, | 1538 | 0xc7030ef4, |
1542 | 0x21f5f8ce, | 1539 | 0x21f5f8ce, |
1543 | 0xe0fc0816, | 1540 | 0xe0fc080a, |
1544 | 0x12f4d0fc, | 1541 | 0x12f4d0fc, |
1545 | 0x027cb90a, | 1542 | 0x027cb90a, |
1546 | 0x02b921f5, | 1543 | 0x02b921f5, |
1547 | /* 0x0980: i2c_recv_exit */ | 1544 | /* 0x0974: i2c_recv_exit */ |
1548 | /* 0x0982: i2c_init */ | 1545 | /* 0x0976: i2c_init */ |
1549 | 0x00f800f8, | 1546 | 0x00f800f8, |
1550 | /* 0x0984: test_recv */ | 1547 | /* 0x0978: test_recv */ |
1551 | 0x05d817f1, | 1548 | 0x05d817f1, |
1552 | 0xcf0614b6, | 1549 | 0xcf0614b6, |
1553 | 0x10b60011, | 1550 | 0x10b60011, |
@@ -1557,12 +1554,12 @@ uint32_t nva3_pwr_code[] = { | |||
1557 | 0x00e7f104, | 1554 | 0x00e7f104, |
1558 | 0x4fe3f1d9, | 1555 | 0x4fe3f1d9, |
1559 | 0xf521f513, | 1556 | 0xf521f513, |
1560 | /* 0x09ab: test_init */ | 1557 | /* 0x099f: test_init */ |
1561 | 0xf100f801, | 1558 | 0xf100f801, |
1562 | 0xf50800e7, | 1559 | 0xf50800e7, |
1563 | 0xf801f521, | 1560 | 0xf801f521, |
1564 | /* 0x09b5: idle_recv */ | 1561 | /* 0x09a9: idle_recv */ |
1565 | /* 0x09b7: idle */ | 1562 | /* 0x09ab: idle */ |
1566 | 0xf400f800, | 1563 | 0xf400f800, |
1567 | 0x17f10031, | 1564 | 0x17f10031, |
1568 | 0x14b605d4, | 1565 | 0x14b605d4, |
@@ -1570,20 +1567,23 @@ uint32_t nva3_pwr_code[] = { | |||
1570 | 0xf10110b6, | 1567 | 0xf10110b6, |
1571 | 0xb605d407, | 1568 | 0xb605d407, |
1572 | 0x01d00604, | 1569 | 0x01d00604, |
1573 | /* 0x09d3: idle_loop */ | 1570 | /* 0x09c7: idle_loop */ |
1574 | 0xf004bd00, | 1571 | 0xf004bd00, |
1575 | 0x32f45817, | 1572 | 0x32f45817, |
1576 | /* 0x09d9: idle_proc */ | 1573 | /* 0x09cd: idle_proc */ |
1577 | /* 0x09d9: idle_proc_exec */ | 1574 | /* 0x09cd: idle_proc_exec */ |
1578 | 0xb910f902, | 1575 | 0xb910f902, |
1579 | 0x21f5021e, | 1576 | 0x21f5021e, |
1580 | 0x10fc02c2, | 1577 | 0x10fc02c2, |
1581 | 0xf40911f4, | 1578 | 0xf40911f4, |
1582 | 0x0ef40231, | 1579 | 0x0ef40231, |
1583 | /* 0x09ed: idle_proc_next */ | 1580 | /* 0x09e1: idle_proc_next */ |
1584 | 0x5810b6ef, | 1581 | 0x5810b6ef, |
1585 | 0xf4061fb8, | 1582 | 0xf4061fb8, |
1586 | 0x02f4e61b, | 1583 | 0x02f4e61b, |
1587 | 0x0028f4dd, | 1584 | 0x0028f4dd, |
1588 | 0x00bb0ef4, | 1585 | 0x00bb0ef4, |
1586 | 0x00000000, | ||
1587 | 0x00000000, | ||
1588 | 0x00000000, | ||
1589 | }; | 1589 | }; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h index 7ac87405d01b..0773ff0e3dc3 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h | |||
@@ -46,8 +46,8 @@ uint32_t nvc0_pwr_data[] = { | |||
46 | 0x00000000, | 46 | 0x00000000, |
47 | 0x00000000, | 47 | 0x00000000, |
48 | 0x584d454d, | 48 | 0x584d454d, |
49 | 0x0000054e, | 49 | 0x00000542, |
50 | 0x00000540, | 50 | 0x00000534, |
51 | 0x00000000, | 51 | 0x00000000, |
52 | 0x00000000, | 52 | 0x00000000, |
53 | 0x00000000, | 53 | 0x00000000, |
@@ -68,8 +68,8 @@ uint32_t nvc0_pwr_data[] = { | |||
68 | 0x00000000, | 68 | 0x00000000, |
69 | 0x00000000, | 69 | 0x00000000, |
70 | 0x46524550, | 70 | 0x46524550, |
71 | 0x00000552, | 71 | 0x00000546, |
72 | 0x00000550, | 72 | 0x00000544, |
73 | 0x00000000, | 73 | 0x00000000, |
74 | 0x00000000, | 74 | 0x00000000, |
75 | 0x00000000, | 75 | 0x00000000, |
@@ -90,8 +90,8 @@ uint32_t nvc0_pwr_data[] = { | |||
90 | 0x00000000, | 90 | 0x00000000, |
91 | 0x00000000, | 91 | 0x00000000, |
92 | 0x5f433249, | 92 | 0x5f433249, |
93 | 0x00000982, | 93 | 0x00000976, |
94 | 0x00000825, | 94 | 0x00000819, |
95 | 0x00000000, | 95 | 0x00000000, |
96 | 0x00000000, | 96 | 0x00000000, |
97 | 0x00000000, | 97 | 0x00000000, |
@@ -112,8 +112,8 @@ uint32_t nvc0_pwr_data[] = { | |||
112 | 0x00000000, | 112 | 0x00000000, |
113 | 0x00000000, | 113 | 0x00000000, |
114 | 0x54534554, | 114 | 0x54534554, |
115 | 0x000009ab, | 115 | 0x0000099f, |
116 | 0x00000984, | 116 | 0x00000978, |
117 | 0x00000000, | 117 | 0x00000000, |
118 | 0x00000000, | 118 | 0x00000000, |
119 | 0x00000000, | 119 | 0x00000000, |
@@ -134,8 +134,8 @@ uint32_t nvc0_pwr_data[] = { | |||
134 | 0x00000000, | 134 | 0x00000000, |
135 | 0x00000000, | 135 | 0x00000000, |
136 | 0x454c4449, | 136 | 0x454c4449, |
137 | 0x000009b7, | 137 | 0x000009ab, |
138 | 0x000009b5, | 138 | 0x000009a9, |
139 | 0x00000000, | 139 | 0x00000000, |
140 | 0x00000000, | 140 | 0x00000000, |
141 | 0x00000000, | 141 | 0x00000000, |
@@ -239,10 +239,10 @@ uint32_t nvc0_pwr_data[] = { | |||
239 | 0x000004b7, | 239 | 0x000004b7, |
240 | 0x00040003, | 240 | 0x00040003, |
241 | 0x00000000, | 241 | 0x00000000, |
242 | 0x000004df, | 242 | 0x000004d3, |
243 | 0x00010004, | 243 | 0x00010004, |
244 | 0x00000000, | 244 | 0x00000000, |
245 | 0x000004fc, | 245 | 0x000004f0, |
246 | /* 0x03ac: memx_func_tail */ | 246 | /* 0x03ac: memx_func_tail */ |
247 | /* 0x03ac: memx_data_head */ | 247 | /* 0x03ac: memx_data_head */ |
248 | 0x00000000, | 248 | 0x00000000, |
@@ -1198,13 +1198,10 @@ uint32_t nvc0_pwr_code[] = { | |||
1198 | 0x0810b601, | 1198 | 0x0810b601, |
1199 | 0x50f960f9, | 1199 | 0x50f960f9, |
1200 | 0xe0fcd0fc, | 1200 | 0xe0fcd0fc, |
1201 | 0xf13f21f4, | 1201 | 0xb63f21f4, |
1202 | 0xfd140003, | ||
1203 | 0x05800506, | ||
1204 | 0xb604bd00, | ||
1205 | 0x1bf40242, | 1202 | 0x1bf40242, |
1206 | /* 0x04df: memx_func_wait */ | 1203 | /* 0x04d3: memx_func_wait */ |
1207 | 0xf000f8dd, | 1204 | 0xf000f8e9, |
1208 | 0x84b62c87, | 1205 | 0x84b62c87, |
1209 | 0x0088cf06, | 1206 | 0x0088cf06, |
1210 | 0x98001e98, | 1207 | 0x98001e98, |
@@ -1212,14 +1209,14 @@ uint32_t nvc0_pwr_code[] = { | |||
1212 | 0x031b9802, | 1209 | 0x031b9802, |
1213 | 0xf41010b6, | 1210 | 0xf41010b6, |
1214 | 0x00f89c21, | 1211 | 0x00f89c21, |
1215 | /* 0x04fc: memx_func_delay */ | 1212 | /* 0x04f0: memx_func_delay */ |
1216 | 0xb6001e98, | 1213 | 0xb6001e98, |
1217 | 0x21f40410, | 1214 | 0x21f40410, |
1218 | /* 0x0507: memx_exec */ | 1215 | /* 0x04fb: memx_exec */ |
1219 | 0xf900f87f, | 1216 | 0xf900f87f, |
1220 | 0xb9d0f9e0, | 1217 | 0xb9d0f9e0, |
1221 | 0xb2b902c1, | 1218 | 0xb2b902c1, |
1222 | /* 0x0511: memx_exec_next */ | 1219 | /* 0x0505: memx_exec_next */ |
1223 | 0x00139802, | 1220 | 0x00139802, |
1224 | 0x950410b6, | 1221 | 0x950410b6, |
1225 | 0x30f01034, | 1222 | 0x30f01034, |
@@ -1228,112 +1225,112 @@ uint32_t nvc0_pwr_code[] = { | |||
1228 | 0xec1ef406, | 1225 | 0xec1ef406, |
1229 | 0xe0fcd0fc, | 1226 | 0xe0fcd0fc, |
1230 | 0x02b921f5, | 1227 | 0x02b921f5, |
1231 | /* 0x0532: memx_info */ | 1228 | /* 0x0526: memx_info */ |
1232 | 0xc7f100f8, | 1229 | 0xc7f100f8, |
1233 | 0xb7f103ac, | 1230 | 0xb7f103ac, |
1234 | 0x21f50800, | 1231 | 0x21f50800, |
1235 | 0x00f802b9, | 1232 | 0x00f802b9, |
1236 | /* 0x0540: memx_recv */ | 1233 | /* 0x0534: memx_recv */ |
1237 | 0xf401d6b0, | 1234 | 0xf401d6b0, |
1238 | 0xd6b0c40b, | 1235 | 0xd6b0c40b, |
1239 | 0xe90bf400, | 1236 | 0xe90bf400, |
1240 | /* 0x054e: memx_init */ | 1237 | /* 0x0542: memx_init */ |
1241 | 0x00f800f8, | 1238 | 0x00f800f8, |
1242 | /* 0x0550: perf_recv */ | 1239 | /* 0x0544: perf_recv */ |
1243 | /* 0x0552: perf_init */ | 1240 | /* 0x0546: perf_init */ |
1244 | 0x00f800f8, | 1241 | 0x00f800f8, |
1245 | /* 0x0554: i2c_drive_scl */ | 1242 | /* 0x0548: i2c_drive_scl */ |
1246 | 0xf40036b0, | 1243 | 0xf40036b0, |
1247 | 0x07f1110b, | 1244 | 0x07f1110b, |
1248 | 0x04b607e0, | 1245 | 0x04b607e0, |
1249 | 0x0001d006, | 1246 | 0x0001d006, |
1250 | 0x00f804bd, | 1247 | 0x00f804bd, |
1251 | /* 0x0568: i2c_drive_scl_lo */ | 1248 | /* 0x055c: i2c_drive_scl_lo */ |
1252 | 0x07e407f1, | 1249 | 0x07e407f1, |
1253 | 0xd00604b6, | 1250 | 0xd00604b6, |
1254 | 0x04bd0001, | 1251 | 0x04bd0001, |
1255 | /* 0x0576: i2c_drive_sda */ | 1252 | /* 0x056a: i2c_drive_sda */ |
1256 | 0x36b000f8, | 1253 | 0x36b000f8, |
1257 | 0x110bf400, | 1254 | 0x110bf400, |
1258 | 0x07e007f1, | 1255 | 0x07e007f1, |
1259 | 0xd00604b6, | 1256 | 0xd00604b6, |
1260 | 0x04bd0002, | 1257 | 0x04bd0002, |
1261 | /* 0x058a: i2c_drive_sda_lo */ | 1258 | /* 0x057e: i2c_drive_sda_lo */ |
1262 | 0x07f100f8, | 1259 | 0x07f100f8, |
1263 | 0x04b607e4, | 1260 | 0x04b607e4, |
1264 | 0x0002d006, | 1261 | 0x0002d006, |
1265 | 0x00f804bd, | 1262 | 0x00f804bd, |
1266 | /* 0x0598: i2c_sense_scl */ | 1263 | /* 0x058c: i2c_sense_scl */ |
1267 | 0xf10132f4, | 1264 | 0xf10132f4, |
1268 | 0xb607c437, | 1265 | 0xb607c437, |
1269 | 0x33cf0634, | 1266 | 0x33cf0634, |
1270 | 0x0431fd00, | 1267 | 0x0431fd00, |
1271 | 0xf4060bf4, | 1268 | 0xf4060bf4, |
1272 | /* 0x05ae: i2c_sense_scl_done */ | 1269 | /* 0x05a2: i2c_sense_scl_done */ |
1273 | 0x00f80131, | 1270 | 0x00f80131, |
1274 | /* 0x05b0: i2c_sense_sda */ | 1271 | /* 0x05a4: i2c_sense_sda */ |
1275 | 0xf10132f4, | 1272 | 0xf10132f4, |
1276 | 0xb607c437, | 1273 | 0xb607c437, |
1277 | 0x33cf0634, | 1274 | 0x33cf0634, |
1278 | 0x0432fd00, | 1275 | 0x0432fd00, |
1279 | 0xf4060bf4, | 1276 | 0xf4060bf4, |
1280 | /* 0x05c6: i2c_sense_sda_done */ | 1277 | /* 0x05ba: i2c_sense_sda_done */ |
1281 | 0x00f80131, | 1278 | 0x00f80131, |
1282 | /* 0x05c8: i2c_raise_scl */ | 1279 | /* 0x05bc: i2c_raise_scl */ |
1283 | 0x47f140f9, | 1280 | 0x47f140f9, |
1284 | 0x37f00898, | 1281 | 0x37f00898, |
1285 | 0x5421f501, | 1282 | 0x4821f501, |
1286 | /* 0x05d5: i2c_raise_scl_wait */ | 1283 | /* 0x05c9: i2c_raise_scl_wait */ |
1287 | 0xe8e7f105, | 1284 | 0xe8e7f105, |
1288 | 0x7f21f403, | 1285 | 0x7f21f403, |
1289 | 0x059821f5, | 1286 | 0x058c21f5, |
1290 | 0xb60901f4, | 1287 | 0xb60901f4, |
1291 | 0x1bf40142, | 1288 | 0x1bf40142, |
1292 | /* 0x05e9: i2c_raise_scl_done */ | 1289 | /* 0x05dd: i2c_raise_scl_done */ |
1293 | 0xf840fcef, | 1290 | 0xf840fcef, |
1294 | /* 0x05ed: i2c_start */ | 1291 | /* 0x05e1: i2c_start */ |
1295 | 0x9821f500, | 1292 | 0x8c21f500, |
1296 | 0x0d11f405, | 1293 | 0x0d11f405, |
1297 | 0x05b021f5, | 1294 | 0x05a421f5, |
1298 | 0xf40611f4, | 1295 | 0xf40611f4, |
1299 | /* 0x05fe: i2c_start_rep */ | 1296 | /* 0x05f2: i2c_start_rep */ |
1300 | 0x37f0300e, | 1297 | 0x37f0300e, |
1301 | 0x5421f500, | 1298 | 0x4821f500, |
1302 | 0x0137f005, | 1299 | 0x0137f005, |
1303 | 0x057621f5, | 1300 | 0x056a21f5, |
1304 | 0xb60076bb, | 1301 | 0xb60076bb, |
1305 | 0x50f90465, | 1302 | 0x50f90465, |
1306 | 0xbb046594, | 1303 | 0xbb046594, |
1307 | 0x50bd0256, | 1304 | 0x50bd0256, |
1308 | 0xfc0475fd, | 1305 | 0xfc0475fd, |
1309 | 0xc821f550, | 1306 | 0xbc21f550, |
1310 | 0x0464b605, | 1307 | 0x0464b605, |
1311 | /* 0x062b: i2c_start_send */ | 1308 | /* 0x061f: i2c_start_send */ |
1312 | 0xf01f11f4, | 1309 | 0xf01f11f4, |
1313 | 0x21f50037, | 1310 | 0x21f50037, |
1314 | 0xe7f10576, | 1311 | 0xe7f1056a, |
1315 | 0x21f41388, | 1312 | 0x21f41388, |
1316 | 0x0037f07f, | 1313 | 0x0037f07f, |
1317 | 0x055421f5, | 1314 | 0x054821f5, |
1318 | 0x1388e7f1, | 1315 | 0x1388e7f1, |
1319 | /* 0x0647: i2c_start_out */ | 1316 | /* 0x063b: i2c_start_out */ |
1320 | 0xf87f21f4, | 1317 | 0xf87f21f4, |
1321 | /* 0x0649: i2c_stop */ | 1318 | /* 0x063d: i2c_stop */ |
1322 | 0x0037f000, | 1319 | 0x0037f000, |
1323 | 0x055421f5, | 1320 | 0x054821f5, |
1324 | 0xf50037f0, | 1321 | 0xf50037f0, |
1325 | 0xf1057621, | 1322 | 0xf1056a21, |
1326 | 0xf403e8e7, | 1323 | 0xf403e8e7, |
1327 | 0x37f07f21, | 1324 | 0x37f07f21, |
1328 | 0x5421f501, | 1325 | 0x4821f501, |
1329 | 0x88e7f105, | 1326 | 0x88e7f105, |
1330 | 0x7f21f413, | 1327 | 0x7f21f413, |
1331 | 0xf50137f0, | 1328 | 0xf50137f0, |
1332 | 0xf1057621, | 1329 | 0xf1056a21, |
1333 | 0xf41388e7, | 1330 | 0xf41388e7, |
1334 | 0x00f87f21, | 1331 | 0x00f87f21, |
1335 | /* 0x067c: i2c_bitw */ | 1332 | /* 0x0670: i2c_bitw */ |
1336 | 0x057621f5, | 1333 | 0x056a21f5, |
1337 | 0x03e8e7f1, | 1334 | 0x03e8e7f1, |
1338 | 0xbb7f21f4, | 1335 | 0xbb7f21f4, |
1339 | 0x65b60076, | 1336 | 0x65b60076, |
@@ -1341,18 +1338,18 @@ uint32_t nvc0_pwr_code[] = { | |||
1341 | 0x56bb0465, | 1338 | 0x56bb0465, |
1342 | 0xfd50bd02, | 1339 | 0xfd50bd02, |
1343 | 0x50fc0475, | 1340 | 0x50fc0475, |
1344 | 0x05c821f5, | 1341 | 0x05bc21f5, |
1345 | 0xf40464b6, | 1342 | 0xf40464b6, |
1346 | 0xe7f11811, | 1343 | 0xe7f11811, |
1347 | 0x21f41388, | 1344 | 0x21f41388, |
1348 | 0x0037f07f, | 1345 | 0x0037f07f, |
1349 | 0x055421f5, | 1346 | 0x054821f5, |
1350 | 0x1388e7f1, | 1347 | 0x1388e7f1, |
1351 | /* 0x06bb: i2c_bitw_out */ | 1348 | /* 0x06af: i2c_bitw_out */ |
1352 | 0xf87f21f4, | 1349 | 0xf87f21f4, |
1353 | /* 0x06bd: i2c_bitr */ | 1350 | /* 0x06b1: i2c_bitr */ |
1354 | 0x0137f000, | 1351 | 0x0137f000, |
1355 | 0x057621f5, | 1352 | 0x056a21f5, |
1356 | 0x03e8e7f1, | 1353 | 0x03e8e7f1, |
1357 | 0xbb7f21f4, | 1354 | 0xbb7f21f4, |
1358 | 0x65b60076, | 1355 | 0x65b60076, |
@@ -1360,19 +1357,19 @@ uint32_t nvc0_pwr_code[] = { | |||
1360 | 0x56bb0465, | 1357 | 0x56bb0465, |
1361 | 0xfd50bd02, | 1358 | 0xfd50bd02, |
1362 | 0x50fc0475, | 1359 | 0x50fc0475, |
1363 | 0x05c821f5, | 1360 | 0x05bc21f5, |
1364 | 0xf40464b6, | 1361 | 0xf40464b6, |
1365 | 0x21f51b11, | 1362 | 0x21f51b11, |
1366 | 0x37f005b0, | 1363 | 0x37f005a4, |
1367 | 0x5421f500, | 1364 | 0x4821f500, |
1368 | 0x88e7f105, | 1365 | 0x88e7f105, |
1369 | 0x7f21f413, | 1366 | 0x7f21f413, |
1370 | 0xf4013cf0, | 1367 | 0xf4013cf0, |
1371 | /* 0x0702: i2c_bitr_done */ | 1368 | /* 0x06f6: i2c_bitr_done */ |
1372 | 0x00f80131, | 1369 | 0x00f80131, |
1373 | /* 0x0704: i2c_get_byte */ | 1370 | /* 0x06f8: i2c_get_byte */ |
1374 | 0xf00057f0, | 1371 | 0xf00057f0, |
1375 | /* 0x070a: i2c_get_byte_next */ | 1372 | /* 0x06fe: i2c_get_byte_next */ |
1376 | 0x54b60847, | 1373 | 0x54b60847, |
1377 | 0x0076bb01, | 1374 | 0x0076bb01, |
1378 | 0xf90465b6, | 1375 | 0xf90465b6, |
@@ -1380,7 +1377,7 @@ uint32_t nvc0_pwr_code[] = { | |||
1380 | 0xbd0256bb, | 1377 | 0xbd0256bb, |
1381 | 0x0475fd50, | 1378 | 0x0475fd50, |
1382 | 0x21f550fc, | 1379 | 0x21f550fc, |
1383 | 0x64b606bd, | 1380 | 0x64b606b1, |
1384 | 0x2b11f404, | 1381 | 0x2b11f404, |
1385 | 0xb60553fd, | 1382 | 0xb60553fd, |
1386 | 0x1bf40142, | 1383 | 0x1bf40142, |
@@ -1390,12 +1387,12 @@ uint32_t nvc0_pwr_code[] = { | |||
1390 | 0xbb046594, | 1387 | 0xbb046594, |
1391 | 0x50bd0256, | 1388 | 0x50bd0256, |
1392 | 0xfc0475fd, | 1389 | 0xfc0475fd, |
1393 | 0x7c21f550, | 1390 | 0x7021f550, |
1394 | 0x0464b606, | 1391 | 0x0464b606, |
1395 | /* 0x0754: i2c_get_byte_done */ | 1392 | /* 0x0748: i2c_get_byte_done */ |
1396 | /* 0x0756: i2c_put_byte */ | 1393 | /* 0x074a: i2c_put_byte */ |
1397 | 0x47f000f8, | 1394 | 0x47f000f8, |
1398 | /* 0x0759: i2c_put_byte_next */ | 1395 | /* 0x074d: i2c_put_byte_next */ |
1399 | 0x0142b608, | 1396 | 0x0142b608, |
1400 | 0xbb3854ff, | 1397 | 0xbb3854ff, |
1401 | 0x65b60076, | 1398 | 0x65b60076, |
@@ -1403,7 +1400,7 @@ uint32_t nvc0_pwr_code[] = { | |||
1403 | 0x56bb0465, | 1400 | 0x56bb0465, |
1404 | 0xfd50bd02, | 1401 | 0xfd50bd02, |
1405 | 0x50fc0475, | 1402 | 0x50fc0475, |
1406 | 0x067c21f5, | 1403 | 0x067021f5, |
1407 | 0xf40464b6, | 1404 | 0xf40464b6, |
1408 | 0x46b03411, | 1405 | 0x46b03411, |
1409 | 0xd81bf400, | 1406 | 0xd81bf400, |
@@ -1412,21 +1409,21 @@ uint32_t nvc0_pwr_code[] = { | |||
1412 | 0xbb046594, | 1409 | 0xbb046594, |
1413 | 0x50bd0256, | 1410 | 0x50bd0256, |
1414 | 0xfc0475fd, | 1411 | 0xfc0475fd, |
1415 | 0xbd21f550, | 1412 | 0xb121f550, |
1416 | 0x0464b606, | 1413 | 0x0464b606, |
1417 | 0xbb0f11f4, | 1414 | 0xbb0f11f4, |
1418 | 0x36b00076, | 1415 | 0x36b00076, |
1419 | 0x061bf401, | 1416 | 0x061bf401, |
1420 | /* 0x07af: i2c_put_byte_done */ | 1417 | /* 0x07a3: i2c_put_byte_done */ |
1421 | 0xf80132f4, | 1418 | 0xf80132f4, |
1422 | /* 0x07b1: i2c_addr */ | 1419 | /* 0x07a5: i2c_addr */ |
1423 | 0x0076bb00, | 1420 | 0x0076bb00, |
1424 | 0xf90465b6, | 1421 | 0xf90465b6, |
1425 | 0x04659450, | 1422 | 0x04659450, |
1426 | 0xbd0256bb, | 1423 | 0xbd0256bb, |
1427 | 0x0475fd50, | 1424 | 0x0475fd50, |
1428 | 0x21f550fc, | 1425 | 0x21f550fc, |
1429 | 0x64b605ed, | 1426 | 0x64b605e1, |
1430 | 0x2911f404, | 1427 | 0x2911f404, |
1431 | 0x012ec3e7, | 1428 | 0x012ec3e7, |
1432 | 0xfd0134b6, | 1429 | 0xfd0134b6, |
@@ -1436,24 +1433,24 @@ uint32_t nvc0_pwr_code[] = { | |||
1436 | 0x0256bb04, | 1433 | 0x0256bb04, |
1437 | 0x75fd50bd, | 1434 | 0x75fd50bd, |
1438 | 0xf550fc04, | 1435 | 0xf550fc04, |
1439 | 0xb6075621, | 1436 | 0xb6074a21, |
1440 | /* 0x07f6: i2c_addr_done */ | 1437 | /* 0x07ea: i2c_addr_done */ |
1441 | 0x00f80464, | 1438 | 0x00f80464, |
1442 | /* 0x07f8: i2c_acquire_addr */ | 1439 | /* 0x07ec: i2c_acquire_addr */ |
1443 | 0xb6f8cec7, | 1440 | 0xb6f8cec7, |
1444 | 0xe0b702e4, | 1441 | 0xe0b702e4, |
1445 | 0xee980bfc, | 1442 | 0xee980bfc, |
1446 | /* 0x0807: i2c_acquire */ | 1443 | /* 0x07fb: i2c_acquire */ |
1447 | 0xf500f800, | 1444 | 0xf500f800, |
1448 | 0xf407f821, | 1445 | 0xf407ec21, |
1449 | 0xd9f00421, | 1446 | 0xd9f00421, |
1450 | 0x3f21f403, | 1447 | 0x3f21f403, |
1451 | /* 0x0816: i2c_release */ | 1448 | /* 0x080a: i2c_release */ |
1452 | 0x21f500f8, | 1449 | 0x21f500f8, |
1453 | 0x21f407f8, | 1450 | 0x21f407ec, |
1454 | 0x03daf004, | 1451 | 0x03daf004, |
1455 | 0xf83f21f4, | 1452 | 0xf83f21f4, |
1456 | /* 0x0825: i2c_recv */ | 1453 | /* 0x0819: i2c_recv */ |
1457 | 0x0132f400, | 1454 | 0x0132f400, |
1458 | 0xb6f8c1c7, | 1455 | 0xb6f8c1c7, |
1459 | 0x16b00214, | 1456 | 0x16b00214, |
@@ -1472,7 +1469,7 @@ uint32_t nvc0_pwr_code[] = { | |||
1472 | 0x56bb0465, | 1469 | 0x56bb0465, |
1473 | 0xfd50bd02, | 1470 | 0xfd50bd02, |
1474 | 0x50fc0475, | 1471 | 0x50fc0475, |
1475 | 0x080721f5, | 1472 | 0x07fb21f5, |
1476 | 0xfc0464b6, | 1473 | 0xfc0464b6, |
1477 | 0x00d6b0d0, | 1474 | 0x00d6b0d0, |
1478 | 0x00b31bf5, | 1475 | 0x00b31bf5, |
@@ -1482,7 +1479,7 @@ uint32_t nvc0_pwr_code[] = { | |||
1482 | 0x56bb0465, | 1479 | 0x56bb0465, |
1483 | 0xfd50bd02, | 1480 | 0xfd50bd02, |
1484 | 0x50fc0475, | 1481 | 0x50fc0475, |
1485 | 0x07b121f5, | 1482 | 0x07a521f5, |
1486 | 0xf50464b6, | 1483 | 0xf50464b6, |
1487 | 0xc700d011, | 1484 | 0xc700d011, |
1488 | 0x76bbe0c5, | 1485 | 0x76bbe0c5, |
@@ -1491,7 +1488,7 @@ uint32_t nvc0_pwr_code[] = { | |||
1491 | 0x0256bb04, | 1488 | 0x0256bb04, |
1492 | 0x75fd50bd, | 1489 | 0x75fd50bd, |
1493 | 0xf550fc04, | 1490 | 0xf550fc04, |
1494 | 0xb6075621, | 1491 | 0xb6074a21, |
1495 | 0x11f50464, | 1492 | 0x11f50464, |
1496 | 0x57f000ad, | 1493 | 0x57f000ad, |
1497 | 0x0076bb01, | 1494 | 0x0076bb01, |
@@ -1500,7 +1497,7 @@ uint32_t nvc0_pwr_code[] = { | |||
1500 | 0xbd0256bb, | 1497 | 0xbd0256bb, |
1501 | 0x0475fd50, | 1498 | 0x0475fd50, |
1502 | 0x21f550fc, | 1499 | 0x21f550fc, |
1503 | 0x64b607b1, | 1500 | 0x64b607a5, |
1504 | 0x8a11f504, | 1501 | 0x8a11f504, |
1505 | 0x0076bb00, | 1502 | 0x0076bb00, |
1506 | 0xf90465b6, | 1503 | 0xf90465b6, |
@@ -1508,7 +1505,7 @@ uint32_t nvc0_pwr_code[] = { | |||
1508 | 0xbd0256bb, | 1505 | 0xbd0256bb, |
1509 | 0x0475fd50, | 1506 | 0x0475fd50, |
1510 | 0x21f550fc, | 1507 | 0x21f550fc, |
1511 | 0x64b60704, | 1508 | 0x64b606f8, |
1512 | 0x6a11f404, | 1509 | 0x6a11f404, |
1513 | 0xbbe05bcb, | 1510 | 0xbbe05bcb, |
1514 | 0x65b60076, | 1511 | 0x65b60076, |
@@ -1516,38 +1513,38 @@ uint32_t nvc0_pwr_code[] = { | |||
1516 | 0x56bb0465, | 1513 | 0x56bb0465, |
1517 | 0xfd50bd02, | 1514 | 0xfd50bd02, |
1518 | 0x50fc0475, | 1515 | 0x50fc0475, |
1519 | 0x064921f5, | 1516 | 0x063d21f5, |
1520 | 0xb90464b6, | 1517 | 0xb90464b6, |
1521 | 0x74bd025b, | 1518 | 0x74bd025b, |
1522 | /* 0x092b: i2c_recv_not_rd08 */ | 1519 | /* 0x091f: i2c_recv_not_rd08 */ |
1523 | 0xb0430ef4, | 1520 | 0xb0430ef4, |
1524 | 0x1bf401d6, | 1521 | 0x1bf401d6, |
1525 | 0x0057f03d, | 1522 | 0x0057f03d, |
1526 | 0x07b121f5, | 1523 | 0x07a521f5, |
1527 | 0xc73311f4, | 1524 | 0xc73311f4, |
1528 | 0x21f5e0c5, | 1525 | 0x21f5e0c5, |
1529 | 0x11f40756, | 1526 | 0x11f4074a, |
1530 | 0x0057f029, | 1527 | 0x0057f029, |
1531 | 0x07b121f5, | 1528 | 0x07a521f5, |
1532 | 0xc71f11f4, | 1529 | 0xc71f11f4, |
1533 | 0x21f5e0b5, | 1530 | 0x21f5e0b5, |
1534 | 0x11f40756, | 1531 | 0x11f4074a, |
1535 | 0x4921f515, | 1532 | 0x3d21f515, |
1536 | 0xc774bd06, | 1533 | 0xc774bd06, |
1537 | 0x1bf408c5, | 1534 | 0x1bf408c5, |
1538 | 0x0232f409, | 1535 | 0x0232f409, |
1539 | /* 0x096b: i2c_recv_not_wr08 */ | 1536 | /* 0x095f: i2c_recv_not_wr08 */ |
1540 | /* 0x096b: i2c_recv_done */ | 1537 | /* 0x095f: i2c_recv_done */ |
1541 | 0xc7030ef4, | 1538 | 0xc7030ef4, |
1542 | 0x21f5f8ce, | 1539 | 0x21f5f8ce, |
1543 | 0xe0fc0816, | 1540 | 0xe0fc080a, |
1544 | 0x12f4d0fc, | 1541 | 0x12f4d0fc, |
1545 | 0x027cb90a, | 1542 | 0x027cb90a, |
1546 | 0x02b921f5, | 1543 | 0x02b921f5, |
1547 | /* 0x0980: i2c_recv_exit */ | 1544 | /* 0x0974: i2c_recv_exit */ |
1548 | /* 0x0982: i2c_init */ | 1545 | /* 0x0976: i2c_init */ |
1549 | 0x00f800f8, | 1546 | 0x00f800f8, |
1550 | /* 0x0984: test_recv */ | 1547 | /* 0x0978: test_recv */ |
1551 | 0x05d817f1, | 1548 | 0x05d817f1, |
1552 | 0xcf0614b6, | 1549 | 0xcf0614b6, |
1553 | 0x10b60011, | 1550 | 0x10b60011, |
@@ -1557,12 +1554,12 @@ uint32_t nvc0_pwr_code[] = { | |||
1557 | 0x00e7f104, | 1554 | 0x00e7f104, |
1558 | 0x4fe3f1d9, | 1555 | 0x4fe3f1d9, |
1559 | 0xf521f513, | 1556 | 0xf521f513, |
1560 | /* 0x09ab: test_init */ | 1557 | /* 0x099f: test_init */ |
1561 | 0xf100f801, | 1558 | 0xf100f801, |
1562 | 0xf50800e7, | 1559 | 0xf50800e7, |
1563 | 0xf801f521, | 1560 | 0xf801f521, |
1564 | /* 0x09b5: idle_recv */ | 1561 | /* 0x09a9: idle_recv */ |
1565 | /* 0x09b7: idle */ | 1562 | /* 0x09ab: idle */ |
1566 | 0xf400f800, | 1563 | 0xf400f800, |
1567 | 0x17f10031, | 1564 | 0x17f10031, |
1568 | 0x14b605d4, | 1565 | 0x14b605d4, |
@@ -1570,20 +1567,23 @@ uint32_t nvc0_pwr_code[] = { | |||
1570 | 0xf10110b6, | 1567 | 0xf10110b6, |
1571 | 0xb605d407, | 1568 | 0xb605d407, |
1572 | 0x01d00604, | 1569 | 0x01d00604, |
1573 | /* 0x09d3: idle_loop */ | 1570 | /* 0x09c7: idle_loop */ |
1574 | 0xf004bd00, | 1571 | 0xf004bd00, |
1575 | 0x32f45817, | 1572 | 0x32f45817, |
1576 | /* 0x09d9: idle_proc */ | 1573 | /* 0x09cd: idle_proc */ |
1577 | /* 0x09d9: idle_proc_exec */ | 1574 | /* 0x09cd: idle_proc_exec */ |
1578 | 0xb910f902, | 1575 | 0xb910f902, |
1579 | 0x21f5021e, | 1576 | 0x21f5021e, |
1580 | 0x10fc02c2, | 1577 | 0x10fc02c2, |
1581 | 0xf40911f4, | 1578 | 0xf40911f4, |
1582 | 0x0ef40231, | 1579 | 0x0ef40231, |
1583 | /* 0x09ed: idle_proc_next */ | 1580 | /* 0x09e1: idle_proc_next */ |
1584 | 0x5810b6ef, | 1581 | 0x5810b6ef, |
1585 | 0xf4061fb8, | 1582 | 0xf4061fb8, |
1586 | 0x02f4e61b, | 1583 | 0x02f4e61b, |
1587 | 0x0028f4dd, | 1584 | 0x0028f4dd, |
1588 | 0x00bb0ef4, | 1585 | 0x00bb0ef4, |
1586 | 0x00000000, | ||
1587 | 0x00000000, | ||
1588 | 0x00000000, | ||
1589 | }; | 1589 | }; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h index cd9ff1a73284..8d369b3faaba 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h | |||
@@ -46,8 +46,8 @@ uint32_t nvd0_pwr_data[] = { | |||
46 | 0x00000000, | 46 | 0x00000000, |
47 | 0x00000000, | 47 | 0x00000000, |
48 | 0x584d454d, | 48 | 0x584d454d, |
49 | 0x000004c4, | 49 | 0x000004b8, |
50 | 0x000004b6, | 50 | 0x000004aa, |
51 | 0x00000000, | 51 | 0x00000000, |
52 | 0x00000000, | 52 | 0x00000000, |
53 | 0x00000000, | 53 | 0x00000000, |
@@ -68,8 +68,8 @@ uint32_t nvd0_pwr_data[] = { | |||
68 | 0x00000000, | 68 | 0x00000000, |
69 | 0x00000000, | 69 | 0x00000000, |
70 | 0x46524550, | 70 | 0x46524550, |
71 | 0x000004c8, | 71 | 0x000004bc, |
72 | 0x000004c6, | 72 | 0x000004ba, |
73 | 0x00000000, | 73 | 0x00000000, |
74 | 0x00000000, | 74 | 0x00000000, |
75 | 0x00000000, | 75 | 0x00000000, |
@@ -90,8 +90,8 @@ uint32_t nvd0_pwr_data[] = { | |||
90 | 0x00000000, | 90 | 0x00000000, |
91 | 0x00000000, | 91 | 0x00000000, |
92 | 0x5f433249, | 92 | 0x5f433249, |
93 | 0x000008e3, | 93 | 0x000008d7, |
94 | 0x00000786, | 94 | 0x0000077a, |
95 | 0x00000000, | 95 | 0x00000000, |
96 | 0x00000000, | 96 | 0x00000000, |
97 | 0x00000000, | 97 | 0x00000000, |
@@ -112,8 +112,8 @@ uint32_t nvd0_pwr_data[] = { | |||
112 | 0x00000000, | 112 | 0x00000000, |
113 | 0x00000000, | 113 | 0x00000000, |
114 | 0x54534554, | 114 | 0x54534554, |
115 | 0x00000906, | 115 | 0x000008fa, |
116 | 0x000008e5, | 116 | 0x000008d9, |
117 | 0x00000000, | 117 | 0x00000000, |
118 | 0x00000000, | 118 | 0x00000000, |
119 | 0x00000000, | 119 | 0x00000000, |
@@ -134,8 +134,8 @@ uint32_t nvd0_pwr_data[] = { | |||
134 | 0x00000000, | 134 | 0x00000000, |
135 | 0x00000000, | 135 | 0x00000000, |
136 | 0x454c4449, | 136 | 0x454c4449, |
137 | 0x00000912, | 137 | 0x00000906, |
138 | 0x00000910, | 138 | 0x00000904, |
139 | 0x00000000, | 139 | 0x00000000, |
140 | 0x00000000, | 140 | 0x00000000, |
141 | 0x00000000, | 141 | 0x00000000, |
@@ -239,10 +239,10 @@ uint32_t nvd0_pwr_data[] = { | |||
239 | 0x00000430, | 239 | 0x00000430, |
240 | 0x00040003, | 240 | 0x00040003, |
241 | 0x00000000, | 241 | 0x00000000, |
242 | 0x00000458, | 242 | 0x0000044c, |
243 | 0x00010004, | 243 | 0x00010004, |
244 | 0x00000000, | 244 | 0x00000000, |
245 | 0x00000472, | 245 | 0x00000466, |
246 | /* 0x03ac: memx_func_tail */ | 246 | /* 0x03ac: memx_func_tail */ |
247 | /* 0x03ac: memx_data_head */ | 247 | /* 0x03ac: memx_data_head */ |
248 | 0x00000000, | 248 | 0x00000000, |
@@ -1100,26 +1100,23 @@ uint32_t nvd0_pwr_code[] = { | |||
1100 | 0xf960f908, | 1100 | 0xf960f908, |
1101 | 0xfcd0fc50, | 1101 | 0xfcd0fc50, |
1102 | 0x3321f4e0, | 1102 | 0x3321f4e0, |
1103 | 0x140003f1, | ||
1104 | 0x800506fd, | ||
1105 | 0x04bd0005, | ||
1106 | 0xf40242b6, | 1103 | 0xf40242b6, |
1107 | 0x00f8dd1b, | 1104 | 0x00f8e91b, |
1108 | /* 0x0458: memx_func_wait */ | 1105 | /* 0x044c: memx_func_wait */ |
1109 | 0xcf2c87f0, | 1106 | 0xcf2c87f0, |
1110 | 0x1e980088, | 1107 | 0x1e980088, |
1111 | 0x011d9800, | 1108 | 0x011d9800, |
1112 | 0x98021c98, | 1109 | 0x98021c98, |
1113 | 0x10b6031b, | 1110 | 0x10b6031b, |
1114 | 0x7e21f410, | 1111 | 0x7e21f410, |
1115 | /* 0x0472: memx_func_delay */ | 1112 | /* 0x0466: memx_func_delay */ |
1116 | 0x1e9800f8, | 1113 | 0x1e9800f8, |
1117 | 0x0410b600, | 1114 | 0x0410b600, |
1118 | 0xf86721f4, | 1115 | 0xf86721f4, |
1119 | /* 0x047d: memx_exec */ | 1116 | /* 0x0471: memx_exec */ |
1120 | 0xf9e0f900, | 1117 | 0xf9e0f900, |
1121 | 0x02c1b9d0, | 1118 | 0x02c1b9d0, |
1122 | /* 0x0487: memx_exec_next */ | 1119 | /* 0x047b: memx_exec_next */ |
1123 | 0x9802b2b9, | 1120 | 0x9802b2b9, |
1124 | 0x10b60013, | 1121 | 0x10b60013, |
1125 | 0x10349504, | 1122 | 0x10349504, |
@@ -1129,107 +1126,107 @@ uint32_t nvd0_pwr_code[] = { | |||
1129 | 0xd0fcec1e, | 1126 | 0xd0fcec1e, |
1130 | 0x21f5e0fc, | 1127 | 0x21f5e0fc, |
1131 | 0x00f8026b, | 1128 | 0x00f8026b, |
1132 | /* 0x04a8: memx_info */ | 1129 | /* 0x049c: memx_info */ |
1133 | 0x03acc7f1, | 1130 | 0x03acc7f1, |
1134 | 0x0800b7f1, | 1131 | 0x0800b7f1, |
1135 | 0x026b21f5, | 1132 | 0x026b21f5, |
1136 | /* 0x04b6: memx_recv */ | 1133 | /* 0x04aa: memx_recv */ |
1137 | 0xd6b000f8, | 1134 | 0xd6b000f8, |
1138 | 0xc40bf401, | 1135 | 0xc40bf401, |
1139 | 0xf400d6b0, | 1136 | 0xf400d6b0, |
1140 | 0x00f8e90b, | 1137 | 0x00f8e90b, |
1141 | /* 0x04c4: memx_init */ | 1138 | /* 0x04b8: memx_init */ |
1142 | /* 0x04c6: perf_recv */ | 1139 | /* 0x04ba: perf_recv */ |
1143 | 0x00f800f8, | 1140 | 0x00f800f8, |
1144 | /* 0x04c8: perf_init */ | 1141 | /* 0x04bc: perf_init */ |
1145 | /* 0x04ca: i2c_drive_scl */ | 1142 | /* 0x04be: i2c_drive_scl */ |
1146 | 0x36b000f8, | 1143 | 0x36b000f8, |
1147 | 0x0e0bf400, | 1144 | 0x0e0bf400, |
1148 | 0x07e007f1, | 1145 | 0x07e007f1, |
1149 | 0xbd0001d0, | 1146 | 0xbd0001d0, |
1150 | /* 0x04db: i2c_drive_scl_lo */ | 1147 | /* 0x04cf: i2c_drive_scl_lo */ |
1151 | 0xf100f804, | 1148 | 0xf100f804, |
1152 | 0xd007e407, | 1149 | 0xd007e407, |
1153 | 0x04bd0001, | 1150 | 0x04bd0001, |
1154 | /* 0x04e6: i2c_drive_sda */ | 1151 | /* 0x04da: i2c_drive_sda */ |
1155 | 0x36b000f8, | 1152 | 0x36b000f8, |
1156 | 0x0e0bf400, | 1153 | 0x0e0bf400, |
1157 | 0x07e007f1, | 1154 | 0x07e007f1, |
1158 | 0xbd0002d0, | 1155 | 0xbd0002d0, |
1159 | /* 0x04f7: i2c_drive_sda_lo */ | 1156 | /* 0x04eb: i2c_drive_sda_lo */ |
1160 | 0xf100f804, | 1157 | 0xf100f804, |
1161 | 0xd007e407, | 1158 | 0xd007e407, |
1162 | 0x04bd0002, | 1159 | 0x04bd0002, |
1163 | /* 0x0502: i2c_sense_scl */ | 1160 | /* 0x04f6: i2c_sense_scl */ |
1164 | 0x32f400f8, | 1161 | 0x32f400f8, |
1165 | 0xc437f101, | 1162 | 0xc437f101, |
1166 | 0x0033cf07, | 1163 | 0x0033cf07, |
1167 | 0xf40431fd, | 1164 | 0xf40431fd, |
1168 | 0x31f4060b, | 1165 | 0x31f4060b, |
1169 | /* 0x0515: i2c_sense_scl_done */ | 1166 | /* 0x0509: i2c_sense_scl_done */ |
1170 | /* 0x0517: i2c_sense_sda */ | 1167 | /* 0x050b: i2c_sense_sda */ |
1171 | 0xf400f801, | 1168 | 0xf400f801, |
1172 | 0x37f10132, | 1169 | 0x37f10132, |
1173 | 0x33cf07c4, | 1170 | 0x33cf07c4, |
1174 | 0x0432fd00, | 1171 | 0x0432fd00, |
1175 | 0xf4060bf4, | 1172 | 0xf4060bf4, |
1176 | /* 0x052a: i2c_sense_sda_done */ | 1173 | /* 0x051e: i2c_sense_sda_done */ |
1177 | 0x00f80131, | 1174 | 0x00f80131, |
1178 | /* 0x052c: i2c_raise_scl */ | 1175 | /* 0x0520: i2c_raise_scl */ |
1179 | 0x47f140f9, | 1176 | 0x47f140f9, |
1180 | 0x37f00898, | 1177 | 0x37f00898, |
1181 | 0xca21f501, | 1178 | 0xbe21f501, |
1182 | /* 0x0539: i2c_raise_scl_wait */ | 1179 | /* 0x052d: i2c_raise_scl_wait */ |
1183 | 0xe8e7f104, | 1180 | 0xe8e7f104, |
1184 | 0x6721f403, | 1181 | 0x6721f403, |
1185 | 0x050221f5, | 1182 | 0x04f621f5, |
1186 | 0xb60901f4, | 1183 | 0xb60901f4, |
1187 | 0x1bf40142, | 1184 | 0x1bf40142, |
1188 | /* 0x054d: i2c_raise_scl_done */ | 1185 | /* 0x0541: i2c_raise_scl_done */ |
1189 | 0xf840fcef, | 1186 | 0xf840fcef, |
1190 | /* 0x0551: i2c_start */ | 1187 | /* 0x0545: i2c_start */ |
1191 | 0x0221f500, | 1188 | 0xf621f500, |
1192 | 0x0d11f405, | 1189 | 0x0d11f404, |
1193 | 0x051721f5, | 1190 | 0x050b21f5, |
1194 | 0xf40611f4, | 1191 | 0xf40611f4, |
1195 | /* 0x0562: i2c_start_rep */ | 1192 | /* 0x0556: i2c_start_rep */ |
1196 | 0x37f0300e, | 1193 | 0x37f0300e, |
1197 | 0xca21f500, | 1194 | 0xbe21f500, |
1198 | 0x0137f004, | 1195 | 0x0137f004, |
1199 | 0x04e621f5, | 1196 | 0x04da21f5, |
1200 | 0xb60076bb, | 1197 | 0xb60076bb, |
1201 | 0x50f90465, | 1198 | 0x50f90465, |
1202 | 0xbb046594, | 1199 | 0xbb046594, |
1203 | 0x50bd0256, | 1200 | 0x50bd0256, |
1204 | 0xfc0475fd, | 1201 | 0xfc0475fd, |
1205 | 0x2c21f550, | 1202 | 0x2021f550, |
1206 | 0x0464b605, | 1203 | 0x0464b605, |
1207 | /* 0x058f: i2c_start_send */ | 1204 | /* 0x0583: i2c_start_send */ |
1208 | 0xf01f11f4, | 1205 | 0xf01f11f4, |
1209 | 0x21f50037, | 1206 | 0x21f50037, |
1210 | 0xe7f104e6, | 1207 | 0xe7f104da, |
1211 | 0x21f41388, | 1208 | 0x21f41388, |
1212 | 0x0037f067, | 1209 | 0x0037f067, |
1213 | 0x04ca21f5, | 1210 | 0x04be21f5, |
1214 | 0x1388e7f1, | 1211 | 0x1388e7f1, |
1215 | /* 0x05ab: i2c_start_out */ | 1212 | /* 0x059f: i2c_start_out */ |
1216 | 0xf86721f4, | 1213 | 0xf86721f4, |
1217 | /* 0x05ad: i2c_stop */ | 1214 | /* 0x05a1: i2c_stop */ |
1218 | 0x0037f000, | 1215 | 0x0037f000, |
1219 | 0x04ca21f5, | 1216 | 0x04be21f5, |
1220 | 0xf50037f0, | 1217 | 0xf50037f0, |
1221 | 0xf104e621, | 1218 | 0xf104da21, |
1222 | 0xf403e8e7, | 1219 | 0xf403e8e7, |
1223 | 0x37f06721, | 1220 | 0x37f06721, |
1224 | 0xca21f501, | 1221 | 0xbe21f501, |
1225 | 0x88e7f104, | 1222 | 0x88e7f104, |
1226 | 0x6721f413, | 1223 | 0x6721f413, |
1227 | 0xf50137f0, | 1224 | 0xf50137f0, |
1228 | 0xf104e621, | 1225 | 0xf104da21, |
1229 | 0xf41388e7, | 1226 | 0xf41388e7, |
1230 | 0x00f86721, | 1227 | 0x00f86721, |
1231 | /* 0x05e0: i2c_bitw */ | 1228 | /* 0x05d4: i2c_bitw */ |
1232 | 0x04e621f5, | 1229 | 0x04da21f5, |
1233 | 0x03e8e7f1, | 1230 | 0x03e8e7f1, |
1234 | 0xbb6721f4, | 1231 | 0xbb6721f4, |
1235 | 0x65b60076, | 1232 | 0x65b60076, |
@@ -1237,18 +1234,18 @@ uint32_t nvd0_pwr_code[] = { | |||
1237 | 0x56bb0465, | 1234 | 0x56bb0465, |
1238 | 0xfd50bd02, | 1235 | 0xfd50bd02, |
1239 | 0x50fc0475, | 1236 | 0x50fc0475, |
1240 | 0x052c21f5, | 1237 | 0x052021f5, |
1241 | 0xf40464b6, | 1238 | 0xf40464b6, |
1242 | 0xe7f11811, | 1239 | 0xe7f11811, |
1243 | 0x21f41388, | 1240 | 0x21f41388, |
1244 | 0x0037f067, | 1241 | 0x0037f067, |
1245 | 0x04ca21f5, | 1242 | 0x04be21f5, |
1246 | 0x1388e7f1, | 1243 | 0x1388e7f1, |
1247 | /* 0x061f: i2c_bitw_out */ | 1244 | /* 0x0613: i2c_bitw_out */ |
1248 | 0xf86721f4, | 1245 | 0xf86721f4, |
1249 | /* 0x0621: i2c_bitr */ | 1246 | /* 0x0615: i2c_bitr */ |
1250 | 0x0137f000, | 1247 | 0x0137f000, |
1251 | 0x04e621f5, | 1248 | 0x04da21f5, |
1252 | 0x03e8e7f1, | 1249 | 0x03e8e7f1, |
1253 | 0xbb6721f4, | 1250 | 0xbb6721f4, |
1254 | 0x65b60076, | 1251 | 0x65b60076, |
@@ -1256,19 +1253,19 @@ uint32_t nvd0_pwr_code[] = { | |||
1256 | 0x56bb0465, | 1253 | 0x56bb0465, |
1257 | 0xfd50bd02, | 1254 | 0xfd50bd02, |
1258 | 0x50fc0475, | 1255 | 0x50fc0475, |
1259 | 0x052c21f5, | 1256 | 0x052021f5, |
1260 | 0xf40464b6, | 1257 | 0xf40464b6, |
1261 | 0x21f51b11, | 1258 | 0x21f51b11, |
1262 | 0x37f00517, | 1259 | 0x37f0050b, |
1263 | 0xca21f500, | 1260 | 0xbe21f500, |
1264 | 0x88e7f104, | 1261 | 0x88e7f104, |
1265 | 0x6721f413, | 1262 | 0x6721f413, |
1266 | 0xf4013cf0, | 1263 | 0xf4013cf0, |
1267 | /* 0x0666: i2c_bitr_done */ | 1264 | /* 0x065a: i2c_bitr_done */ |
1268 | 0x00f80131, | 1265 | 0x00f80131, |
1269 | /* 0x0668: i2c_get_byte */ | 1266 | /* 0x065c: i2c_get_byte */ |
1270 | 0xf00057f0, | 1267 | 0xf00057f0, |
1271 | /* 0x066e: i2c_get_byte_next */ | 1268 | /* 0x0662: i2c_get_byte_next */ |
1272 | 0x54b60847, | 1269 | 0x54b60847, |
1273 | 0x0076bb01, | 1270 | 0x0076bb01, |
1274 | 0xf90465b6, | 1271 | 0xf90465b6, |
@@ -1276,7 +1273,7 @@ uint32_t nvd0_pwr_code[] = { | |||
1276 | 0xbd0256bb, | 1273 | 0xbd0256bb, |
1277 | 0x0475fd50, | 1274 | 0x0475fd50, |
1278 | 0x21f550fc, | 1275 | 0x21f550fc, |
1279 | 0x64b60621, | 1276 | 0x64b60615, |
1280 | 0x2b11f404, | 1277 | 0x2b11f404, |
1281 | 0xb60553fd, | 1278 | 0xb60553fd, |
1282 | 0x1bf40142, | 1279 | 0x1bf40142, |
@@ -1286,12 +1283,12 @@ uint32_t nvd0_pwr_code[] = { | |||
1286 | 0xbb046594, | 1283 | 0xbb046594, |
1287 | 0x50bd0256, | 1284 | 0x50bd0256, |
1288 | 0xfc0475fd, | 1285 | 0xfc0475fd, |
1289 | 0xe021f550, | 1286 | 0xd421f550, |
1290 | 0x0464b605, | 1287 | 0x0464b605, |
1291 | /* 0x06b8: i2c_get_byte_done */ | 1288 | /* 0x06ac: i2c_get_byte_done */ |
1292 | /* 0x06ba: i2c_put_byte */ | 1289 | /* 0x06ae: i2c_put_byte */ |
1293 | 0x47f000f8, | 1290 | 0x47f000f8, |
1294 | /* 0x06bd: i2c_put_byte_next */ | 1291 | /* 0x06b1: i2c_put_byte_next */ |
1295 | 0x0142b608, | 1292 | 0x0142b608, |
1296 | 0xbb3854ff, | 1293 | 0xbb3854ff, |
1297 | 0x65b60076, | 1294 | 0x65b60076, |
@@ -1299,7 +1296,7 @@ uint32_t nvd0_pwr_code[] = { | |||
1299 | 0x56bb0465, | 1296 | 0x56bb0465, |
1300 | 0xfd50bd02, | 1297 | 0xfd50bd02, |
1301 | 0x50fc0475, | 1298 | 0x50fc0475, |
1302 | 0x05e021f5, | 1299 | 0x05d421f5, |
1303 | 0xf40464b6, | 1300 | 0xf40464b6, |
1304 | 0x46b03411, | 1301 | 0x46b03411, |
1305 | 0xd81bf400, | 1302 | 0xd81bf400, |
@@ -1308,21 +1305,21 @@ uint32_t nvd0_pwr_code[] = { | |||
1308 | 0xbb046594, | 1305 | 0xbb046594, |
1309 | 0x50bd0256, | 1306 | 0x50bd0256, |
1310 | 0xfc0475fd, | 1307 | 0xfc0475fd, |
1311 | 0x2121f550, | 1308 | 0x1521f550, |
1312 | 0x0464b606, | 1309 | 0x0464b606, |
1313 | 0xbb0f11f4, | 1310 | 0xbb0f11f4, |
1314 | 0x36b00076, | 1311 | 0x36b00076, |
1315 | 0x061bf401, | 1312 | 0x061bf401, |
1316 | /* 0x0713: i2c_put_byte_done */ | 1313 | /* 0x0707: i2c_put_byte_done */ |
1317 | 0xf80132f4, | 1314 | 0xf80132f4, |
1318 | /* 0x0715: i2c_addr */ | 1315 | /* 0x0709: i2c_addr */ |
1319 | 0x0076bb00, | 1316 | 0x0076bb00, |
1320 | 0xf90465b6, | 1317 | 0xf90465b6, |
1321 | 0x04659450, | 1318 | 0x04659450, |
1322 | 0xbd0256bb, | 1319 | 0xbd0256bb, |
1323 | 0x0475fd50, | 1320 | 0x0475fd50, |
1324 | 0x21f550fc, | 1321 | 0x21f550fc, |
1325 | 0x64b60551, | 1322 | 0x64b60545, |
1326 | 0x2911f404, | 1323 | 0x2911f404, |
1327 | 0x012ec3e7, | 1324 | 0x012ec3e7, |
1328 | 0xfd0134b6, | 1325 | 0xfd0134b6, |
@@ -1332,23 +1329,23 @@ uint32_t nvd0_pwr_code[] = { | |||
1332 | 0x0256bb04, | 1329 | 0x0256bb04, |
1333 | 0x75fd50bd, | 1330 | 0x75fd50bd, |
1334 | 0xf550fc04, | 1331 | 0xf550fc04, |
1335 | 0xb606ba21, | 1332 | 0xb606ae21, |
1336 | /* 0x075a: i2c_addr_done */ | 1333 | /* 0x074e: i2c_addr_done */ |
1337 | 0x00f80464, | 1334 | 0x00f80464, |
1338 | /* 0x075c: i2c_acquire_addr */ | 1335 | /* 0x0750: i2c_acquire_addr */ |
1339 | 0xb6f8cec7, | 1336 | 0xb6f8cec7, |
1340 | 0xe0b705e4, | 1337 | 0xe0b705e4, |
1341 | 0x00f8d014, | 1338 | 0x00f8d014, |
1342 | /* 0x0768: i2c_acquire */ | 1339 | /* 0x075c: i2c_acquire */ |
1343 | 0x075c21f5, | 1340 | 0x075021f5, |
1344 | 0xf00421f4, | 1341 | 0xf00421f4, |
1345 | 0x21f403d9, | 1342 | 0x21f403d9, |
1346 | /* 0x0777: i2c_release */ | 1343 | /* 0x076b: i2c_release */ |
1347 | 0xf500f833, | 1344 | 0xf500f833, |
1348 | 0xf4075c21, | 1345 | 0xf4075021, |
1349 | 0xdaf00421, | 1346 | 0xdaf00421, |
1350 | 0x3321f403, | 1347 | 0x3321f403, |
1351 | /* 0x0786: i2c_recv */ | 1348 | /* 0x077a: i2c_recv */ |
1352 | 0x32f400f8, | 1349 | 0x32f400f8, |
1353 | 0xf8c1c701, | 1350 | 0xf8c1c701, |
1354 | 0xb00214b6, | 1351 | 0xb00214b6, |
@@ -1367,7 +1364,7 @@ uint32_t nvd0_pwr_code[] = { | |||
1367 | 0xbb046594, | 1364 | 0xbb046594, |
1368 | 0x50bd0256, | 1365 | 0x50bd0256, |
1369 | 0xfc0475fd, | 1366 | 0xfc0475fd, |
1370 | 0x6821f550, | 1367 | 0x5c21f550, |
1371 | 0x0464b607, | 1368 | 0x0464b607, |
1372 | 0xd6b0d0fc, | 1369 | 0xd6b0d0fc, |
1373 | 0xb31bf500, | 1370 | 0xb31bf500, |
@@ -1377,7 +1374,7 @@ uint32_t nvd0_pwr_code[] = { | |||
1377 | 0xbb046594, | 1374 | 0xbb046594, |
1378 | 0x50bd0256, | 1375 | 0x50bd0256, |
1379 | 0xfc0475fd, | 1376 | 0xfc0475fd, |
1380 | 0x1521f550, | 1377 | 0x0921f550, |
1381 | 0x0464b607, | 1378 | 0x0464b607, |
1382 | 0x00d011f5, | 1379 | 0x00d011f5, |
1383 | 0xbbe0c5c7, | 1380 | 0xbbe0c5c7, |
@@ -1386,7 +1383,7 @@ uint32_t nvd0_pwr_code[] = { | |||
1386 | 0x56bb0465, | 1383 | 0x56bb0465, |
1387 | 0xfd50bd02, | 1384 | 0xfd50bd02, |
1388 | 0x50fc0475, | 1385 | 0x50fc0475, |
1389 | 0x06ba21f5, | 1386 | 0x06ae21f5, |
1390 | 0xf50464b6, | 1387 | 0xf50464b6, |
1391 | 0xf000ad11, | 1388 | 0xf000ad11, |
1392 | 0x76bb0157, | 1389 | 0x76bb0157, |
@@ -1395,7 +1392,7 @@ uint32_t nvd0_pwr_code[] = { | |||
1395 | 0x0256bb04, | 1392 | 0x0256bb04, |
1396 | 0x75fd50bd, | 1393 | 0x75fd50bd, |
1397 | 0xf550fc04, | 1394 | 0xf550fc04, |
1398 | 0xb6071521, | 1395 | 0xb6070921, |
1399 | 0x11f50464, | 1396 | 0x11f50464, |
1400 | 0x76bb008a, | 1397 | 0x76bb008a, |
1401 | 0x0465b600, | 1398 | 0x0465b600, |
@@ -1403,7 +1400,7 @@ uint32_t nvd0_pwr_code[] = { | |||
1403 | 0x0256bb04, | 1400 | 0x0256bb04, |
1404 | 0x75fd50bd, | 1401 | 0x75fd50bd, |
1405 | 0xf550fc04, | 1402 | 0xf550fc04, |
1406 | 0xb6066821, | 1403 | 0xb6065c21, |
1407 | 0x11f40464, | 1404 | 0x11f40464, |
1408 | 0xe05bcb6a, | 1405 | 0xe05bcb6a, |
1409 | 0xb60076bb, | 1406 | 0xb60076bb, |
@@ -1411,38 +1408,38 @@ uint32_t nvd0_pwr_code[] = { | |||
1411 | 0xbb046594, | 1408 | 0xbb046594, |
1412 | 0x50bd0256, | 1409 | 0x50bd0256, |
1413 | 0xfc0475fd, | 1410 | 0xfc0475fd, |
1414 | 0xad21f550, | 1411 | 0xa121f550, |
1415 | 0x0464b605, | 1412 | 0x0464b605, |
1416 | 0xbd025bb9, | 1413 | 0xbd025bb9, |
1417 | 0x430ef474, | 1414 | 0x430ef474, |
1418 | /* 0x088c: i2c_recv_not_rd08 */ | 1415 | /* 0x0880: i2c_recv_not_rd08 */ |
1419 | 0xf401d6b0, | 1416 | 0xf401d6b0, |
1420 | 0x57f03d1b, | 1417 | 0x57f03d1b, |
1421 | 0x1521f500, | 1418 | 0x0921f500, |
1422 | 0x3311f407, | 1419 | 0x3311f407, |
1423 | 0xf5e0c5c7, | 1420 | 0xf5e0c5c7, |
1424 | 0xf406ba21, | 1421 | 0xf406ae21, |
1425 | 0x57f02911, | 1422 | 0x57f02911, |
1426 | 0x1521f500, | 1423 | 0x0921f500, |
1427 | 0x1f11f407, | 1424 | 0x1f11f407, |
1428 | 0xf5e0b5c7, | 1425 | 0xf5e0b5c7, |
1429 | 0xf406ba21, | 1426 | 0xf406ae21, |
1430 | 0x21f51511, | 1427 | 0x21f51511, |
1431 | 0x74bd05ad, | 1428 | 0x74bd05a1, |
1432 | 0xf408c5c7, | 1429 | 0xf408c5c7, |
1433 | 0x32f4091b, | 1430 | 0x32f4091b, |
1434 | 0x030ef402, | 1431 | 0x030ef402, |
1435 | /* 0x08cc: i2c_recv_not_wr08 */ | 1432 | /* 0x08c0: i2c_recv_not_wr08 */ |
1436 | /* 0x08cc: i2c_recv_done */ | 1433 | /* 0x08c0: i2c_recv_done */ |
1437 | 0xf5f8cec7, | 1434 | 0xf5f8cec7, |
1438 | 0xfc077721, | 1435 | 0xfc076b21, |
1439 | 0xf4d0fce0, | 1436 | 0xf4d0fce0, |
1440 | 0x7cb90a12, | 1437 | 0x7cb90a12, |
1441 | 0x6b21f502, | 1438 | 0x6b21f502, |
1442 | /* 0x08e1: i2c_recv_exit */ | 1439 | /* 0x08d5: i2c_recv_exit */ |
1443 | /* 0x08e3: i2c_init */ | 1440 | /* 0x08d7: i2c_init */ |
1444 | 0xf800f802, | 1441 | 0xf800f802, |
1445 | /* 0x08e5: test_recv */ | 1442 | /* 0x08d9: test_recv */ |
1446 | 0xd817f100, | 1443 | 0xd817f100, |
1447 | 0x0011cf05, | 1444 | 0x0011cf05, |
1448 | 0xf10110b6, | 1445 | 0xf10110b6, |
@@ -1451,28 +1448,28 @@ uint32_t nvd0_pwr_code[] = { | |||
1451 | 0xd900e7f1, | 1448 | 0xd900e7f1, |
1452 | 0x134fe3f1, | 1449 | 0x134fe3f1, |
1453 | 0x01b621f5, | 1450 | 0x01b621f5, |
1454 | /* 0x0906: test_init */ | 1451 | /* 0x08fa: test_init */ |
1455 | 0xe7f100f8, | 1452 | 0xe7f100f8, |
1456 | 0x21f50800, | 1453 | 0x21f50800, |
1457 | 0x00f801b6, | 1454 | 0x00f801b6, |
1458 | /* 0x0910: idle_recv */ | 1455 | /* 0x0904: idle_recv */ |
1459 | /* 0x0912: idle */ | 1456 | /* 0x0906: idle */ |
1460 | 0x31f400f8, | 1457 | 0x31f400f8, |
1461 | 0xd417f100, | 1458 | 0xd417f100, |
1462 | 0x0011cf05, | 1459 | 0x0011cf05, |
1463 | 0xf10110b6, | 1460 | 0xf10110b6, |
1464 | 0xd005d407, | 1461 | 0xd005d407, |
1465 | 0x04bd0001, | 1462 | 0x04bd0001, |
1466 | /* 0x0928: idle_loop */ | 1463 | /* 0x091c: idle_loop */ |
1467 | 0xf45817f0, | 1464 | 0xf45817f0, |
1468 | /* 0x092e: idle_proc */ | 1465 | /* 0x0922: idle_proc */ |
1469 | /* 0x092e: idle_proc_exec */ | 1466 | /* 0x0922: idle_proc_exec */ |
1470 | 0x10f90232, | 1467 | 0x10f90232, |
1471 | 0xf5021eb9, | 1468 | 0xf5021eb9, |
1472 | 0xfc027421, | 1469 | 0xfc027421, |
1473 | 0x0911f410, | 1470 | 0x0911f410, |
1474 | 0xf40231f4, | 1471 | 0xf40231f4, |
1475 | /* 0x0942: idle_proc_next */ | 1472 | /* 0x0936: idle_proc_next */ |
1476 | 0x10b6ef0e, | 1473 | 0x10b6ef0e, |
1477 | 0x061fb858, | 1474 | 0x061fb858, |
1478 | 0xf4e61bf4, | 1475 | 0xf4e61bf4, |
@@ -1521,4 +1518,7 @@ uint32_t nvd0_pwr_code[] = { | |||
1521 | 0x00000000, | 1518 | 0x00000000, |
1522 | 0x00000000, | 1519 | 0x00000000, |
1523 | 0x00000000, | 1520 | 0x00000000, |
1521 | 0x00000000, | ||
1522 | 0x00000000, | ||
1523 | 0x00000000, | ||
1524 | }; | 1524 | }; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/gk104.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/gk104.c new file mode 100644 index 000000000000..d76612999b9f --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/gk104.c | |||
@@ -0,0 +1,69 @@ | |||
1 | /* | ||
2 | * Copyright 2013 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs | ||
23 | */ | ||
24 | |||
25 | #include "priv.h" | ||
26 | |||
27 | #define nvd0_pwr_code gk104_pwr_code | ||
28 | #define nvd0_pwr_data gk104_pwr_data | ||
29 | #include "fuc/nvd0.fuc.h" | ||
30 | |||
31 | static void | ||
32 | gk104_pwr_pgob(struct nouveau_pwr *ppwr, bool enable) | ||
33 | { | ||
34 | nv_mask(ppwr, 0x000200, 0x00001000, 0x00000000); | ||
35 | nv_rd32(ppwr, 0x000200); | ||
36 | nv_mask(ppwr, 0x000200, 0x08000000, 0x08000000); | ||
37 | msleep(50); | ||
38 | |||
39 | nv_mask(ppwr, 0x10a78c, 0x00000002, 0x00000002); | ||
40 | nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000001); | ||
41 | nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000000); | ||
42 | |||
43 | nv_mask(ppwr, 0x020004, 0xc0000000, enable ? 0xc0000000 : 0x40000000); | ||
44 | msleep(50); | ||
45 | |||
46 | nv_mask(ppwr, 0x10a78c, 0x00000002, 0x00000000); | ||
47 | nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000001); | ||
48 | nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000000); | ||
49 | |||
50 | nv_mask(ppwr, 0x000200, 0x08000000, 0x00000000); | ||
51 | nv_mask(ppwr, 0x000200, 0x00001000, 0x00001000); | ||
52 | nv_rd32(ppwr, 0x000200); | ||
53 | } | ||
54 | |||
55 | struct nouveau_oclass * | ||
56 | gk104_pwr_oclass = &(struct nvkm_pwr_impl) { | ||
57 | .base.handle = NV_SUBDEV(PWR, 0xe4), | ||
58 | .base.ofuncs = &(struct nouveau_ofuncs) { | ||
59 | .ctor = _nouveau_pwr_ctor, | ||
60 | .dtor = _nouveau_pwr_dtor, | ||
61 | .init = _nouveau_pwr_init, | ||
62 | .fini = _nouveau_pwr_fini, | ||
63 | }, | ||
64 | .code.data = gk104_pwr_code, | ||
65 | .code.size = sizeof(gk104_pwr_code), | ||
66 | .data.data = gk104_pwr_data, | ||
67 | .data.size = sizeof(gk104_pwr_data), | ||
68 | .pgob = gk104_pwr_pgob, | ||
69 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c index 03de3107d29f..def6a9ac68cf 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c | |||
@@ -1,8 +1,7 @@ | |||
1 | #ifndef __NVKM_PWR_MEMX_H__ | 1 | #ifndef __NVKM_PWR_MEMX_H__ |
2 | #define __NVKM_PWR_MEMX_H__ | 2 | #define __NVKM_PWR_MEMX_H__ |
3 | 3 | ||
4 | #include <subdev/pwr.h> | 4 | #include "priv.h" |
5 | #include <subdev/pwr/fuc/os.h> | ||
6 | 5 | ||
7 | struct nouveau_memx { | 6 | struct nouveau_memx { |
8 | struct nouveau_pwr *ppwr; | 7 | struct nouveau_pwr *ppwr; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c index 52c85414866a..04ff7c3c34e9 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c | |||
@@ -22,41 +22,20 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <subdev/pwr.h> | 25 | #include "priv.h" |
26 | |||
27 | #include "fuc/nv108.fuc.h" | 26 | #include "fuc/nv108.fuc.h" |
28 | 27 | ||
29 | struct nv108_pwr_priv { | 28 | struct nouveau_oclass * |
30 | struct nouveau_pwr base; | 29 | nv108_pwr_oclass = &(struct nvkm_pwr_impl) { |
31 | }; | 30 | .base.handle = NV_SUBDEV(PWR, 0x00), |
32 | 31 | .base.ofuncs = &(struct nouveau_ofuncs) { | |
33 | static int | 32 | .ctor = _nouveau_pwr_ctor, |
34 | nv108_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | ||
35 | struct nouveau_oclass *oclass, void *data, u32 size, | ||
36 | struct nouveau_object **pobject) | ||
37 | { | ||
38 | struct nv108_pwr_priv *priv; | ||
39 | int ret; | ||
40 | |||
41 | ret = nouveau_pwr_create(parent, engine, oclass, &priv); | ||
42 | *pobject = nv_object(priv); | ||
43 | if (ret) | ||
44 | return ret; | ||
45 | |||
46 | priv->base.code.data = nv108_pwr_code; | ||
47 | priv->base.code.size = sizeof(nv108_pwr_code); | ||
48 | priv->base.data.data = nv108_pwr_data; | ||
49 | priv->base.data.size = sizeof(nv108_pwr_data); | ||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | struct nouveau_oclass | ||
54 | nv108_pwr_oclass = { | ||
55 | .handle = NV_SUBDEV(PWR, 0x00), | ||
56 | .ofuncs = &(struct nouveau_ofuncs) { | ||
57 | .ctor = nv108_pwr_ctor, | ||
58 | .dtor = _nouveau_pwr_dtor, | 33 | .dtor = _nouveau_pwr_dtor, |
59 | .init = _nouveau_pwr_init, | 34 | .init = _nouveau_pwr_init, |
60 | .fini = _nouveau_pwr_fini, | 35 | .fini = _nouveau_pwr_fini, |
61 | }, | 36 | }, |
62 | }; | 37 | .code.data = nv108_pwr_code, |
38 | .code.size = sizeof(nv108_pwr_code), | ||
39 | .data.data = nv108_pwr_data, | ||
40 | .data.size = sizeof(nv108_pwr_data), | ||
41 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c index c132b7ca9747..998d53076b8b 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c | |||
@@ -22,50 +22,29 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <subdev/pwr.h> | 25 | #include "priv.h" |
26 | |||
27 | #include "fuc/nva3.fuc.h" | 26 | #include "fuc/nva3.fuc.h" |
28 | 27 | ||
29 | struct nva3_pwr_priv { | ||
30 | struct nouveau_pwr base; | ||
31 | }; | ||
32 | |||
33 | static int | 28 | static int |
34 | nva3_pwr_init(struct nouveau_object *object) | 29 | nva3_pwr_init(struct nouveau_object *object) |
35 | { | 30 | { |
36 | struct nva3_pwr_priv *priv = (void *)object; | 31 | struct nouveau_pwr *ppwr = (void *)object; |
37 | nv_mask(priv, 0x022210, 0x00000001, 0x00000000); | 32 | nv_mask(ppwr, 0x022210, 0x00000001, 0x00000000); |
38 | nv_mask(priv, 0x022210, 0x00000001, 0x00000001); | 33 | nv_mask(ppwr, 0x022210, 0x00000001, 0x00000001); |
39 | return nouveau_pwr_init(&priv->base); | 34 | return nouveau_pwr_init(ppwr); |
40 | } | ||
41 | |||
42 | static int | ||
43 | nva3_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | ||
44 | struct nouveau_oclass *oclass, void *data, u32 size, | ||
45 | struct nouveau_object **pobject) | ||
46 | { | ||
47 | struct nva3_pwr_priv *priv; | ||
48 | int ret; | ||
49 | |||
50 | ret = nouveau_pwr_create(parent, engine, oclass, &priv); | ||
51 | *pobject = nv_object(priv); | ||
52 | if (ret) | ||
53 | return ret; | ||
54 | |||
55 | priv->base.code.data = nva3_pwr_code; | ||
56 | priv->base.code.size = sizeof(nva3_pwr_code); | ||
57 | priv->base.data.data = nva3_pwr_data; | ||
58 | priv->base.data.size = sizeof(nva3_pwr_data); | ||
59 | return 0; | ||
60 | } | 35 | } |
61 | 36 | ||
62 | struct nouveau_oclass | 37 | struct nouveau_oclass * |
63 | nva3_pwr_oclass = { | 38 | nva3_pwr_oclass = &(struct nvkm_pwr_impl) { |
64 | .handle = NV_SUBDEV(PWR, 0xa3), | 39 | .base.handle = NV_SUBDEV(PWR, 0xa3), |
65 | .ofuncs = &(struct nouveau_ofuncs) { | 40 | .base.ofuncs = &(struct nouveau_ofuncs) { |
66 | .ctor = nva3_pwr_ctor, | 41 | .ctor = _nouveau_pwr_ctor, |
67 | .dtor = _nouveau_pwr_dtor, | 42 | .dtor = _nouveau_pwr_dtor, |
68 | .init = nva3_pwr_init, | 43 | .init = nva3_pwr_init, |
69 | .fini = _nouveau_pwr_fini, | 44 | .fini = _nouveau_pwr_fini, |
70 | }, | 45 | }, |
71 | }; | 46 | .code.data = nva3_pwr_code, |
47 | .code.size = sizeof(nva3_pwr_code), | ||
48 | .data.data = nva3_pwr_data, | ||
49 | .data.size = sizeof(nva3_pwr_data), | ||
50 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c index 495f6857428d..9a773e66efa4 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c | |||
@@ -22,41 +22,20 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <subdev/pwr.h> | 25 | #include "priv.h" |
26 | |||
27 | #include "fuc/nvc0.fuc.h" | 26 | #include "fuc/nvc0.fuc.h" |
28 | 27 | ||
29 | struct nvc0_pwr_priv { | 28 | struct nouveau_oclass * |
30 | struct nouveau_pwr base; | 29 | nvc0_pwr_oclass = &(struct nvkm_pwr_impl) { |
31 | }; | 30 | .base.handle = NV_SUBDEV(PWR, 0xc0), |
32 | 31 | .base.ofuncs = &(struct nouveau_ofuncs) { | |
33 | static int | 32 | .ctor = _nouveau_pwr_ctor, |
34 | nvc0_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | ||
35 | struct nouveau_oclass *oclass, void *data, u32 size, | ||
36 | struct nouveau_object **pobject) | ||
37 | { | ||
38 | struct nvc0_pwr_priv *priv; | ||
39 | int ret; | ||
40 | |||
41 | ret = nouveau_pwr_create(parent, engine, oclass, &priv); | ||
42 | *pobject = nv_object(priv); | ||
43 | if (ret) | ||
44 | return ret; | ||
45 | |||
46 | priv->base.code.data = nvc0_pwr_code; | ||
47 | priv->base.code.size = sizeof(nvc0_pwr_code); | ||
48 | priv->base.data.data = nvc0_pwr_data; | ||
49 | priv->base.data.size = sizeof(nvc0_pwr_data); | ||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | struct nouveau_oclass | ||
54 | nvc0_pwr_oclass = { | ||
55 | .handle = NV_SUBDEV(PWR, 0xc0), | ||
56 | .ofuncs = &(struct nouveau_ofuncs) { | ||
57 | .ctor = nvc0_pwr_ctor, | ||
58 | .dtor = _nouveau_pwr_dtor, | 33 | .dtor = _nouveau_pwr_dtor, |
59 | .init = _nouveau_pwr_init, | 34 | .init = _nouveau_pwr_init, |
60 | .fini = _nouveau_pwr_fini, | 35 | .fini = _nouveau_pwr_fini, |
61 | }, | 36 | }, |
62 | }; | 37 | .code.data = nvc0_pwr_code, |
38 | .code.size = sizeof(nvc0_pwr_code), | ||
39 | .data.data = nvc0_pwr_data, | ||
40 | .data.size = sizeof(nvc0_pwr_data), | ||
41 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c index 043aa142fe82..2b29be5d08ac 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c | |||
@@ -22,41 +22,20 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <subdev/pwr.h> | 25 | #include "priv.h" |
26 | |||
27 | #include "fuc/nvd0.fuc.h" | 26 | #include "fuc/nvd0.fuc.h" |
28 | 27 | ||
29 | struct nvd0_pwr_priv { | 28 | struct nouveau_oclass * |
30 | struct nouveau_pwr base; | 29 | nvd0_pwr_oclass = &(struct nvkm_pwr_impl) { |
31 | }; | 30 | .base.handle = NV_SUBDEV(PWR, 0xd0), |
32 | 31 | .base.ofuncs = &(struct nouveau_ofuncs) { | |
33 | static int | 32 | .ctor = _nouveau_pwr_ctor, |
34 | nvd0_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | ||
35 | struct nouveau_oclass *oclass, void *data, u32 size, | ||
36 | struct nouveau_object **pobject) | ||
37 | { | ||
38 | struct nvd0_pwr_priv *priv; | ||
39 | int ret; | ||
40 | |||
41 | ret = nouveau_pwr_create(parent, engine, oclass, &priv); | ||
42 | *pobject = nv_object(priv); | ||
43 | if (ret) | ||
44 | return ret; | ||
45 | |||
46 | priv->base.code.data = nvd0_pwr_code; | ||
47 | priv->base.code.size = sizeof(nvd0_pwr_code); | ||
48 | priv->base.data.data = nvd0_pwr_data; | ||
49 | priv->base.data.size = sizeof(nvd0_pwr_data); | ||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | struct nouveau_oclass | ||
54 | nvd0_pwr_oclass = { | ||
55 | .handle = NV_SUBDEV(PWR, 0xd0), | ||
56 | .ofuncs = &(struct nouveau_ofuncs) { | ||
57 | .ctor = nvd0_pwr_ctor, | ||
58 | .dtor = _nouveau_pwr_dtor, | 33 | .dtor = _nouveau_pwr_dtor, |
59 | .init = _nouveau_pwr_init, | 34 | .init = _nouveau_pwr_init, |
60 | .fini = _nouveau_pwr_fini, | 35 | .fini = _nouveau_pwr_fini, |
61 | }, | 36 | }, |
62 | }; | 37 | .code.data = nvd0_pwr_code, |
38 | .code.size = sizeof(nvd0_pwr_code), | ||
39 | .data.data = nvd0_pwr_data, | ||
40 | .data.size = sizeof(nvd0_pwr_data), | ||
41 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/priv.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/priv.h new file mode 100644 index 000000000000..3814a341db32 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/priv.h | |||
@@ -0,0 +1,44 @@ | |||
1 | #ifndef __NVKM_PWR_PRIV_H__ | ||
2 | #define __NVKM_PWR_PRIV_H__ | ||
3 | |||
4 | #include <subdev/pwr.h> | ||
5 | #include <subdev/pwr/fuc/os.h> | ||
6 | |||
7 | #define nouveau_pwr_create(p, e, o, d) \ | ||
8 | nouveau_pwr_create_((p), (e), (o), sizeof(**d), (void **)d) | ||
9 | #define nouveau_pwr_destroy(p) \ | ||
10 | nouveau_subdev_destroy(&(p)->base) | ||
11 | #define nouveau_pwr_init(p) ({ \ | ||
12 | struct nouveau_pwr *_ppwr = (p); \ | ||
13 | _nouveau_pwr_init(nv_object(_ppwr)); \ | ||
14 | }) | ||
15 | #define nouveau_pwr_fini(p,s) ({ \ | ||
16 | struct nouveau_pwr *_ppwr = (p); \ | ||
17 | _nouveau_pwr_fini(nv_object(_ppwr), (s)); \ | ||
18 | }) | ||
19 | |||
20 | int nouveau_pwr_create_(struct nouveau_object *, struct nouveau_object *, | ||
21 | struct nouveau_oclass *, int, void **); | ||
22 | |||
23 | int _nouveau_pwr_ctor(struct nouveau_object *, struct nouveau_object *, | ||
24 | struct nouveau_oclass *, void *, u32, | ||
25 | struct nouveau_object **); | ||
26 | #define _nouveau_pwr_dtor _nouveau_subdev_dtor | ||
27 | int _nouveau_pwr_init(struct nouveau_object *); | ||
28 | int _nouveau_pwr_fini(struct nouveau_object *, bool); | ||
29 | |||
30 | struct nvkm_pwr_impl { | ||
31 | struct nouveau_oclass base; | ||
32 | struct { | ||
33 | u32 *data; | ||
34 | u32 size; | ||
35 | } code; | ||
36 | struct { | ||
37 | u32 *data; | ||
38 | u32 size; | ||
39 | } data; | ||
40 | |||
41 | void (*pgob)(struct nouveau_pwr *, bool); | ||
42 | }; | ||
43 | |||
44 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c index 668cf964e4a9..2d0988755530 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c | |||
@@ -28,7 +28,7 @@ | |||
28 | #include <subdev/timer.h> | 28 | #include <subdev/timer.h> |
29 | #include <subdev/fb.h> | 29 | #include <subdev/fb.h> |
30 | #include <subdev/vm.h> | 30 | #include <subdev/vm.h> |
31 | #include <subdev/ltcg.h> | 31 | #include <subdev/ltc.h> |
32 | #include <subdev/bar.h> | 32 | #include <subdev/bar.h> |
33 | 33 | ||
34 | struct nvc0_vmmgr_priv { | 34 | struct nvc0_vmmgr_priv { |
@@ -116,12 +116,12 @@ nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, | |||
116 | pte <<= 3; | 116 | pte <<= 3; |
117 | 117 | ||
118 | if (mem->tag) { | 118 | if (mem->tag) { |
119 | struct nouveau_ltcg *ltcg = | 119 | struct nouveau_ltc *ltc = |
120 | nouveau_ltcg(vma->vm->vmm->base.base.parent); | 120 | nouveau_ltc(vma->vm->vmm->base.base.parent); |
121 | u32 tag = mem->tag->offset + (delta >> 17); | 121 | u32 tag = mem->tag->offset + (delta >> 17); |
122 | phys |= (u64)tag << (32 + 12); | 122 | phys |= (u64)tag << (32 + 12); |
123 | next |= (u64)1 << (32 + 12); | 123 | next |= (u64)1 << (32 + 12); |
124 | ltcg->tags_clear(ltcg, tag, cnt); | 124 | ltc->tags_clear(ltc, tag, cnt); |
125 | } | 125 | } |
126 | 126 | ||
127 | while (cnt--) { | 127 | while (cnt--) { |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c index 2a15b98b4d2b..c6361422a0b2 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/arb.c +++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c | |||
@@ -198,12 +198,12 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp, | |||
198 | int *burst, int *lwm) | 198 | int *burst, int *lwm) |
199 | { | 199 | { |
200 | struct nouveau_drm *drm = nouveau_drm(dev); | 200 | struct nouveau_drm *drm = nouveau_drm(dev); |
201 | struct nouveau_device *device = nouveau_dev(dev); | 201 | struct nvif_device *device = &nouveau_drm(dev)->device; |
202 | struct nv_fifo_info fifo_data; | 202 | struct nv_fifo_info fifo_data; |
203 | struct nv_sim_state sim_data; | 203 | struct nv_sim_state sim_data; |
204 | int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY); | 204 | int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY); |
205 | int NVClk = nouveau_hw_get_clock(dev, PLL_CORE); | 205 | int NVClk = nouveau_hw_get_clock(dev, PLL_CORE); |
206 | uint32_t cfg1 = nv_rd32(device, NV04_PFB_CFG1); | 206 | uint32_t cfg1 = nvif_rd32(device, NV04_PFB_CFG1); |
207 | 207 | ||
208 | sim_data.pclk_khz = VClk; | 208 | sim_data.pclk_khz = VClk; |
209 | sim_data.mclk_khz = MClk; | 209 | sim_data.mclk_khz = MClk; |
@@ -221,13 +221,13 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp, | |||
221 | sim_data.mem_latency = 3; | 221 | sim_data.mem_latency = 3; |
222 | sim_data.mem_page_miss = 10; | 222 | sim_data.mem_page_miss = 10; |
223 | } else { | 223 | } else { |
224 | sim_data.memory_type = nv_rd32(device, NV04_PFB_CFG0) & 0x1; | 224 | sim_data.memory_type = nvif_rd32(device, NV04_PFB_CFG0) & 0x1; |
225 | sim_data.memory_width = (nv_rd32(device, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64; | 225 | sim_data.memory_width = (nvif_rd32(device, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64; |
226 | sim_data.mem_latency = cfg1 & 0xf; | 226 | sim_data.mem_latency = cfg1 & 0xf; |
227 | sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1); | 227 | sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1); |
228 | } | 228 | } |
229 | 229 | ||
230 | if (nv_device(drm->device)->card_type == NV_04) | 230 | if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT) |
231 | nv04_calc_arb(&fifo_data, &sim_data); | 231 | nv04_calc_arb(&fifo_data, &sim_data); |
232 | else | 232 | else |
233 | nv10_calc_arb(&fifo_data, &sim_data); | 233 | nv10_calc_arb(&fifo_data, &sim_data); |
@@ -254,7 +254,7 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm | |||
254 | { | 254 | { |
255 | struct nouveau_drm *drm = nouveau_drm(dev); | 255 | struct nouveau_drm *drm = nouveau_drm(dev); |
256 | 256 | ||
257 | if (nv_device(drm->device)->card_type < NV_20) | 257 | if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN) |
258 | nv04_update_arb(dev, vclk, bpp, burst, lwm); | 258 | nv04_update_arb(dev, vclk, bpp, burst, lwm); |
259 | else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ || | 259 | else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ || |
260 | (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) { | 260 | (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) { |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c index 41be3424c906..b90aa5c1f90a 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c +++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c | |||
@@ -111,8 +111,8 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod | |||
111 | { | 111 | { |
112 | struct drm_device *dev = crtc->dev; | 112 | struct drm_device *dev = crtc->dev; |
113 | struct nouveau_drm *drm = nouveau_drm(dev); | 113 | struct nouveau_drm *drm = nouveau_drm(dev); |
114 | struct nouveau_bios *bios = nouveau_bios(drm->device); | 114 | struct nouveau_bios *bios = nvkm_bios(&drm->device); |
115 | struct nouveau_clock *clk = nouveau_clock(drm->device); | 115 | struct nouveau_clock *clk = nvkm_clock(&drm->device); |
116 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 116 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
117 | struct nv04_mode_state *state = &nv04_display(dev)->mode_reg; | 117 | struct nv04_mode_state *state = &nv04_display(dev)->mode_reg; |
118 | struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index]; | 118 | struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index]; |
@@ -136,7 +136,7 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod | |||
136 | * has yet been observed in allowing the use a single stage pll on all | 136 | * has yet been observed in allowing the use a single stage pll on all |
137 | * nv43 however. the behaviour of single stage use is untested on nv40 | 137 | * nv43 however. the behaviour of single stage use is untested on nv40 |
138 | */ | 138 | */ |
139 | if (nv_device(drm->device)->chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2)) | 139 | if (drm->device.info.chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2)) |
140 | memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2)); | 140 | memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2)); |
141 | 141 | ||
142 | 142 | ||
@@ -146,10 +146,10 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod | |||
146 | state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK; | 146 | state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK; |
147 | 147 | ||
148 | /* The blob uses this always, so let's do the same */ | 148 | /* The blob uses this always, so let's do the same */ |
149 | if (nv_device(drm->device)->card_type == NV_40) | 149 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) |
150 | state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE; | 150 | state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE; |
151 | /* again nv40 and some nv43 act more like nv3x as described above */ | 151 | /* again nv40 and some nv43 act more like nv3x as described above */ |
152 | if (nv_device(drm->device)->chipset < 0x41) | 152 | if (drm->device.info.chipset < 0x41) |
153 | state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL | | 153 | state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL | |
154 | NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL; | 154 | NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL; |
155 | state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK; | 155 | state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK; |
@@ -275,7 +275,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
275 | horizEnd = horizTotal - 2; | 275 | horizEnd = horizTotal - 2; |
276 | horizBlankEnd = horizTotal + 4; | 276 | horizBlankEnd = horizTotal + 4; |
277 | #if 0 | 277 | #if 0 |
278 | if (dev->overlayAdaptor && nv_device(drm->device)->card_type >= NV_10) | 278 | if (dev->overlayAdaptor && drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) |
279 | /* This reportedly works around some video overlay bandwidth problems */ | 279 | /* This reportedly works around some video overlay bandwidth problems */ |
280 | horizTotal += 2; | 280 | horizTotal += 2; |
281 | #endif | 281 | #endif |
@@ -509,7 +509,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) | |||
509 | regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 | | 509 | regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 | |
510 | NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 | | 510 | NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 | |
511 | NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM; | 511 | NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM; |
512 | if (nv_device(drm->device)->chipset >= 0x11) | 512 | if (drm->device.info.chipset >= 0x11) |
513 | regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32; | 513 | regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32; |
514 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | 514 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
515 | regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE; | 515 | regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE; |
@@ -550,26 +550,26 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) | |||
550 | * 1 << 30 on 0x60.830), for no apparent reason */ | 550 | * 1 << 30 on 0x60.830), for no apparent reason */ |
551 | regp->CRTC[NV_CIO_CRE_59] = off_chip_digital; | 551 | regp->CRTC[NV_CIO_CRE_59] = off_chip_digital; |
552 | 552 | ||
553 | if (nv_device(drm->device)->card_type >= NV_30) | 553 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) |
554 | regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1; | 554 | regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1; |
555 | 555 | ||
556 | regp->crtc_830 = mode->crtc_vdisplay - 3; | 556 | regp->crtc_830 = mode->crtc_vdisplay - 3; |
557 | regp->crtc_834 = mode->crtc_vdisplay - 1; | 557 | regp->crtc_834 = mode->crtc_vdisplay - 1; |
558 | 558 | ||
559 | if (nv_device(drm->device)->card_type == NV_40) | 559 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) |
560 | /* This is what the blob does */ | 560 | /* This is what the blob does */ |
561 | regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850); | 561 | regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850); |
562 | 562 | ||
563 | if (nv_device(drm->device)->card_type >= NV_30) | 563 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) |
564 | regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT); | 564 | regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT); |
565 | 565 | ||
566 | if (nv_device(drm->device)->card_type >= NV_10) | 566 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) |
567 | regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC; | 567 | regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC; |
568 | else | 568 | else |
569 | regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC; | 569 | regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC; |
570 | 570 | ||
571 | /* Some misc regs */ | 571 | /* Some misc regs */ |
572 | if (nv_device(drm->device)->card_type == NV_40) { | 572 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) { |
573 | regp->CRTC[NV_CIO_CRE_85] = 0xFF; | 573 | regp->CRTC[NV_CIO_CRE_85] = 0xFF; |
574 | regp->CRTC[NV_CIO_CRE_86] = 0x1; | 574 | regp->CRTC[NV_CIO_CRE_86] = 0x1; |
575 | } | 575 | } |
@@ -581,7 +581,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) | |||
581 | 581 | ||
582 | /* Generic PRAMDAC regs */ | 582 | /* Generic PRAMDAC regs */ |
583 | 583 | ||
584 | if (nv_device(drm->device)->card_type >= NV_10) | 584 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) |
585 | /* Only bit that bios and blob set. */ | 585 | /* Only bit that bios and blob set. */ |
586 | regp->nv10_cursync = (1 << 25); | 586 | regp->nv10_cursync = (1 << 25); |
587 | 587 | ||
@@ -590,7 +590,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) | |||
590 | NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON; | 590 | NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON; |
591 | if (crtc->primary->fb->depth == 16) | 591 | if (crtc->primary->fb->depth == 16) |
592 | regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL; | 592 | regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL; |
593 | if (nv_device(drm->device)->chipset >= 0x11) | 593 | if (drm->device.info.chipset >= 0x11) |
594 | regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG; | 594 | regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG; |
595 | 595 | ||
596 | regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */ | 596 | regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */ |
@@ -653,7 +653,7 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
653 | 653 | ||
654 | nv_crtc_mode_set_vga(crtc, adjusted_mode); | 654 | nv_crtc_mode_set_vga(crtc, adjusted_mode); |
655 | /* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */ | 655 | /* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */ |
656 | if (nv_device(drm->device)->card_type == NV_40) | 656 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) |
657 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk); | 657 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk); |
658 | nv_crtc_mode_set_regs(crtc, adjusted_mode); | 658 | nv_crtc_mode_set_regs(crtc, adjusted_mode); |
659 | nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock); | 659 | nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock); |
@@ -714,7 +714,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc) | |||
714 | 714 | ||
715 | /* Some more preparation. */ | 715 | /* Some more preparation. */ |
716 | NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA); | 716 | NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA); |
717 | if (nv_device(drm->device)->card_type == NV_40) { | 717 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) { |
718 | uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900); | 718 | uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900); |
719 | NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000); | 719 | NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000); |
720 | } | 720 | } |
@@ -888,7 +888,7 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
888 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX); | 888 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX); |
889 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX); | 889 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX); |
890 | 890 | ||
891 | if (nv_device(drm->device)->card_type >= NV_20) { | 891 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN) { |
892 | regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8; | 892 | regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8; |
893 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47); | 893 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47); |
894 | } | 894 | } |
@@ -915,9 +915,9 @@ nv04_crtc_mode_set_base_atomic(struct drm_crtc *crtc, | |||
915 | struct drm_device *dev = drm->dev; | 915 | struct drm_device *dev = drm->dev; |
916 | 916 | ||
917 | if (state == ENTER_ATOMIC_MODE_SET) | 917 | if (state == ENTER_ATOMIC_MODE_SET) |
918 | nouveau_fbcon_save_disable_accel(dev); | 918 | nouveau_fbcon_accel_save_disable(dev); |
919 | else | 919 | else |
920 | nouveau_fbcon_restore_accel(dev); | 920 | nouveau_fbcon_accel_restore(dev); |
921 | 921 | ||
922 | return nv04_crtc_do_mode_set_base(crtc, fb, x, y, true); | 922 | return nv04_crtc_do_mode_set_base(crtc, fb, x, y, true); |
923 | } | 923 | } |
@@ -969,7 +969,7 @@ static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src, | |||
969 | { | 969 | { |
970 | struct nouveau_drm *drm = nouveau_drm(dev); | 970 | struct nouveau_drm *drm = nouveau_drm(dev); |
971 | 971 | ||
972 | if (nv_device(drm->device)->chipset == 0x11) { | 972 | if (drm->device.info.chipset == 0x11) { |
973 | pixel = ((pixel & 0x000000ff) << 24) | | 973 | pixel = ((pixel & 0x000000ff) << 24) | |
974 | ((pixel & 0x0000ff00) << 8) | | 974 | ((pixel & 0x0000ff00) << 8) | |
975 | ((pixel & 0x00ff0000) >> 8) | | 975 | ((pixel & 0x00ff0000) >> 8) | |
@@ -1010,7 +1010,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | |||
1010 | if (ret) | 1010 | if (ret) |
1011 | goto out; | 1011 | goto out; |
1012 | 1012 | ||
1013 | if (nv_device(drm->device)->chipset >= 0x11) | 1013 | if (drm->device.info.chipset >= 0x11) |
1014 | nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo); | 1014 | nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo); |
1015 | else | 1015 | else |
1016 | nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo); | 1016 | nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo); |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/cursor.c b/drivers/gpu/drm/nouveau/dispnv04/cursor.c index a810303169de..4e61173c3353 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/cursor.c +++ b/drivers/gpu/drm/nouveau/dispnv04/cursor.c | |||
@@ -55,7 +55,7 @@ nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) | |||
55 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); | 55 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); |
56 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); | 56 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); |
57 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); | 57 | crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); |
58 | if (nv_device(drm->device)->card_type == NV_40) | 58 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) |
59 | nv_fix_nv40_hw_cursor(dev, nv_crtc->index); | 59 | nv_fix_nv40_hw_cursor(dev, nv_crtc->index); |
60 | } | 60 | } |
61 | 61 | ||
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c index a96dda48718e..2d8056cde996 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/dac.c +++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c | |||
@@ -65,8 +65,8 @@ int nv04_dac_output_offset(struct drm_encoder *encoder) | |||
65 | 65 | ||
66 | static int sample_load_twice(struct drm_device *dev, bool sense[2]) | 66 | static int sample_load_twice(struct drm_device *dev, bool sense[2]) |
67 | { | 67 | { |
68 | struct nouveau_device *device = nouveau_dev(dev); | 68 | struct nvif_device *device = &nouveau_drm(dev)->device; |
69 | struct nouveau_timer *ptimer = nouveau_timer(device); | 69 | struct nouveau_timer *ptimer = nvkm_timer(device); |
70 | int i; | 70 | int i; |
71 | 71 | ||
72 | for (i = 0; i < 2; i++) { | 72 | for (i = 0; i < 2; i++) { |
@@ -95,15 +95,15 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2]) | |||
95 | 95 | ||
96 | udelay(100); | 96 | udelay(100); |
97 | /* when level triggers, sense is _LO_ */ | 97 | /* when level triggers, sense is _LO_ */ |
98 | sense_a = nv_rd08(device, NV_PRMCIO_INP0) & 0x10; | 98 | sense_a = nvif_rd08(device, NV_PRMCIO_INP0) & 0x10; |
99 | 99 | ||
100 | /* take another reading until it agrees with sense_a... */ | 100 | /* take another reading until it agrees with sense_a... */ |
101 | do { | 101 | do { |
102 | udelay(100); | 102 | udelay(100); |
103 | sense_b = nv_rd08(device, NV_PRMCIO_INP0) & 0x10; | 103 | sense_b = nvif_rd08(device, NV_PRMCIO_INP0) & 0x10; |
104 | if (sense_a != sense_b) { | 104 | if (sense_a != sense_b) { |
105 | sense_b_prime = | 105 | sense_b_prime = |
106 | nv_rd08(device, NV_PRMCIO_INP0) & 0x10; | 106 | nvif_rd08(device, NV_PRMCIO_INP0) & 0x10; |
107 | if (sense_b == sense_b_prime) { | 107 | if (sense_b == sense_b_prime) { |
108 | /* ... unless two consecutive subsequent | 108 | /* ... unless two consecutive subsequent |
109 | * samples agree; sense_a is replaced */ | 109 | * samples agree; sense_a is replaced */ |
@@ -128,7 +128,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder, | |||
128 | struct drm_connector *connector) | 128 | struct drm_connector *connector) |
129 | { | 129 | { |
130 | struct drm_device *dev = encoder->dev; | 130 | struct drm_device *dev = encoder->dev; |
131 | struct nouveau_device *device = nouveau_dev(dev); | 131 | struct nvif_device *device = &nouveau_drm(dev)->device; |
132 | struct nouveau_drm *drm = nouveau_drm(dev); | 132 | struct nouveau_drm *drm = nouveau_drm(dev); |
133 | uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode; | 133 | uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode; |
134 | uint8_t saved_palette0[3], saved_palette_mask; | 134 | uint8_t saved_palette0[3], saved_palette_mask; |
@@ -164,11 +164,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder, | |||
164 | saved_rpc1 = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX); | 164 | saved_rpc1 = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX); |
165 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1 & ~0xc0); | 165 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1 & ~0xc0); |
166 | 166 | ||
167 | nv_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS, 0x0); | 167 | nvif_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS, 0x0); |
168 | for (i = 0; i < 3; i++) | 168 | for (i = 0; i < 3; i++) |
169 | saved_palette0[i] = nv_rd08(device, NV_PRMDIO_PALETTE_DATA); | 169 | saved_palette0[i] = nvif_rd08(device, NV_PRMDIO_PALETTE_DATA); |
170 | saved_palette_mask = nv_rd08(device, NV_PRMDIO_PIXEL_MASK); | 170 | saved_palette_mask = nvif_rd08(device, NV_PRMDIO_PIXEL_MASK); |
171 | nv_wr08(device, NV_PRMDIO_PIXEL_MASK, 0); | 171 | nvif_wr08(device, NV_PRMDIO_PIXEL_MASK, 0); |
172 | 172 | ||
173 | saved_rgen_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL); | 173 | saved_rgen_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL); |
174 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, | 174 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, |
@@ -181,11 +181,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder, | |||
181 | do { | 181 | do { |
182 | bool sense_pair[2]; | 182 | bool sense_pair[2]; |
183 | 183 | ||
184 | nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0); | 184 | nvif_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0); |
185 | nv_wr08(device, NV_PRMDIO_PALETTE_DATA, 0); | 185 | nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, 0); |
186 | nv_wr08(device, NV_PRMDIO_PALETTE_DATA, 0); | 186 | nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, 0); |
187 | /* testing blue won't find monochrome monitors. I don't care */ | 187 | /* testing blue won't find monochrome monitors. I don't care */ |
188 | nv_wr08(device, NV_PRMDIO_PALETTE_DATA, blue); | 188 | nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, blue); |
189 | 189 | ||
190 | i = 0; | 190 | i = 0; |
191 | /* take sample pairs until both samples in the pair agree */ | 191 | /* take sample pairs until both samples in the pair agree */ |
@@ -208,11 +208,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder, | |||
208 | } while (++blue < 0x18 && sense); | 208 | } while (++blue < 0x18 && sense); |
209 | 209 | ||
210 | out: | 210 | out: |
211 | nv_wr08(device, NV_PRMDIO_PIXEL_MASK, saved_palette_mask); | 211 | nvif_wr08(device, NV_PRMDIO_PIXEL_MASK, saved_palette_mask); |
212 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, saved_rgen_ctrl); | 212 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, saved_rgen_ctrl); |
213 | nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0); | 213 | nvif_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0); |
214 | for (i = 0; i < 3; i++) | 214 | for (i = 0; i < 3; i++) |
215 | nv_wr08(device, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]); | 215 | nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]); |
216 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, saved_rtest_ctrl); | 216 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, saved_rtest_ctrl); |
217 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi); | 217 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi); |
218 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1); | 218 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1); |
@@ -231,8 +231,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) | |||
231 | { | 231 | { |
232 | struct drm_device *dev = encoder->dev; | 232 | struct drm_device *dev = encoder->dev; |
233 | struct nouveau_drm *drm = nouveau_drm(dev); | 233 | struct nouveau_drm *drm = nouveau_drm(dev); |
234 | struct nouveau_device *device = nouveau_dev(dev); | 234 | struct nvif_device *device = &nouveau_drm(dev)->device; |
235 | struct nouveau_gpio *gpio = nouveau_gpio(device); | 235 | struct nouveau_gpio *gpio = nvkm_gpio(device); |
236 | struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; | 236 | struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; |
237 | uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder); | 237 | uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder); |
238 | uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput, | 238 | uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput, |
@@ -256,12 +256,12 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) | |||
256 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, | 256 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, |
257 | saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF); | 257 | saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF); |
258 | 258 | ||
259 | saved_powerctrl_2 = nv_rd32(device, NV_PBUS_POWERCTRL_2); | 259 | saved_powerctrl_2 = nvif_rd32(device, NV_PBUS_POWERCTRL_2); |
260 | 260 | ||
261 | nv_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff); | 261 | nvif_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff); |
262 | if (regoffset == 0x68) { | 262 | if (regoffset == 0x68) { |
263 | saved_powerctrl_4 = nv_rd32(device, NV_PBUS_POWERCTRL_4); | 263 | saved_powerctrl_4 = nvif_rd32(device, NV_PBUS_POWERCTRL_4); |
264 | nv_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf); | 264 | nvif_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf); |
265 | } | 265 | } |
266 | 266 | ||
267 | if (gpio) { | 267 | if (gpio) { |
@@ -283,7 +283,7 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) | |||
283 | /* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */ | 283 | /* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */ |
284 | routput = (saved_routput & 0xfffffece) | head << 8; | 284 | routput = (saved_routput & 0xfffffece) | head << 8; |
285 | 285 | ||
286 | if (nv_device(drm->device)->card_type >= NV_40) { | 286 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_CURIE) { |
287 | if (dcb->type == DCB_OUTPUT_TV) | 287 | if (dcb->type == DCB_OUTPUT_TV) |
288 | routput |= 0x1a << 16; | 288 | routput |= 0x1a << 16; |
289 | else | 289 | else |
@@ -316,8 +316,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) | |||
316 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, saved_routput); | 316 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, saved_routput); |
317 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl); | 317 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl); |
318 | if (regoffset == 0x68) | 318 | if (regoffset == 0x68) |
319 | nv_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4); | 319 | nvif_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4); |
320 | nv_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2); | 320 | nvif_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2); |
321 | 321 | ||
322 | if (gpio) { | 322 | if (gpio) { |
323 | gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, saved_gpio1); | 323 | gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, saved_gpio1); |
@@ -398,7 +398,7 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder, | |||
398 | } | 398 | } |
399 | 399 | ||
400 | /* This could use refinement for flatpanels, but it should work this way */ | 400 | /* This could use refinement for flatpanels, but it should work this way */ |
401 | if (nv_device(drm->device)->chipset < 0x44) | 401 | if (drm->device.info.chipset < 0x44) |
402 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); | 402 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); |
403 | else | 403 | else |
404 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); | 404 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c index e57babb206d3..42a5435259f7 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c | |||
@@ -281,7 +281,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder, | |||
281 | struct drm_display_mode *adjusted_mode) | 281 | struct drm_display_mode *adjusted_mode) |
282 | { | 282 | { |
283 | struct drm_device *dev = encoder->dev; | 283 | struct drm_device *dev = encoder->dev; |
284 | struct nouveau_device *device = nouveau_dev(dev); | 284 | struct nvif_device *device = &nouveau_drm(dev)->device; |
285 | struct nouveau_drm *drm = nouveau_drm(dev); | 285 | struct nouveau_drm *drm = nouveau_drm(dev); |
286 | struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); | 286 | struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); |
287 | struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index]; | 287 | struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index]; |
@@ -335,7 +335,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder, | |||
335 | regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE; | 335 | regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE; |
336 | else /* gpu needs to scale */ | 336 | else /* gpu needs to scale */ |
337 | regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE; | 337 | regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE; |
338 | if (nv_rd32(device, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT) | 338 | if (nvif_rd32(device, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT) |
339 | regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12; | 339 | regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12; |
340 | if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && | 340 | if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && |
341 | output_mode->clock > 165000) | 341 | output_mode->clock > 165000) |
@@ -416,7 +416,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder, | |||
416 | if ((nv_connector->dithering_mode == DITHERING_MODE_ON) || | 416 | if ((nv_connector->dithering_mode == DITHERING_MODE_ON) || |
417 | (nv_connector->dithering_mode == DITHERING_MODE_AUTO && | 417 | (nv_connector->dithering_mode == DITHERING_MODE_AUTO && |
418 | encoder->crtc->primary->fb->depth > connector->display_info.bpc * 3)) { | 418 | encoder->crtc->primary->fb->depth > connector->display_info.bpc * 3)) { |
419 | if (nv_device(drm->device)->chipset == 0x11) | 419 | if (drm->device.info.chipset == 0x11) |
420 | regp->dither = savep->dither | 0x00010000; | 420 | regp->dither = savep->dither | 0x00010000; |
421 | else { | 421 | else { |
422 | int i; | 422 | int i; |
@@ -427,7 +427,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder, | |||
427 | } | 427 | } |
428 | } | 428 | } |
429 | } else { | 429 | } else { |
430 | if (nv_device(drm->device)->chipset != 0x11) { | 430 | if (drm->device.info.chipset != 0x11) { |
431 | /* reset them */ | 431 | /* reset them */ |
432 | int i; | 432 | int i; |
433 | for (i = 0; i < 3; i++) { | 433 | for (i = 0; i < 3; i++) { |
@@ -463,7 +463,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder) | |||
463 | NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); | 463 | NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); |
464 | 464 | ||
465 | /* This could use refinement for flatpanels, but it should work this way */ | 465 | /* This could use refinement for flatpanels, but it should work this way */ |
466 | if (nv_device(drm->device)->chipset < 0x44) | 466 | if (drm->device.info.chipset < 0x44) |
467 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); | 467 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); |
468 | else | 468 | else |
469 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); | 469 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); |
@@ -485,7 +485,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode) | |||
485 | { | 485 | { |
486 | #ifdef __powerpc__ | 486 | #ifdef __powerpc__ |
487 | struct drm_device *dev = encoder->dev; | 487 | struct drm_device *dev = encoder->dev; |
488 | struct nouveau_device *device = nouveau_dev(dev); | 488 | struct nvif_device *device = &nouveau_drm(dev)->device; |
489 | 489 | ||
490 | /* BIOS scripts usually take care of the backlight, thanks | 490 | /* BIOS scripts usually take care of the backlight, thanks |
491 | * Apple for your consistency. | 491 | * Apple for your consistency. |
@@ -623,7 +623,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder) | |||
623 | struct drm_device *dev = encoder->dev; | 623 | struct drm_device *dev = encoder->dev; |
624 | struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; | 624 | struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; |
625 | struct nouveau_drm *drm = nouveau_drm(dev); | 625 | struct nouveau_drm *drm = nouveau_drm(dev); |
626 | struct nouveau_i2c *i2c = nouveau_i2c(drm->device); | 626 | struct nouveau_i2c *i2c = nvkm_i2c(&drm->device); |
627 | struct nouveau_i2c_port *port = i2c->find(i2c, 2); | 627 | struct nouveau_i2c_port *port = i2c->find(i2c, 2); |
628 | struct nouveau_i2c_board_info info[] = { | 628 | struct nouveau_i2c_board_info info[] = { |
629 | { | 629 | { |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index 4342fdaee707..3d0afa1c6cff 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c | |||
@@ -22,9 +22,6 @@ | |||
22 | * Author: Ben Skeggs | 22 | * Author: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/object.h> | ||
26 | #include <core/class.h> | ||
27 | |||
28 | #include <drm/drmP.h> | 25 | #include <drm/drmP.h> |
29 | #include <drm/drm_crtc_helper.h> | 26 | #include <drm/drm_crtc_helper.h> |
30 | 27 | ||
@@ -34,8 +31,6 @@ | |||
34 | #include "nouveau_encoder.h" | 31 | #include "nouveau_encoder.h" |
35 | #include "nouveau_connector.h" | 32 | #include "nouveau_connector.h" |
36 | 33 | ||
37 | #include <subdev/i2c.h> | ||
38 | |||
39 | int | 34 | int |
40 | nv04_display_early_init(struct drm_device *dev) | 35 | nv04_display_early_init(struct drm_device *dev) |
41 | { | 36 | { |
@@ -58,7 +53,7 @@ int | |||
58 | nv04_display_create(struct drm_device *dev) | 53 | nv04_display_create(struct drm_device *dev) |
59 | { | 54 | { |
60 | struct nouveau_drm *drm = nouveau_drm(dev); | 55 | struct nouveau_drm *drm = nouveau_drm(dev); |
61 | struct nouveau_i2c *i2c = nouveau_i2c(drm->device); | 56 | struct nouveau_i2c *i2c = nvkm_i2c(&drm->device); |
62 | struct dcb_table *dcb = &drm->vbios.dcb; | 57 | struct dcb_table *dcb = &drm->vbios.dcb; |
63 | struct drm_connector *connector, *ct; | 58 | struct drm_connector *connector, *ct; |
64 | struct drm_encoder *encoder; | 59 | struct drm_encoder *encoder; |
@@ -70,6 +65,8 @@ nv04_display_create(struct drm_device *dev) | |||
70 | if (!disp) | 65 | if (!disp) |
71 | return -ENOMEM; | 66 | return -ENOMEM; |
72 | 67 | ||
68 | nvif_object_map(nvif_object(&drm->device)); | ||
69 | |||
73 | nouveau_display(dev)->priv = disp; | 70 | nouveau_display(dev)->priv = disp; |
74 | nouveau_display(dev)->dtor = nv04_display_destroy; | 71 | nouveau_display(dev)->dtor = nv04_display_destroy; |
75 | nouveau_display(dev)->init = nv04_display_init; | 72 | nouveau_display(dev)->init = nv04_display_init; |
@@ -144,6 +141,7 @@ void | |||
144 | nv04_display_destroy(struct drm_device *dev) | 141 | nv04_display_destroy(struct drm_device *dev) |
145 | { | 142 | { |
146 | struct nv04_display *disp = nv04_display(dev); | 143 | struct nv04_display *disp = nv04_display(dev); |
144 | struct nouveau_drm *drm = nouveau_drm(dev); | ||
147 | struct drm_encoder *encoder; | 145 | struct drm_encoder *encoder; |
148 | struct drm_crtc *crtc; | 146 | struct drm_crtc *crtc; |
149 | 147 | ||
@@ -170,6 +168,8 @@ nv04_display_destroy(struct drm_device *dev) | |||
170 | 168 | ||
171 | nouveau_display(dev)->priv = NULL; | 169 | nouveau_display(dev)->priv = NULL; |
172 | kfree(disp); | 170 | kfree(disp); |
171 | |||
172 | nvif_object_unmap(nvif_object(&drm->device)); | ||
173 | } | 173 | } |
174 | 174 | ||
175 | int | 175 | int |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h index 4245fc3dab70..17b899d9aba3 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.h +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h | |||
@@ -131,7 +131,7 @@ nv_two_heads(struct drm_device *dev) | |||
131 | struct nouveau_drm *drm = nouveau_drm(dev); | 131 | struct nouveau_drm *drm = nouveau_drm(dev); |
132 | const int impl = dev->pdev->device & 0x0ff0; | 132 | const int impl = dev->pdev->device & 0x0ff0; |
133 | 133 | ||
134 | if (nv_device(drm->device)->card_type >= NV_10 && impl != 0x0100 && | 134 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS && impl != 0x0100 && |
135 | impl != 0x0150 && impl != 0x01a0 && impl != 0x0200) | 135 | impl != 0x0150 && impl != 0x01a0 && impl != 0x0200) |
136 | return true; | 136 | return true; |
137 | 137 | ||
@@ -150,7 +150,7 @@ nv_two_reg_pll(struct drm_device *dev) | |||
150 | struct nouveau_drm *drm = nouveau_drm(dev); | 150 | struct nouveau_drm *drm = nouveau_drm(dev); |
151 | const int impl = dev->pdev->device & 0x0ff0; | 151 | const int impl = dev->pdev->device & 0x0ff0; |
152 | 152 | ||
153 | if (impl == 0x0310 || impl == 0x0340 || nv_device(drm->device)->card_type >= NV_40) | 153 | if (impl == 0x0310 || impl == 0x0340 || drm->device.info.family >= NV_DEVICE_INFO_V0_CURIE) |
154 | return true; | 154 | return true; |
155 | return false; | 155 | return false; |
156 | } | 156 | } |
@@ -171,8 +171,8 @@ static inline void | |||
171 | nouveau_bios_run_init_table(struct drm_device *dev, u16 table, | 171 | nouveau_bios_run_init_table(struct drm_device *dev, u16 table, |
172 | struct dcb_output *outp, int crtc) | 172 | struct dcb_output *outp, int crtc) |
173 | { | 173 | { |
174 | struct nouveau_device *device = nouveau_dev(dev); | 174 | struct nouveau_drm *drm = nouveau_drm(dev); |
175 | struct nouveau_bios *bios = nouveau_bios(device); | 175 | struct nouveau_bios *bios = nvkm_bios(&drm->device); |
176 | struct nvbios_init init = { | 176 | struct nvbios_init init = { |
177 | .subdev = nv_subdev(bios), | 177 | .subdev = nv_subdev(bios), |
178 | .bios = bios, | 178 | .bios = bios, |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c index aca76af115b3..3d4c19300768 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/hw.c +++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c | |||
@@ -27,9 +27,6 @@ | |||
27 | #include "hw.h" | 27 | #include "hw.h" |
28 | 28 | ||
29 | #include <subdev/bios/pll.h> | 29 | #include <subdev/bios/pll.h> |
30 | #include <subdev/fb.h> | ||
31 | #include <subdev/clock.h> | ||
32 | #include <subdev/timer.h> | ||
33 | 30 | ||
34 | #define CHIPSET_NFORCE 0x01a0 | 31 | #define CHIPSET_NFORCE 0x01a0 |
35 | #define CHIPSET_NFORCE2 0x01f0 | 32 | #define CHIPSET_NFORCE2 0x01f0 |
@@ -92,7 +89,7 @@ NVSetOwner(struct drm_device *dev, int owner) | |||
92 | if (owner == 1) | 89 | if (owner == 1) |
93 | owner *= 3; | 90 | owner *= 3; |
94 | 91 | ||
95 | if (nv_device(drm->device)->chipset == 0x11) { | 92 | if (drm->device.info.chipset == 0x11) { |
96 | /* This might seem stupid, but the blob does it and | 93 | /* This might seem stupid, but the blob does it and |
97 | * omitting it often locks the system up. | 94 | * omitting it often locks the system up. |
98 | */ | 95 | */ |
@@ -103,7 +100,7 @@ NVSetOwner(struct drm_device *dev, int owner) | |||
103 | /* CR44 is always changed on CRTC0 */ | 100 | /* CR44 is always changed on CRTC0 */ |
104 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner); | 101 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner); |
105 | 102 | ||
106 | if (nv_device(drm->device)->chipset == 0x11) { /* set me harder */ | 103 | if (drm->device.info.chipset == 0x11) { /* set me harder */ |
107 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); | 104 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); |
108 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); | 105 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); |
109 | } | 106 | } |
@@ -152,7 +149,7 @@ nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1, | |||
152 | pllvals->NM1 = pll1 & 0xffff; | 149 | pllvals->NM1 = pll1 & 0xffff; |
153 | if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2) | 150 | if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2) |
154 | pllvals->NM2 = pll2 & 0xffff; | 151 | pllvals->NM2 = pll2 & 0xffff; |
155 | else if (nv_device(drm->device)->chipset == 0x30 || nv_device(drm->device)->chipset == 0x35) { | 152 | else if (drm->device.info.chipset == 0x30 || drm->device.info.chipset == 0x35) { |
156 | pllvals->M1 &= 0xf; /* only 4 bits */ | 153 | pllvals->M1 &= 0xf; /* only 4 bits */ |
157 | if (pll1 & NV30_RAMDAC_ENABLE_VCO2) { | 154 | if (pll1 & NV30_RAMDAC_ENABLE_VCO2) { |
158 | pllvals->M2 = (pll1 >> 4) & 0x7; | 155 | pllvals->M2 = (pll1 >> 4) & 0x7; |
@@ -168,8 +165,8 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype, | |||
168 | struct nouveau_pll_vals *pllvals) | 165 | struct nouveau_pll_vals *pllvals) |
169 | { | 166 | { |
170 | struct nouveau_drm *drm = nouveau_drm(dev); | 167 | struct nouveau_drm *drm = nouveau_drm(dev); |
171 | struct nouveau_device *device = nv_device(drm->device); | 168 | struct nvif_device *device = &drm->device; |
172 | struct nouveau_bios *bios = nouveau_bios(device); | 169 | struct nouveau_bios *bios = nvkm_bios(device); |
173 | uint32_t reg1, pll1, pll2 = 0; | 170 | uint32_t reg1, pll1, pll2 = 0; |
174 | struct nvbios_pll pll_lim; | 171 | struct nvbios_pll pll_lim; |
175 | int ret; | 172 | int ret; |
@@ -178,16 +175,16 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype, | |||
178 | if (ret || !(reg1 = pll_lim.reg)) | 175 | if (ret || !(reg1 = pll_lim.reg)) |
179 | return -ENOENT; | 176 | return -ENOENT; |
180 | 177 | ||
181 | pll1 = nv_rd32(device, reg1); | 178 | pll1 = nvif_rd32(device, reg1); |
182 | if (reg1 <= 0x405c) | 179 | if (reg1 <= 0x405c) |
183 | pll2 = nv_rd32(device, reg1 + 4); | 180 | pll2 = nvif_rd32(device, reg1 + 4); |
184 | else if (nv_two_reg_pll(dev)) { | 181 | else if (nv_two_reg_pll(dev)) { |
185 | uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70); | 182 | uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70); |
186 | 183 | ||
187 | pll2 = nv_rd32(device, reg2); | 184 | pll2 = nvif_rd32(device, reg2); |
188 | } | 185 | } |
189 | 186 | ||
190 | if (nv_device(drm->device)->card_type == 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) { | 187 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS && reg1 >= NV_PRAMDAC_VPLL_COEFF) { |
191 | uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580); | 188 | uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580); |
192 | 189 | ||
193 | /* check whether vpll has been forced into single stage mode */ | 190 | /* check whether vpll has been forced into single stage mode */ |
@@ -255,9 +252,9 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head) | |||
255 | */ | 252 | */ |
256 | 253 | ||
257 | struct nouveau_drm *drm = nouveau_drm(dev); | 254 | struct nouveau_drm *drm = nouveau_drm(dev); |
258 | struct nouveau_device *device = nv_device(drm->device); | 255 | struct nvif_device *device = &drm->device; |
259 | struct nouveau_clock *clk = nouveau_clock(device); | 256 | struct nouveau_clock *clk = nvkm_clock(device); |
260 | struct nouveau_bios *bios = nouveau_bios(device); | 257 | struct nouveau_bios *bios = nvkm_bios(device); |
261 | struct nvbios_pll pll_lim; | 258 | struct nvbios_pll pll_lim; |
262 | struct nouveau_pll_vals pv; | 259 | struct nouveau_pll_vals pv; |
263 | enum nvbios_pll_type pll = head ? PLL_VPLL1 : PLL_VPLL0; | 260 | enum nvbios_pll_type pll = head ? PLL_VPLL1 : PLL_VPLL0; |
@@ -394,21 +391,21 @@ nv_save_state_ramdac(struct drm_device *dev, int head, | |||
394 | struct nv04_crtc_reg *regp = &state->crtc_reg[head]; | 391 | struct nv04_crtc_reg *regp = &state->crtc_reg[head]; |
395 | int i; | 392 | int i; |
396 | 393 | ||
397 | if (nv_device(drm->device)->card_type >= NV_10) | 394 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) |
398 | regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC); | 395 | regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC); |
399 | 396 | ||
400 | nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, ®p->pllvals); | 397 | nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, ®p->pllvals); |
401 | state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT); | 398 | state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT); |
402 | if (nv_two_heads(dev)) | 399 | if (nv_two_heads(dev)) |
403 | state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK); | 400 | state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK); |
404 | if (nv_device(drm->device)->chipset == 0x11) | 401 | if (drm->device.info.chipset == 0x11) |
405 | regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11); | 402 | regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11); |
406 | 403 | ||
407 | regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL); | 404 | regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL); |
408 | 405 | ||
409 | if (nv_gf4_disp_arch(dev)) | 406 | if (nv_gf4_disp_arch(dev)) |
410 | regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630); | 407 | regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630); |
411 | if (nv_device(drm->device)->chipset >= 0x30) | 408 | if (drm->device.info.chipset >= 0x30) |
412 | regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634); | 409 | regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634); |
413 | 410 | ||
414 | regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP); | 411 | regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP); |
@@ -450,7 +447,7 @@ nv_save_state_ramdac(struct drm_device *dev, int head, | |||
450 | if (nv_gf4_disp_arch(dev)) | 447 | if (nv_gf4_disp_arch(dev)) |
451 | regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0); | 448 | regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0); |
452 | 449 | ||
453 | if (nv_device(drm->device)->card_type == NV_40) { | 450 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) { |
454 | regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20); | 451 | regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20); |
455 | regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24); | 452 | regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24); |
456 | regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34); | 453 | regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34); |
@@ -466,26 +463,26 @@ nv_load_state_ramdac(struct drm_device *dev, int head, | |||
466 | struct nv04_mode_state *state) | 463 | struct nv04_mode_state *state) |
467 | { | 464 | { |
468 | struct nouveau_drm *drm = nouveau_drm(dev); | 465 | struct nouveau_drm *drm = nouveau_drm(dev); |
469 | struct nouveau_clock *clk = nouveau_clock(drm->device); | 466 | struct nouveau_clock *clk = nvkm_clock(&drm->device); |
470 | struct nv04_crtc_reg *regp = &state->crtc_reg[head]; | 467 | struct nv04_crtc_reg *regp = &state->crtc_reg[head]; |
471 | uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; | 468 | uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; |
472 | int i; | 469 | int i; |
473 | 470 | ||
474 | if (nv_device(drm->device)->card_type >= NV_10) | 471 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) |
475 | NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync); | 472 | NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync); |
476 | 473 | ||
477 | clk->pll_prog(clk, pllreg, ®p->pllvals); | 474 | clk->pll_prog(clk, pllreg, ®p->pllvals); |
478 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel); | 475 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel); |
479 | if (nv_two_heads(dev)) | 476 | if (nv_two_heads(dev)) |
480 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk); | 477 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk); |
481 | if (nv_device(drm->device)->chipset == 0x11) | 478 | if (drm->device.info.chipset == 0x11) |
482 | NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither); | 479 | NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither); |
483 | 480 | ||
484 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl); | 481 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl); |
485 | 482 | ||
486 | if (nv_gf4_disp_arch(dev)) | 483 | if (nv_gf4_disp_arch(dev)) |
487 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630); | 484 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630); |
488 | if (nv_device(drm->device)->chipset >= 0x30) | 485 | if (drm->device.info.chipset >= 0x30) |
489 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634); | 486 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634); |
490 | 487 | ||
491 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup); | 488 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup); |
@@ -522,7 +519,7 @@ nv_load_state_ramdac(struct drm_device *dev, int head, | |||
522 | if (nv_gf4_disp_arch(dev)) | 519 | if (nv_gf4_disp_arch(dev)) |
523 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0); | 520 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0); |
524 | 521 | ||
525 | if (nv_device(drm->device)->card_type == NV_40) { | 522 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) { |
526 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20); | 523 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20); |
527 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24); | 524 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24); |
528 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34); | 525 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34); |
@@ -603,10 +600,10 @@ nv_save_state_ext(struct drm_device *dev, int head, | |||
603 | rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); | 600 | rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); |
604 | rd_cio_state(dev, head, regp, NV_CIO_CRE_21); | 601 | rd_cio_state(dev, head, regp, NV_CIO_CRE_21); |
605 | 602 | ||
606 | if (nv_device(drm->device)->card_type >= NV_20) | 603 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN) |
607 | rd_cio_state(dev, head, regp, NV_CIO_CRE_47); | 604 | rd_cio_state(dev, head, regp, NV_CIO_CRE_47); |
608 | 605 | ||
609 | if (nv_device(drm->device)->card_type >= NV_30) | 606 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) |
610 | rd_cio_state(dev, head, regp, 0x9f); | 607 | rd_cio_state(dev, head, regp, 0x9f); |
611 | 608 | ||
612 | rd_cio_state(dev, head, regp, NV_CIO_CRE_49); | 609 | rd_cio_state(dev, head, regp, NV_CIO_CRE_49); |
@@ -615,14 +612,14 @@ nv_save_state_ext(struct drm_device *dev, int head, | |||
615 | rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); | 612 | rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); |
616 | rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); | 613 | rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); |
617 | 614 | ||
618 | if (nv_device(drm->device)->card_type >= NV_10) { | 615 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { |
619 | regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830); | 616 | regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830); |
620 | regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834); | 617 | regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834); |
621 | 618 | ||
622 | if (nv_device(drm->device)->card_type >= NV_30) | 619 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) |
623 | regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT); | 620 | regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT); |
624 | 621 | ||
625 | if (nv_device(drm->device)->card_type == NV_40) | 622 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) |
626 | regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850); | 623 | regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850); |
627 | 624 | ||
628 | if (nv_two_heads(dev)) | 625 | if (nv_two_heads(dev)) |
@@ -634,7 +631,7 @@ nv_save_state_ext(struct drm_device *dev, int head, | |||
634 | 631 | ||
635 | rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); | 632 | rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); |
636 | rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); | 633 | rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); |
637 | if (nv_device(drm->device)->card_type >= NV_10) { | 634 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { |
638 | rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); | 635 | rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); |
639 | rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB); | 636 | rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB); |
640 | rd_cio_state(dev, head, regp, NV_CIO_CRE_4B); | 637 | rd_cio_state(dev, head, regp, NV_CIO_CRE_4B); |
@@ -663,14 +660,13 @@ nv_load_state_ext(struct drm_device *dev, int head, | |||
663 | struct nv04_mode_state *state) | 660 | struct nv04_mode_state *state) |
664 | { | 661 | { |
665 | struct nouveau_drm *drm = nouveau_drm(dev); | 662 | struct nouveau_drm *drm = nouveau_drm(dev); |
666 | struct nouveau_device *device = nv_device(drm->device); | 663 | struct nvif_device *device = &drm->device; |
667 | struct nouveau_timer *ptimer = nouveau_timer(device); | 664 | struct nouveau_timer *ptimer = nvkm_timer(device); |
668 | struct nouveau_fb *pfb = nouveau_fb(device); | ||
669 | struct nv04_crtc_reg *regp = &state->crtc_reg[head]; | 665 | struct nv04_crtc_reg *regp = &state->crtc_reg[head]; |
670 | uint32_t reg900; | 666 | uint32_t reg900; |
671 | int i; | 667 | int i; |
672 | 668 | ||
673 | if (nv_device(drm->device)->card_type >= NV_10) { | 669 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { |
674 | if (nv_two_heads(dev)) | 670 | if (nv_two_heads(dev)) |
675 | /* setting ENGINE_CTRL (EC) *must* come before | 671 | /* setting ENGINE_CTRL (EC) *must* come before |
676 | * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in | 672 | * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in |
@@ -678,24 +674,24 @@ nv_load_state_ext(struct drm_device *dev, int head, | |||
678 | */ | 674 | */ |
679 | NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl); | 675 | NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl); |
680 | 676 | ||
681 | nv_wr32(device, NV_PVIDEO_STOP, 1); | 677 | nvif_wr32(device, NV_PVIDEO_STOP, 1); |
682 | nv_wr32(device, NV_PVIDEO_INTR_EN, 0); | 678 | nvif_wr32(device, NV_PVIDEO_INTR_EN, 0); |
683 | nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0); | 679 | nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0); |
684 | nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0); | 680 | nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0); |
685 | nv_wr32(device, NV_PVIDEO_LIMIT(0), pfb->ram->size - 1); | 681 | nvif_wr32(device, NV_PVIDEO_LIMIT(0), device->info.ram_size - 1); |
686 | nv_wr32(device, NV_PVIDEO_LIMIT(1), pfb->ram->size - 1); | 682 | nvif_wr32(device, NV_PVIDEO_LIMIT(1), device->info.ram_size - 1); |
687 | nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), pfb->ram->size - 1); | 683 | nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), device->info.ram_size - 1); |
688 | nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), pfb->ram->size - 1); | 684 | nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), device->info.ram_size - 1); |
689 | nv_wr32(device, NV_PBUS_POWERCTRL_2, 0); | 685 | nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0); |
690 | 686 | ||
691 | NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg); | 687 | NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg); |
692 | NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830); | 688 | NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830); |
693 | NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834); | 689 | NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834); |
694 | 690 | ||
695 | if (nv_device(drm->device)->card_type >= NV_30) | 691 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) |
696 | NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext); | 692 | NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext); |
697 | 693 | ||
698 | if (nv_device(drm->device)->card_type == NV_40) { | 694 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) { |
699 | NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850); | 695 | NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850); |
700 | 696 | ||
701 | reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900); | 697 | reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900); |
@@ -718,23 +714,23 @@ nv_load_state_ext(struct drm_device *dev, int head, | |||
718 | wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); | 714 | wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); |
719 | wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); | 715 | wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); |
720 | 716 | ||
721 | if (nv_device(drm->device)->card_type >= NV_20) | 717 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN) |
722 | wr_cio_state(dev, head, regp, NV_CIO_CRE_47); | 718 | wr_cio_state(dev, head, regp, NV_CIO_CRE_47); |
723 | 719 | ||
724 | if (nv_device(drm->device)->card_type >= NV_30) | 720 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) |
725 | wr_cio_state(dev, head, regp, 0x9f); | 721 | wr_cio_state(dev, head, regp, 0x9f); |
726 | 722 | ||
727 | wr_cio_state(dev, head, regp, NV_CIO_CRE_49); | 723 | wr_cio_state(dev, head, regp, NV_CIO_CRE_49); |
728 | wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); | 724 | wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); |
729 | wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); | 725 | wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); |
730 | wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); | 726 | wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); |
731 | if (nv_device(drm->device)->card_type == NV_40) | 727 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) |
732 | nv_fix_nv40_hw_cursor(dev, head); | 728 | nv_fix_nv40_hw_cursor(dev, head); |
733 | wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); | 729 | wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); |
734 | 730 | ||
735 | wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); | 731 | wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); |
736 | wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); | 732 | wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); |
737 | if (nv_device(drm->device)->card_type >= NV_10) { | 733 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { |
738 | wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); | 734 | wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); |
739 | wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB); | 735 | wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB); |
740 | wr_cio_state(dev, head, regp, NV_CIO_CRE_4B); | 736 | wr_cio_state(dev, head, regp, NV_CIO_CRE_4B); |
@@ -742,7 +738,7 @@ nv_load_state_ext(struct drm_device *dev, int head, | |||
742 | } | 738 | } |
743 | /* NV11 and NV20 stop at 0x52. */ | 739 | /* NV11 and NV20 stop at 0x52. */ |
744 | if (nv_gf4_disp_arch(dev)) { | 740 | if (nv_gf4_disp_arch(dev)) { |
745 | if (nv_device(drm->device)->card_type < NV_20) { | 741 | if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN) { |
746 | /* Not waiting for vertical retrace before modifying | 742 | /* Not waiting for vertical retrace before modifying |
747 | CRE_53/CRE_54 causes lockups. */ | 743 | CRE_53/CRE_54 causes lockups. */ |
748 | nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8); | 744 | nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8); |
@@ -769,15 +765,15 @@ static void | |||
769 | nv_save_state_palette(struct drm_device *dev, int head, | 765 | nv_save_state_palette(struct drm_device *dev, int head, |
770 | struct nv04_mode_state *state) | 766 | struct nv04_mode_state *state) |
771 | { | 767 | { |
772 | struct nouveau_device *device = nouveau_dev(dev); | 768 | struct nvif_device *device = &nouveau_drm(dev)->device; |
773 | int head_offset = head * NV_PRMDIO_SIZE, i; | 769 | int head_offset = head * NV_PRMDIO_SIZE, i; |
774 | 770 | ||
775 | nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset, | 771 | nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset, |
776 | NV_PRMDIO_PIXEL_MASK_MASK); | 772 | NV_PRMDIO_PIXEL_MASK_MASK); |
777 | nv_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0); | 773 | nvif_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0); |
778 | 774 | ||
779 | for (i = 0; i < 768; i++) { | 775 | for (i = 0; i < 768; i++) { |
780 | state->crtc_reg[head].DAC[i] = nv_rd08(device, | 776 | state->crtc_reg[head].DAC[i] = nvif_rd08(device, |
781 | NV_PRMDIO_PALETTE_DATA + head_offset); | 777 | NV_PRMDIO_PALETTE_DATA + head_offset); |
782 | } | 778 | } |
783 | 779 | ||
@@ -788,15 +784,15 @@ void | |||
788 | nouveau_hw_load_state_palette(struct drm_device *dev, int head, | 784 | nouveau_hw_load_state_palette(struct drm_device *dev, int head, |
789 | struct nv04_mode_state *state) | 785 | struct nv04_mode_state *state) |
790 | { | 786 | { |
791 | struct nouveau_device *device = nouveau_dev(dev); | 787 | struct nvif_device *device = &nouveau_drm(dev)->device; |
792 | int head_offset = head * NV_PRMDIO_SIZE, i; | 788 | int head_offset = head * NV_PRMDIO_SIZE, i; |
793 | 789 | ||
794 | nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset, | 790 | nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset, |
795 | NV_PRMDIO_PIXEL_MASK_MASK); | 791 | NV_PRMDIO_PIXEL_MASK_MASK); |
796 | nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0); | 792 | nvif_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0); |
797 | 793 | ||
798 | for (i = 0; i < 768; i++) { | 794 | for (i = 0; i < 768; i++) { |
799 | nv_wr08(device, NV_PRMDIO_PALETTE_DATA + head_offset, | 795 | nvif_wr08(device, NV_PRMDIO_PALETTE_DATA + head_offset, |
800 | state->crtc_reg[head].DAC[i]); | 796 | state->crtc_reg[head].DAC[i]); |
801 | } | 797 | } |
802 | 798 | ||
@@ -808,7 +804,7 @@ void nouveau_hw_save_state(struct drm_device *dev, int head, | |||
808 | { | 804 | { |
809 | struct nouveau_drm *drm = nouveau_drm(dev); | 805 | struct nouveau_drm *drm = nouveau_drm(dev); |
810 | 806 | ||
811 | if (nv_device(drm->device)->chipset == 0x11) | 807 | if (drm->device.info.chipset == 0x11) |
812 | /* NB: no attempt is made to restore the bad pll later on */ | 808 | /* NB: no attempt is made to restore the bad pll later on */ |
813 | nouveau_hw_fix_bad_vpll(dev, head); | 809 | nouveau_hw_fix_bad_vpll(dev, head); |
814 | nv_save_state_ramdac(dev, head, state); | 810 | nv_save_state_ramdac(dev, head, state); |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.h b/drivers/gpu/drm/nouveau/dispnv04/hw.h index eeb70d912d99..7f53c571f31f 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/hw.h +++ b/drivers/gpu/drm/nouveau/dispnv04/hw.h | |||
@@ -60,41 +60,41 @@ extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp, | |||
60 | static inline uint32_t NVReadCRTC(struct drm_device *dev, | 60 | static inline uint32_t NVReadCRTC(struct drm_device *dev, |
61 | int head, uint32_t reg) | 61 | int head, uint32_t reg) |
62 | { | 62 | { |
63 | struct nouveau_device *device = nouveau_dev(dev); | 63 | struct nvif_device *device = &nouveau_drm(dev)->device; |
64 | uint32_t val; | 64 | uint32_t val; |
65 | if (head) | 65 | if (head) |
66 | reg += NV_PCRTC0_SIZE; | 66 | reg += NV_PCRTC0_SIZE; |
67 | val = nv_rd32(device, reg); | 67 | val = nvif_rd32(device, reg); |
68 | return val; | 68 | return val; |
69 | } | 69 | } |
70 | 70 | ||
71 | static inline void NVWriteCRTC(struct drm_device *dev, | 71 | static inline void NVWriteCRTC(struct drm_device *dev, |
72 | int head, uint32_t reg, uint32_t val) | 72 | int head, uint32_t reg, uint32_t val) |
73 | { | 73 | { |
74 | struct nouveau_device *device = nouveau_dev(dev); | 74 | struct nvif_device *device = &nouveau_drm(dev)->device; |
75 | if (head) | 75 | if (head) |
76 | reg += NV_PCRTC0_SIZE; | 76 | reg += NV_PCRTC0_SIZE; |
77 | nv_wr32(device, reg, val); | 77 | nvif_wr32(device, reg, val); |
78 | } | 78 | } |
79 | 79 | ||
80 | static inline uint32_t NVReadRAMDAC(struct drm_device *dev, | 80 | static inline uint32_t NVReadRAMDAC(struct drm_device *dev, |
81 | int head, uint32_t reg) | 81 | int head, uint32_t reg) |
82 | { | 82 | { |
83 | struct nouveau_device *device = nouveau_dev(dev); | 83 | struct nvif_device *device = &nouveau_drm(dev)->device; |
84 | uint32_t val; | 84 | uint32_t val; |
85 | if (head) | 85 | if (head) |
86 | reg += NV_PRAMDAC0_SIZE; | 86 | reg += NV_PRAMDAC0_SIZE; |
87 | val = nv_rd32(device, reg); | 87 | val = nvif_rd32(device, reg); |
88 | return val; | 88 | return val; |
89 | } | 89 | } |
90 | 90 | ||
91 | static inline void NVWriteRAMDAC(struct drm_device *dev, | 91 | static inline void NVWriteRAMDAC(struct drm_device *dev, |
92 | int head, uint32_t reg, uint32_t val) | 92 | int head, uint32_t reg, uint32_t val) |
93 | { | 93 | { |
94 | struct nouveau_device *device = nouveau_dev(dev); | 94 | struct nvif_device *device = &nouveau_drm(dev)->device; |
95 | if (head) | 95 | if (head) |
96 | reg += NV_PRAMDAC0_SIZE; | 96 | reg += NV_PRAMDAC0_SIZE; |
97 | nv_wr32(device, reg, val); | 97 | nvif_wr32(device, reg, val); |
98 | } | 98 | } |
99 | 99 | ||
100 | static inline uint8_t nv_read_tmds(struct drm_device *dev, | 100 | static inline uint8_t nv_read_tmds(struct drm_device *dev, |
@@ -120,18 +120,18 @@ static inline void nv_write_tmds(struct drm_device *dev, | |||
120 | static inline void NVWriteVgaCrtc(struct drm_device *dev, | 120 | static inline void NVWriteVgaCrtc(struct drm_device *dev, |
121 | int head, uint8_t index, uint8_t value) | 121 | int head, uint8_t index, uint8_t value) |
122 | { | 122 | { |
123 | struct nouveau_device *device = nouveau_dev(dev); | 123 | struct nvif_device *device = &nouveau_drm(dev)->device; |
124 | nv_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); | 124 | nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); |
125 | nv_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value); | 125 | nvif_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value); |
126 | } | 126 | } |
127 | 127 | ||
128 | static inline uint8_t NVReadVgaCrtc(struct drm_device *dev, | 128 | static inline uint8_t NVReadVgaCrtc(struct drm_device *dev, |
129 | int head, uint8_t index) | 129 | int head, uint8_t index) |
130 | { | 130 | { |
131 | struct nouveau_device *device = nouveau_dev(dev); | 131 | struct nvif_device *device = &nouveau_drm(dev)->device; |
132 | uint8_t val; | 132 | uint8_t val; |
133 | nv_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); | 133 | nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); |
134 | val = nv_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE); | 134 | val = nvif_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE); |
135 | return val; | 135 | return val; |
136 | } | 136 | } |
137 | 137 | ||
@@ -165,74 +165,74 @@ static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_ | |||
165 | static inline uint8_t NVReadPRMVIO(struct drm_device *dev, | 165 | static inline uint8_t NVReadPRMVIO(struct drm_device *dev, |
166 | int head, uint32_t reg) | 166 | int head, uint32_t reg) |
167 | { | 167 | { |
168 | struct nouveau_device *device = nouveau_dev(dev); | 168 | struct nvif_device *device = &nouveau_drm(dev)->device; |
169 | struct nouveau_drm *drm = nouveau_drm(dev); | 169 | struct nouveau_drm *drm = nouveau_drm(dev); |
170 | uint8_t val; | 170 | uint8_t val; |
171 | 171 | ||
172 | /* Only NV4x have two pvio ranges; other twoHeads cards MUST call | 172 | /* Only NV4x have two pvio ranges; other twoHeads cards MUST call |
173 | * NVSetOwner for the relevant head to be programmed */ | 173 | * NVSetOwner for the relevant head to be programmed */ |
174 | if (head && nv_device(drm->device)->card_type == NV_40) | 174 | if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) |
175 | reg += NV_PRMVIO_SIZE; | 175 | reg += NV_PRMVIO_SIZE; |
176 | 176 | ||
177 | val = nv_rd08(device, reg); | 177 | val = nvif_rd08(device, reg); |
178 | return val; | 178 | return val; |
179 | } | 179 | } |
180 | 180 | ||
181 | static inline void NVWritePRMVIO(struct drm_device *dev, | 181 | static inline void NVWritePRMVIO(struct drm_device *dev, |
182 | int head, uint32_t reg, uint8_t value) | 182 | int head, uint32_t reg, uint8_t value) |
183 | { | 183 | { |
184 | struct nouveau_device *device = nouveau_dev(dev); | 184 | struct nvif_device *device = &nouveau_drm(dev)->device; |
185 | struct nouveau_drm *drm = nouveau_drm(dev); | 185 | struct nouveau_drm *drm = nouveau_drm(dev); |
186 | 186 | ||
187 | /* Only NV4x have two pvio ranges; other twoHeads cards MUST call | 187 | /* Only NV4x have two pvio ranges; other twoHeads cards MUST call |
188 | * NVSetOwner for the relevant head to be programmed */ | 188 | * NVSetOwner for the relevant head to be programmed */ |
189 | if (head && nv_device(drm->device)->card_type == NV_40) | 189 | if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) |
190 | reg += NV_PRMVIO_SIZE; | 190 | reg += NV_PRMVIO_SIZE; |
191 | 191 | ||
192 | nv_wr08(device, reg, value); | 192 | nvif_wr08(device, reg, value); |
193 | } | 193 | } |
194 | 194 | ||
195 | static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable) | 195 | static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable) |
196 | { | 196 | { |
197 | struct nouveau_device *device = nouveau_dev(dev); | 197 | struct nvif_device *device = &nouveau_drm(dev)->device; |
198 | nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); | 198 | nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); |
199 | nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20); | 199 | nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20); |
200 | } | 200 | } |
201 | 201 | ||
202 | static inline bool NVGetEnablePalette(struct drm_device *dev, int head) | 202 | static inline bool NVGetEnablePalette(struct drm_device *dev, int head) |
203 | { | 203 | { |
204 | struct nouveau_device *device = nouveau_dev(dev); | 204 | struct nvif_device *device = &nouveau_drm(dev)->device; |
205 | nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); | 205 | nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); |
206 | return !(nv_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20); | 206 | return !(nvif_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20); |
207 | } | 207 | } |
208 | 208 | ||
209 | static inline void NVWriteVgaAttr(struct drm_device *dev, | 209 | static inline void NVWriteVgaAttr(struct drm_device *dev, |
210 | int head, uint8_t index, uint8_t value) | 210 | int head, uint8_t index, uint8_t value) |
211 | { | 211 | { |
212 | struct nouveau_device *device = nouveau_dev(dev); | 212 | struct nvif_device *device = &nouveau_drm(dev)->device; |
213 | if (NVGetEnablePalette(dev, head)) | 213 | if (NVGetEnablePalette(dev, head)) |
214 | index &= ~0x20; | 214 | index &= ~0x20; |
215 | else | 215 | else |
216 | index |= 0x20; | 216 | index |= 0x20; |
217 | 217 | ||
218 | nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); | 218 | nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); |
219 | nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index); | 219 | nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index); |
220 | nv_wr08(device, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value); | 220 | nvif_wr08(device, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value); |
221 | } | 221 | } |
222 | 222 | ||
223 | static inline uint8_t NVReadVgaAttr(struct drm_device *dev, | 223 | static inline uint8_t NVReadVgaAttr(struct drm_device *dev, |
224 | int head, uint8_t index) | 224 | int head, uint8_t index) |
225 | { | 225 | { |
226 | struct nouveau_device *device = nouveau_dev(dev); | 226 | struct nvif_device *device = &nouveau_drm(dev)->device; |
227 | uint8_t val; | 227 | uint8_t val; |
228 | if (NVGetEnablePalette(dev, head)) | 228 | if (NVGetEnablePalette(dev, head)) |
229 | index &= ~0x20; | 229 | index &= ~0x20; |
230 | else | 230 | else |
231 | index |= 0x20; | 231 | index |= 0x20; |
232 | 232 | ||
233 | nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); | 233 | nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); |
234 | nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index); | 234 | nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index); |
235 | val = nv_rd08(device, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE); | 235 | val = nvif_rd08(device, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE); |
236 | return val; | 236 | return val; |
237 | } | 237 | } |
238 | 238 | ||
@@ -259,11 +259,11 @@ static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect) | |||
259 | static inline bool | 259 | static inline bool |
260 | nv_heads_tied(struct drm_device *dev) | 260 | nv_heads_tied(struct drm_device *dev) |
261 | { | 261 | { |
262 | struct nouveau_device *device = nouveau_dev(dev); | 262 | struct nvif_device *device = &nouveau_drm(dev)->device; |
263 | struct nouveau_drm *drm = nouveau_drm(dev); | 263 | struct nouveau_drm *drm = nouveau_drm(dev); |
264 | 264 | ||
265 | if (nv_device(drm->device)->chipset == 0x11) | 265 | if (drm->device.info.chipset == 0x11) |
266 | return !!(nv_rd32(device, NV_PBUS_DEBUG_1) & (1 << 28)); | 266 | return !!(nvif_rd32(device, NV_PBUS_DEBUG_1) & (1 << 28)); |
267 | 267 | ||
268 | return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4; | 268 | return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4; |
269 | } | 269 | } |
@@ -318,7 +318,7 @@ NVLockVgaCrtcs(struct drm_device *dev, bool lock) | |||
318 | NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX, | 318 | NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX, |
319 | lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE); | 319 | lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE); |
320 | /* NV11 has independently lockable extended crtcs, except when tied */ | 320 | /* NV11 has independently lockable extended crtcs, except when tied */ |
321 | if (nv_device(drm->device)->chipset == 0x11 && !nv_heads_tied(dev)) | 321 | if (drm->device.info.chipset == 0x11 && !nv_heads_tied(dev)) |
322 | NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX, | 322 | NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX, |
323 | lock ? NV_CIO_SR_LOCK_VALUE : | 323 | lock ? NV_CIO_SR_LOCK_VALUE : |
324 | NV_CIO_SR_UNLOCK_RW_VALUE); | 324 | NV_CIO_SR_UNLOCK_RW_VALUE); |
@@ -335,7 +335,7 @@ static inline int nv_cursor_width(struct drm_device *dev) | |||
335 | { | 335 | { |
336 | struct nouveau_drm *drm = nouveau_drm(dev); | 336 | struct nouveau_drm *drm = nouveau_drm(dev); |
337 | 337 | ||
338 | return nv_device(drm->device)->card_type >= NV_10 ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE; | 338 | return drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE; |
339 | } | 339 | } |
340 | 340 | ||
341 | static inline void | 341 | static inline void |
@@ -357,7 +357,7 @@ nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset) | |||
357 | 357 | ||
358 | NVWriteCRTC(dev, head, NV_PCRTC_START, offset); | 358 | NVWriteCRTC(dev, head, NV_PCRTC_START, offset); |
359 | 359 | ||
360 | if (nv_device(drm->device)->card_type == NV_04) { | 360 | if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT) { |
361 | /* | 361 | /* |
362 | * Hilarious, the 24th bit doesn't want to stick to | 362 | * Hilarious, the 24th bit doesn't want to stick to |
363 | * PCRTC_START... | 363 | * PCRTC_START... |
@@ -382,7 +382,7 @@ nv_show_cursor(struct drm_device *dev, int head, bool show) | |||
382 | *curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE); | 382 | *curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE); |
383 | NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1); | 383 | NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1); |
384 | 384 | ||
385 | if (nv_device(drm->device)->card_type == NV_40) | 385 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) |
386 | nv_fix_nv40_hw_cursor(dev, head); | 386 | nv_fix_nv40_hw_cursor(dev, head); |
387 | } | 387 | } |
388 | 388 | ||
@@ -398,7 +398,7 @@ nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp) | |||
398 | bpp = 8; | 398 | bpp = 8; |
399 | 399 | ||
400 | /* Alignment requirements taken from the Haiku driver */ | 400 | /* Alignment requirements taken from the Haiku driver */ |
401 | if (nv_device(drm->device)->card_type == NV_04) | 401 | if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT) |
402 | mask = 128 / bpp - 1; | 402 | mask = 128 / bpp - 1; |
403 | else | 403 | else |
404 | mask = 512 / bpp - 1; | 404 | mask = 512 / bpp - 1; |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c index ab03f7719d2d..b36afcbbc83f 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c +++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c | |||
@@ -96,7 +96,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
96 | uint32_t src_x, uint32_t src_y, | 96 | uint32_t src_x, uint32_t src_y, |
97 | uint32_t src_w, uint32_t src_h) | 97 | uint32_t src_w, uint32_t src_h) |
98 | { | 98 | { |
99 | struct nouveau_device *dev = nouveau_dev(plane->dev); | 99 | struct nvif_device *dev = &nouveau_drm(plane->dev)->device; |
100 | struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; | 100 | struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; |
101 | struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); | 101 | struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); |
102 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 102 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
@@ -117,7 +117,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
117 | if (format > 0xffff) | 117 | if (format > 0xffff) |
118 | return -ERANGE; | 118 | return -ERANGE; |
119 | 119 | ||
120 | if (dev->chipset >= 0x30) { | 120 | if (dev->info.chipset >= 0x30) { |
121 | if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1)) | 121 | if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1)) |
122 | return -ERANGE; | 122 | return -ERANGE; |
123 | } else { | 123 | } else { |
@@ -131,17 +131,17 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
131 | 131 | ||
132 | nv_plane->cur = nv_fb->nvbo; | 132 | nv_plane->cur = nv_fb->nvbo; |
133 | 133 | ||
134 | nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY); | 134 | nvif_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY); |
135 | nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0); | 135 | nvif_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0); |
136 | 136 | ||
137 | nv_wr32(dev, NV_PVIDEO_BASE(flip), 0); | 137 | nvif_wr32(dev, NV_PVIDEO_BASE(flip), 0); |
138 | nv_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset); | 138 | nvif_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset); |
139 | nv_wr32(dev, NV_PVIDEO_SIZE_IN(flip), src_h << 16 | src_w); | 139 | nvif_wr32(dev, NV_PVIDEO_SIZE_IN(flip), src_h << 16 | src_w); |
140 | nv_wr32(dev, NV_PVIDEO_POINT_IN(flip), src_y << 16 | src_x); | 140 | nvif_wr32(dev, NV_PVIDEO_POINT_IN(flip), src_y << 16 | src_x); |
141 | nv_wr32(dev, NV_PVIDEO_DS_DX(flip), (src_w << 20) / crtc_w); | 141 | nvif_wr32(dev, NV_PVIDEO_DS_DX(flip), (src_w << 20) / crtc_w); |
142 | nv_wr32(dev, NV_PVIDEO_DT_DY(flip), (src_h << 20) / crtc_h); | 142 | nvif_wr32(dev, NV_PVIDEO_DT_DY(flip), (src_h << 20) / crtc_h); |
143 | nv_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x); | 143 | nvif_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x); |
144 | nv_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w); | 144 | nvif_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w); |
145 | 145 | ||
146 | if (fb->pixel_format != DRM_FORMAT_UYVY) | 146 | if (fb->pixel_format != DRM_FORMAT_UYVY) |
147 | format |= NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8; | 147 | format |= NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8; |
@@ -153,14 +153,14 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
153 | format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY; | 153 | format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY; |
154 | 154 | ||
155 | if (fb->pixel_format == DRM_FORMAT_NV12) { | 155 | if (fb->pixel_format == DRM_FORMAT_NV12) { |
156 | nv_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0); | 156 | nvif_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0); |
157 | nv_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip), | 157 | nvif_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip), |
158 | nv_fb->nvbo->bo.offset + fb->offsets[1]); | 158 | nv_fb->nvbo->bo.offset + fb->offsets[1]); |
159 | } | 159 | } |
160 | nv_wr32(dev, NV_PVIDEO_FORMAT(flip), format); | 160 | nvif_wr32(dev, NV_PVIDEO_FORMAT(flip), format); |
161 | nv_wr32(dev, NV_PVIDEO_STOP, 0); | 161 | nvif_wr32(dev, NV_PVIDEO_STOP, 0); |
162 | /* TODO: wait for vblank? */ | 162 | /* TODO: wait for vblank? */ |
163 | nv_wr32(dev, NV_PVIDEO_BUFFER, flip ? 0x10 : 0x1); | 163 | nvif_wr32(dev, NV_PVIDEO_BUFFER, flip ? 0x10 : 0x1); |
164 | nv_plane->flip = !flip; | 164 | nv_plane->flip = !flip; |
165 | 165 | ||
166 | if (cur) | 166 | if (cur) |
@@ -172,10 +172,10 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
172 | static int | 172 | static int |
173 | nv10_disable_plane(struct drm_plane *plane) | 173 | nv10_disable_plane(struct drm_plane *plane) |
174 | { | 174 | { |
175 | struct nouveau_device *dev = nouveau_dev(plane->dev); | 175 | struct nvif_device *dev = &nouveau_drm(plane->dev)->device; |
176 | struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; | 176 | struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; |
177 | 177 | ||
178 | nv_wr32(dev, NV_PVIDEO_STOP, 1); | 178 | nvif_wr32(dev, NV_PVIDEO_STOP, 1); |
179 | if (nv_plane->cur) { | 179 | if (nv_plane->cur) { |
180 | nouveau_bo_unpin(nv_plane->cur); | 180 | nouveau_bo_unpin(nv_plane->cur); |
181 | nv_plane->cur = NULL; | 181 | nv_plane->cur = NULL; |
@@ -195,24 +195,24 @@ nv_destroy_plane(struct drm_plane *plane) | |||
195 | static void | 195 | static void |
196 | nv10_set_params(struct nouveau_plane *plane) | 196 | nv10_set_params(struct nouveau_plane *plane) |
197 | { | 197 | { |
198 | struct nouveau_device *dev = nouveau_dev(plane->base.dev); | 198 | struct nvif_device *dev = &nouveau_drm(plane->base.dev)->device; |
199 | u32 luma = (plane->brightness - 512) << 16 | plane->contrast; | 199 | u32 luma = (plane->brightness - 512) << 16 | plane->contrast; |
200 | u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) | | 200 | u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) | |
201 | (cos_mul(plane->hue, plane->saturation) & 0xffff); | 201 | (cos_mul(plane->hue, plane->saturation) & 0xffff); |
202 | u32 format = 0; | 202 | u32 format = 0; |
203 | 203 | ||
204 | nv_wr32(dev, NV_PVIDEO_LUMINANCE(0), luma); | 204 | nvif_wr32(dev, NV_PVIDEO_LUMINANCE(0), luma); |
205 | nv_wr32(dev, NV_PVIDEO_LUMINANCE(1), luma); | 205 | nvif_wr32(dev, NV_PVIDEO_LUMINANCE(1), luma); |
206 | nv_wr32(dev, NV_PVIDEO_CHROMINANCE(0), chroma); | 206 | nvif_wr32(dev, NV_PVIDEO_CHROMINANCE(0), chroma); |
207 | nv_wr32(dev, NV_PVIDEO_CHROMINANCE(1), chroma); | 207 | nvif_wr32(dev, NV_PVIDEO_CHROMINANCE(1), chroma); |
208 | nv_wr32(dev, NV_PVIDEO_COLOR_KEY, plane->colorkey & 0xffffff); | 208 | nvif_wr32(dev, NV_PVIDEO_COLOR_KEY, plane->colorkey & 0xffffff); |
209 | 209 | ||
210 | if (plane->cur) { | 210 | if (plane->cur) { |
211 | if (plane->iturbt_709) | 211 | if (plane->iturbt_709) |
212 | format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709; | 212 | format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709; |
213 | if (plane->colorkey & (1 << 24)) | 213 | if (plane->colorkey & (1 << 24)) |
214 | format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY; | 214 | format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY; |
215 | nv_mask(dev, NV_PVIDEO_FORMAT(plane->flip), | 215 | nvif_mask(dev, NV_PVIDEO_FORMAT(plane->flip), |
216 | NV_PVIDEO_FORMAT_MATRIX_ITURBT709 | | 216 | NV_PVIDEO_FORMAT_MATRIX_ITURBT709 | |
217 | NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY, | 217 | NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY, |
218 | format); | 218 | format); |
@@ -256,7 +256,7 @@ static const struct drm_plane_funcs nv10_plane_funcs = { | |||
256 | static void | 256 | static void |
257 | nv10_overlay_init(struct drm_device *device) | 257 | nv10_overlay_init(struct drm_device *device) |
258 | { | 258 | { |
259 | struct nouveau_device *dev = nouveau_dev(device); | 259 | struct nouveau_drm *drm = nouveau_drm(device); |
260 | struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL); | 260 | struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL); |
261 | int num_formats = ARRAY_SIZE(formats); | 261 | int num_formats = ARRAY_SIZE(formats); |
262 | int ret; | 262 | int ret; |
@@ -264,7 +264,7 @@ nv10_overlay_init(struct drm_device *device) | |||
264 | if (!plane) | 264 | if (!plane) |
265 | return; | 265 | return; |
266 | 266 | ||
267 | switch (dev->chipset) { | 267 | switch (drm->device.info.chipset) { |
268 | case 0x10: | 268 | case 0x10: |
269 | case 0x11: | 269 | case 0x11: |
270 | case 0x15: | 270 | case 0x15: |
@@ -333,7 +333,7 @@ cleanup: | |||
333 | drm_plane_cleanup(&plane->base); | 333 | drm_plane_cleanup(&plane->base); |
334 | err: | 334 | err: |
335 | kfree(plane); | 335 | kfree(plane); |
336 | nv_error(dev, "Failed to create plane\n"); | 336 | NV_ERROR(drm, "Failed to create plane\n"); |
337 | } | 337 | } |
338 | 338 | ||
339 | static int | 339 | static int |
@@ -343,7 +343,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
343 | uint32_t src_x, uint32_t src_y, | 343 | uint32_t src_x, uint32_t src_y, |
344 | uint32_t src_w, uint32_t src_h) | 344 | uint32_t src_w, uint32_t src_h) |
345 | { | 345 | { |
346 | struct nouveau_device *dev = nouveau_dev(plane->dev); | 346 | struct nvif_device *dev = &nouveau_drm(plane->dev)->device; |
347 | struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; | 347 | struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; |
348 | struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); | 348 | struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); |
349 | struct nouveau_bo *cur = nv_plane->cur; | 349 | struct nouveau_bo *cur = nv_plane->cur; |
@@ -375,43 +375,43 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
375 | 375 | ||
376 | nv_plane->cur = nv_fb->nvbo; | 376 | nv_plane->cur = nv_fb->nvbo; |
377 | 377 | ||
378 | nv_wr32(dev, NV_PVIDEO_OE_STATE, 0); | 378 | nvif_wr32(dev, NV_PVIDEO_OE_STATE, 0); |
379 | nv_wr32(dev, NV_PVIDEO_SU_STATE, 0); | 379 | nvif_wr32(dev, NV_PVIDEO_SU_STATE, 0); |
380 | nv_wr32(dev, NV_PVIDEO_RM_STATE, 0); | 380 | nvif_wr32(dev, NV_PVIDEO_RM_STATE, 0); |
381 | 381 | ||
382 | for (i = 0; i < 2; i++) { | 382 | for (i = 0; i < 2; i++) { |
383 | nv_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i, | 383 | nvif_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i, |
384 | nv_fb->nvbo->bo.offset); | 384 | nv_fb->nvbo->bo.offset); |
385 | nv_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i, pitch); | 385 | nvif_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i, pitch); |
386 | nv_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0); | 386 | nvif_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0); |
387 | } | 387 | } |
388 | nv_wr32(dev, NV_PVIDEO_WINDOW_START, crtc_y << 16 | crtc_x); | 388 | nvif_wr32(dev, NV_PVIDEO_WINDOW_START, crtc_y << 16 | crtc_x); |
389 | nv_wr32(dev, NV_PVIDEO_WINDOW_SIZE, crtc_h << 16 | crtc_w); | 389 | nvif_wr32(dev, NV_PVIDEO_WINDOW_SIZE, crtc_h << 16 | crtc_w); |
390 | nv_wr32(dev, NV_PVIDEO_STEP_SIZE, | 390 | nvif_wr32(dev, NV_PVIDEO_STEP_SIZE, |
391 | (uint32_t)(((src_h - 1) << 11) / (crtc_h - 1)) << 16 | (uint32_t)(((src_w - 1) << 11) / (crtc_w - 1))); | 391 | (uint32_t)(((src_h - 1) << 11) / (crtc_h - 1)) << 16 | (uint32_t)(((src_w - 1) << 11) / (crtc_w - 1))); |
392 | 392 | ||
393 | /* It should be possible to convert hue/contrast to this */ | 393 | /* It should be possible to convert hue/contrast to this */ |
394 | nv_wr32(dev, NV_PVIDEO_RED_CSC_OFFSET, 0x69 - brightness); | 394 | nvif_wr32(dev, NV_PVIDEO_RED_CSC_OFFSET, 0x69 - brightness); |
395 | nv_wr32(dev, NV_PVIDEO_GREEN_CSC_OFFSET, 0x3e + brightness); | 395 | nvif_wr32(dev, NV_PVIDEO_GREEN_CSC_OFFSET, 0x3e + brightness); |
396 | nv_wr32(dev, NV_PVIDEO_BLUE_CSC_OFFSET, 0x89 - brightness); | 396 | nvif_wr32(dev, NV_PVIDEO_BLUE_CSC_OFFSET, 0x89 - brightness); |
397 | nv_wr32(dev, NV_PVIDEO_CSC_ADJUST, 0); | 397 | nvif_wr32(dev, NV_PVIDEO_CSC_ADJUST, 0); |
398 | 398 | ||
399 | nv_wr32(dev, NV_PVIDEO_CONTROL_Y, 0x001); /* (BLUR_ON, LINE_HALF) */ | 399 | nvif_wr32(dev, NV_PVIDEO_CONTROL_Y, 0x001); /* (BLUR_ON, LINE_HALF) */ |
400 | nv_wr32(dev, NV_PVIDEO_CONTROL_X, 0x111); /* (WEIGHT_HEAVY, SHARPENING_ON, SMOOTHING_ON) */ | 400 | nvif_wr32(dev, NV_PVIDEO_CONTROL_X, 0x111); /* (WEIGHT_HEAVY, SHARPENING_ON, SMOOTHING_ON) */ |
401 | 401 | ||
402 | nv_wr32(dev, NV_PVIDEO_FIFO_BURST_LENGTH, 0x03); | 402 | nvif_wr32(dev, NV_PVIDEO_FIFO_BURST_LENGTH, 0x03); |
403 | nv_wr32(dev, NV_PVIDEO_FIFO_THRES_SIZE, 0x38); | 403 | nvif_wr32(dev, NV_PVIDEO_FIFO_THRES_SIZE, 0x38); |
404 | 404 | ||
405 | nv_wr32(dev, NV_PVIDEO_KEY, nv_plane->colorkey); | 405 | nvif_wr32(dev, NV_PVIDEO_KEY, nv_plane->colorkey); |
406 | 406 | ||
407 | if (nv_plane->colorkey & (1 << 24)) | 407 | if (nv_plane->colorkey & (1 << 24)) |
408 | overlay |= 0x10; | 408 | overlay |= 0x10; |
409 | if (fb->pixel_format == DRM_FORMAT_YUYV) | 409 | if (fb->pixel_format == DRM_FORMAT_YUYV) |
410 | overlay |= 0x100; | 410 | overlay |= 0x100; |
411 | 411 | ||
412 | nv_wr32(dev, NV_PVIDEO_OVERLAY, overlay); | 412 | nvif_wr32(dev, NV_PVIDEO_OVERLAY, overlay); |
413 | 413 | ||
414 | nv_wr32(dev, NV_PVIDEO_SU_STATE, nv_rd32(dev, NV_PVIDEO_SU_STATE) ^ (1 << 16)); | 414 | nvif_wr32(dev, NV_PVIDEO_SU_STATE, nvif_rd32(dev, NV_PVIDEO_SU_STATE) ^ (1 << 16)); |
415 | 415 | ||
416 | if (cur) | 416 | if (cur) |
417 | nouveau_bo_unpin(cur); | 417 | nouveau_bo_unpin(cur); |
@@ -422,13 +422,13 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
422 | static int | 422 | static int |
423 | nv04_disable_plane(struct drm_plane *plane) | 423 | nv04_disable_plane(struct drm_plane *plane) |
424 | { | 424 | { |
425 | struct nouveau_device *dev = nouveau_dev(plane->dev); | 425 | struct nvif_device *dev = &nouveau_drm(plane->dev)->device; |
426 | struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; | 426 | struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; |
427 | 427 | ||
428 | nv_mask(dev, NV_PVIDEO_OVERLAY, 1, 0); | 428 | nvif_mask(dev, NV_PVIDEO_OVERLAY, 1, 0); |
429 | nv_wr32(dev, NV_PVIDEO_OE_STATE, 0); | 429 | nvif_wr32(dev, NV_PVIDEO_OE_STATE, 0); |
430 | nv_wr32(dev, NV_PVIDEO_SU_STATE, 0); | 430 | nvif_wr32(dev, NV_PVIDEO_SU_STATE, 0); |
431 | nv_wr32(dev, NV_PVIDEO_RM_STATE, 0); | 431 | nvif_wr32(dev, NV_PVIDEO_RM_STATE, 0); |
432 | if (nv_plane->cur) { | 432 | if (nv_plane->cur) { |
433 | nouveau_bo_unpin(nv_plane->cur); | 433 | nouveau_bo_unpin(nv_plane->cur); |
434 | nv_plane->cur = NULL; | 434 | nv_plane->cur = NULL; |
@@ -447,7 +447,7 @@ static const struct drm_plane_funcs nv04_plane_funcs = { | |||
447 | static void | 447 | static void |
448 | nv04_overlay_init(struct drm_device *device) | 448 | nv04_overlay_init(struct drm_device *device) |
449 | { | 449 | { |
450 | struct nouveau_device *dev = nouveau_dev(device); | 450 | struct nouveau_drm *drm = nouveau_drm(device); |
451 | struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL); | 451 | struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL); |
452 | int ret; | 452 | int ret; |
453 | 453 | ||
@@ -483,15 +483,15 @@ cleanup: | |||
483 | drm_plane_cleanup(&plane->base); | 483 | drm_plane_cleanup(&plane->base); |
484 | err: | 484 | err: |
485 | kfree(plane); | 485 | kfree(plane); |
486 | nv_error(dev, "Failed to create plane\n"); | 486 | NV_ERROR(drm, "Failed to create plane\n"); |
487 | } | 487 | } |
488 | 488 | ||
489 | void | 489 | void |
490 | nouveau_overlay_init(struct drm_device *device) | 490 | nouveau_overlay_init(struct drm_device *device) |
491 | { | 491 | { |
492 | struct nouveau_device *dev = nouveau_dev(device); | 492 | struct nvif_device *dev = &nouveau_drm(device)->device; |
493 | if (dev->chipset < 0x10) | 493 | if (dev->info.chipset < 0x10) |
494 | nv04_overlay_init(device); | 494 | nv04_overlay_init(device); |
495 | else if (dev->chipset <= 0x40) | 495 | else if (dev->info.chipset <= 0x40) |
496 | nv10_overlay_init(device); | 496 | nv10_overlay_init(device); |
497 | } | 497 | } |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c index 8667620b703a..8061d8d0ce79 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c | |||
@@ -35,8 +35,6 @@ | |||
35 | 35 | ||
36 | #include <drm/i2c/ch7006.h> | 36 | #include <drm/i2c/ch7006.h> |
37 | 37 | ||
38 | #include <subdev/i2c.h> | ||
39 | |||
40 | static struct nouveau_i2c_board_info nv04_tv_encoder_info[] = { | 38 | static struct nouveau_i2c_board_info nv04_tv_encoder_info[] = { |
41 | { | 39 | { |
42 | { | 40 | { |
@@ -56,7 +54,7 @@ static struct nouveau_i2c_board_info nv04_tv_encoder_info[] = { | |||
56 | int nv04_tv_identify(struct drm_device *dev, int i2c_index) | 54 | int nv04_tv_identify(struct drm_device *dev, int i2c_index) |
57 | { | 55 | { |
58 | struct nouveau_drm *drm = nouveau_drm(dev); | 56 | struct nouveau_drm *drm = nouveau_drm(dev); |
59 | struct nouveau_i2c *i2c = nouveau_i2c(drm->device); | 57 | struct nouveau_i2c *i2c = nvkm_i2c(&drm->device); |
60 | 58 | ||
61 | return i2c->identify(i2c, i2c_index, "TV encoder", | 59 | return i2c->identify(i2c, i2c_index, "TV encoder", |
62 | nv04_tv_encoder_info, NULL, NULL); | 60 | nv04_tv_encoder_info, NULL, NULL); |
@@ -206,7 +204,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry) | |||
206 | struct drm_encoder *encoder; | 204 | struct drm_encoder *encoder; |
207 | struct drm_device *dev = connector->dev; | 205 | struct drm_device *dev = connector->dev; |
208 | struct nouveau_drm *drm = nouveau_drm(dev); | 206 | struct nouveau_drm *drm = nouveau_drm(dev); |
209 | struct nouveau_i2c *i2c = nouveau_i2c(drm->device); | 207 | struct nouveau_i2c *i2c = nvkm_i2c(&drm->device); |
210 | struct nouveau_i2c_port *port = i2c->find(i2c, entry->i2c_index); | 208 | struct nouveau_i2c_port *port = i2c->find(i2c, entry->i2c_index); |
211 | int type, ret; | 209 | int type, ret; |
212 | 210 | ||
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c index 195bd8e86c6a..72d2ab04db47 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c | |||
@@ -34,11 +34,6 @@ | |||
34 | #include "hw.h" | 34 | #include "hw.h" |
35 | #include "tvnv17.h" | 35 | #include "tvnv17.h" |
36 | 36 | ||
37 | #include <core/device.h> | ||
38 | |||
39 | #include <subdev/bios/gpio.h> | ||
40 | #include <subdev/gpio.h> | ||
41 | |||
42 | MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" | 37 | MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" |
43 | "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" | 38 | "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" |
44 | "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" | 39 | "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" |
@@ -51,7 +46,7 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder) | |||
51 | { | 46 | { |
52 | struct drm_device *dev = encoder->dev; | 47 | struct drm_device *dev = encoder->dev; |
53 | struct nouveau_drm *drm = nouveau_drm(dev); | 48 | struct nouveau_drm *drm = nouveau_drm(dev); |
54 | struct nouveau_gpio *gpio = nouveau_gpio(drm->device); | 49 | struct nouveau_gpio *gpio = nvkm_gpio(&drm->device); |
55 | uint32_t testval, regoffset = nv04_dac_output_offset(encoder); | 50 | uint32_t testval, regoffset = nv04_dac_output_offset(encoder); |
56 | uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end, | 51 | uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end, |
57 | fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c; | 52 | fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c; |
@@ -135,17 +130,17 @@ static bool | |||
135 | get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask) | 130 | get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask) |
136 | { | 131 | { |
137 | struct nouveau_drm *drm = nouveau_drm(dev); | 132 | struct nouveau_drm *drm = nouveau_drm(dev); |
138 | struct nouveau_object *device = drm->device; | 133 | struct nvif_device *device = &drm->device; |
139 | 134 | ||
140 | /* Zotac FX5200 */ | 135 | /* Zotac FX5200 */ |
141 | if (nv_device_match(device, 0x0322, 0x19da, 0x1035) || | 136 | if (nv_device_match(nvkm_object(device), 0x0322, 0x19da, 0x1035) || |
142 | nv_device_match(device, 0x0322, 0x19da, 0x2035)) { | 137 | nv_device_match(nvkm_object(device), 0x0322, 0x19da, 0x2035)) { |
143 | *pin_mask = 0xc; | 138 | *pin_mask = 0xc; |
144 | return false; | 139 | return false; |
145 | } | 140 | } |
146 | 141 | ||
147 | /* MSI nForce2 IGP */ | 142 | /* MSI nForce2 IGP */ |
148 | if (nv_device_match(device, 0x01f0, 0x1462, 0x5710)) { | 143 | if (nv_device_match(nvkm_object(device), 0x01f0, 0x1462, 0x5710)) { |
149 | *pin_mask = 0xc; | 144 | *pin_mask = 0xc; |
150 | return false; | 145 | return false; |
151 | } | 146 | } |
@@ -167,8 +162,8 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) | |||
167 | return connector_status_disconnected; | 162 | return connector_status_disconnected; |
168 | 163 | ||
169 | if (reliable) { | 164 | if (reliable) { |
170 | if (nv_device(drm->device)->chipset == 0x42 || | 165 | if (drm->device.info.chipset == 0x42 || |
171 | nv_device(drm->device)->chipset == 0x43) | 166 | drm->device.info.chipset == 0x43) |
172 | tv_enc->pin_mask = | 167 | tv_enc->pin_mask = |
173 | nv42_tv_sample_load(encoder) >> 28 & 0xe; | 168 | nv42_tv_sample_load(encoder) >> 28 & 0xe; |
174 | else | 169 | else |
@@ -375,7 +370,7 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode) | |||
375 | { | 370 | { |
376 | struct drm_device *dev = encoder->dev; | 371 | struct drm_device *dev = encoder->dev; |
377 | struct nouveau_drm *drm = nouveau_drm(dev); | 372 | struct nouveau_drm *drm = nouveau_drm(dev); |
378 | struct nouveau_gpio *gpio = nouveau_gpio(drm->device); | 373 | struct nouveau_gpio *gpio = nvkm_gpio(&drm->device); |
379 | struct nv17_tv_state *regs = &to_tv_enc(encoder)->state; | 374 | struct nv17_tv_state *regs = &to_tv_enc(encoder)->state; |
380 | struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); | 375 | struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); |
381 | 376 | ||
@@ -448,7 +443,7 @@ static void nv17_tv_prepare(struct drm_encoder *encoder) | |||
448 | /* Set the DACCLK register */ | 443 | /* Set the DACCLK register */ |
449 | dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1; | 444 | dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1; |
450 | 445 | ||
451 | if (nv_device(drm->device)->card_type == NV_40) | 446 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) |
452 | dacclk |= 0x1a << 16; | 447 | dacclk |= 0x1a << 16; |
453 | 448 | ||
454 | if (tv_norm->kind == CTV_ENC_MODE) { | 449 | if (tv_norm->kind == CTV_ENC_MODE) { |
@@ -505,7 +500,7 @@ static void nv17_tv_mode_set(struct drm_encoder *encoder, | |||
505 | tv_regs->ptv_614 = 0x13; | 500 | tv_regs->ptv_614 = 0x13; |
506 | } | 501 | } |
507 | 502 | ||
508 | if (nv_device(drm->device)->card_type >= NV_30) { | 503 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) { |
509 | tv_regs->ptv_500 = 0xe8e0; | 504 | tv_regs->ptv_500 = 0xe8e0; |
510 | tv_regs->ptv_504 = 0x1710; | 505 | tv_regs->ptv_504 = 0x1710; |
511 | tv_regs->ptv_604 = 0x0; | 506 | tv_regs->ptv_604 = 0x0; |
@@ -600,7 +595,7 @@ static void nv17_tv_commit(struct drm_encoder *encoder) | |||
600 | nv17_tv_state_load(dev, &to_tv_enc(encoder)->state); | 595 | nv17_tv_state_load(dev, &to_tv_enc(encoder)->state); |
601 | 596 | ||
602 | /* This could use refinement for flatpanels, but it should work */ | 597 | /* This could use refinement for flatpanels, but it should work */ |
603 | if (nv_device(drm->device)->chipset < 0x44) | 598 | if (drm->device.info.chipset < 0x44) |
604 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + | 599 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + |
605 | nv04_dac_output_offset(encoder), | 600 | nv04_dac_output_offset(encoder), |
606 | 0xf0000000); | 601 | 0xf0000000); |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h index 7b331543a41b..225894cdcac2 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h | |||
@@ -130,14 +130,14 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder); | |||
130 | static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, | 130 | static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, |
131 | uint32_t val) | 131 | uint32_t val) |
132 | { | 132 | { |
133 | struct nouveau_device *device = nouveau_dev(dev); | 133 | struct nvif_device *device = &nouveau_drm(dev)->device; |
134 | nv_wr32(device, reg, val); | 134 | nvif_wr32(device, reg, val); |
135 | } | 135 | } |
136 | 136 | ||
137 | static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg) | 137 | static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg) |
138 | { | 138 | { |
139 | struct nouveau_device *device = nouveau_dev(dev); | 139 | struct nvif_device *device = &nouveau_drm(dev)->device; |
140 | return nv_rd32(device, reg); | 140 | return nvif_rd32(device, reg); |
141 | } | 141 | } |
142 | 142 | ||
143 | static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg, | 143 | static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg, |
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index b13f441c6431..615714c1727d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c | |||
@@ -21,16 +21,10 @@ | |||
21 | * | 21 | * |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #include <core/object.h> | 24 | #include <nvif/client.h> |
25 | #include <core/client.h> | 25 | #include <nvif/driver.h> |
26 | #include <core/device.h> | 26 | #include <nvif/ioctl.h> |
27 | #include <core/class.h> | 27 | #include <nvif/class.h> |
28 | #include <core/mm.h> | ||
29 | |||
30 | #include <subdev/fb.h> | ||
31 | #include <subdev/timer.h> | ||
32 | #include <subdev/instmem.h> | ||
33 | #include <engine/graph.h> | ||
34 | 28 | ||
35 | #include "nouveau_drm.h" | 29 | #include "nouveau_drm.h" |
36 | #include "nouveau_dma.h" | 30 | #include "nouveau_dma.h" |
@@ -47,20 +41,20 @@ nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev) | |||
47 | struct nouveau_abi16 *abi16; | 41 | struct nouveau_abi16 *abi16; |
48 | cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL); | 42 | cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL); |
49 | if (cli->abi16) { | 43 | if (cli->abi16) { |
44 | struct nv_device_v0 args = { | ||
45 | .device = ~0ULL, | ||
46 | }; | ||
47 | |||
50 | INIT_LIST_HEAD(&abi16->channels); | 48 | INIT_LIST_HEAD(&abi16->channels); |
51 | abi16->client = nv_object(cli); | ||
52 | 49 | ||
53 | /* allocate device object targeting client's default | 50 | /* allocate device object targeting client's default |
54 | * device (ie. the one that belongs to the fd it | 51 | * device (ie. the one that belongs to the fd it |
55 | * opened) | 52 | * opened) |
56 | */ | 53 | */ |
57 | if (nouveau_object_new(abi16->client, NVDRM_CLIENT, | 54 | if (nvif_device_init(&cli->base.base, NULL, |
58 | NVDRM_DEVICE, 0x0080, | 55 | NOUVEAU_ABI16_DEVICE, NV_DEVICE, |
59 | &(struct nv_device_class) { | 56 | &args, sizeof(args), |
60 | .device = ~0ULL, | 57 | &abi16->device) == 0) |
61 | }, | ||
62 | sizeof(struct nv_device_class), | ||
63 | &abi16->device) == 0) | ||
64 | return cli->abi16; | 58 | return cli->abi16; |
65 | 59 | ||
66 | kfree(cli->abi16); | 60 | kfree(cli->abi16); |
@@ -75,7 +69,7 @@ nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev) | |||
75 | int | 69 | int |
76 | nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret) | 70 | nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret) |
77 | { | 71 | { |
78 | struct nouveau_cli *cli = (void *)abi16->client; | 72 | struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base); |
79 | mutex_unlock(&cli->mutex); | 73 | mutex_unlock(&cli->mutex); |
80 | return ret; | 74 | return ret; |
81 | } | 75 | } |
@@ -83,21 +77,19 @@ nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret) | |||
83 | u16 | 77 | u16 |
84 | nouveau_abi16_swclass(struct nouveau_drm *drm) | 78 | nouveau_abi16_swclass(struct nouveau_drm *drm) |
85 | { | 79 | { |
86 | switch (nv_device(drm->device)->card_type) { | 80 | switch (drm->device.info.family) { |
87 | case NV_04: | 81 | case NV_DEVICE_INFO_V0_TNT: |
88 | return 0x006e; | 82 | return 0x006e; |
89 | case NV_10: | 83 | case NV_DEVICE_INFO_V0_CELSIUS: |
90 | case NV_11: | 84 | case NV_DEVICE_INFO_V0_KELVIN: |
91 | case NV_20: | 85 | case NV_DEVICE_INFO_V0_RANKINE: |
92 | case NV_30: | 86 | case NV_DEVICE_INFO_V0_CURIE: |
93 | case NV_40: | ||
94 | return 0x016e; | 87 | return 0x016e; |
95 | case NV_50: | 88 | case NV_DEVICE_INFO_V0_TESLA: |
96 | return 0x506e; | 89 | return 0x506e; |
97 | case NV_C0: | 90 | case NV_DEVICE_INFO_V0_FERMI: |
98 | case NV_D0: | 91 | case NV_DEVICE_INFO_V0_KEPLER: |
99 | case NV_E0: | 92 | case NV_DEVICE_INFO_V0_MAXWELL: |
100 | case GM100: | ||
101 | return 0x906e; | 93 | return 0x906e; |
102 | } | 94 | } |
103 | 95 | ||
@@ -140,7 +132,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16, | |||
140 | 132 | ||
141 | /* destroy channel object, all children will be killed too */ | 133 | /* destroy channel object, all children will be killed too */ |
142 | if (chan->chan) { | 134 | if (chan->chan) { |
143 | abi16->handles &= ~(1ULL << (chan->chan->handle & 0xffff)); | 135 | abi16->handles &= ~(1ULL << (chan->chan->object->handle & 0xffff)); |
144 | nouveau_channel_del(&chan->chan); | 136 | nouveau_channel_del(&chan->chan); |
145 | } | 137 | } |
146 | 138 | ||
@@ -151,7 +143,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16, | |||
151 | void | 143 | void |
152 | nouveau_abi16_fini(struct nouveau_abi16 *abi16) | 144 | nouveau_abi16_fini(struct nouveau_abi16 *abi16) |
153 | { | 145 | { |
154 | struct nouveau_cli *cli = (void *)abi16->client; | 146 | struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base); |
155 | struct nouveau_abi16_chan *chan, *temp; | 147 | struct nouveau_abi16_chan *chan, *temp; |
156 | 148 | ||
157 | /* cleanup channels */ | 149 | /* cleanup channels */ |
@@ -160,7 +152,7 @@ nouveau_abi16_fini(struct nouveau_abi16 *abi16) | |||
160 | } | 152 | } |
161 | 153 | ||
162 | /* destroy the device object */ | 154 | /* destroy the device object */ |
163 | nouveau_object_del(abi16->client, NVDRM_CLIENT, NVDRM_DEVICE); | 155 | nvif_device_fini(&abi16->device); |
164 | 156 | ||
165 | kfree(cli->abi16); | 157 | kfree(cli->abi16); |
166 | cli->abi16 = NULL; | 158 | cli->abi16 = NULL; |
@@ -169,30 +161,31 @@ nouveau_abi16_fini(struct nouveau_abi16 *abi16) | |||
169 | int | 161 | int |
170 | nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) | 162 | nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) |
171 | { | 163 | { |
164 | struct nouveau_cli *cli = nouveau_cli(file_priv); | ||
172 | struct nouveau_drm *drm = nouveau_drm(dev); | 165 | struct nouveau_drm *drm = nouveau_drm(dev); |
173 | struct nouveau_device *device = nv_device(drm->device); | 166 | struct nvif_device *device = &drm->device; |
174 | struct nouveau_timer *ptimer = nouveau_timer(device); | 167 | struct nouveau_timer *ptimer = nvkm_timer(device); |
175 | struct nouveau_graph *graph = (void *)nouveau_engine(device, NVDEV_ENGINE_GR); | 168 | struct nouveau_graph *graph = nvkm_gr(device); |
176 | struct drm_nouveau_getparam *getparam = data; | 169 | struct drm_nouveau_getparam *getparam = data; |
177 | 170 | ||
178 | switch (getparam->param) { | 171 | switch (getparam->param) { |
179 | case NOUVEAU_GETPARAM_CHIPSET_ID: | 172 | case NOUVEAU_GETPARAM_CHIPSET_ID: |
180 | getparam->value = device->chipset; | 173 | getparam->value = device->info.chipset; |
181 | break; | 174 | break; |
182 | case NOUVEAU_GETPARAM_PCI_VENDOR: | 175 | case NOUVEAU_GETPARAM_PCI_VENDOR: |
183 | if (nv_device_is_pci(device)) | 176 | if (nv_device_is_pci(nvkm_device(device))) |
184 | getparam->value = dev->pdev->vendor; | 177 | getparam->value = dev->pdev->vendor; |
185 | else | 178 | else |
186 | getparam->value = 0; | 179 | getparam->value = 0; |
187 | break; | 180 | break; |
188 | case NOUVEAU_GETPARAM_PCI_DEVICE: | 181 | case NOUVEAU_GETPARAM_PCI_DEVICE: |
189 | if (nv_device_is_pci(device)) | 182 | if (nv_device_is_pci(nvkm_device(device))) |
190 | getparam->value = dev->pdev->device; | 183 | getparam->value = dev->pdev->device; |
191 | else | 184 | else |
192 | getparam->value = 0; | 185 | getparam->value = 0; |
193 | break; | 186 | break; |
194 | case NOUVEAU_GETPARAM_BUS_TYPE: | 187 | case NOUVEAU_GETPARAM_BUS_TYPE: |
195 | if (!nv_device_is_pci(device)) | 188 | if (!nv_device_is_pci(nvkm_device(device))) |
196 | getparam->value = 3; | 189 | getparam->value = 3; |
197 | else | 190 | else |
198 | if (drm_pci_device_is_agp(dev)) | 191 | if (drm_pci_device_is_agp(dev)) |
@@ -225,7 +218,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) | |||
225 | getparam->value = graph->units ? graph->units(graph) : 0; | 218 | getparam->value = graph->units ? graph->units(graph) : 0; |
226 | break; | 219 | break; |
227 | default: | 220 | default: |
228 | nv_debug(device, "unknown parameter %lld\n", getparam->param); | 221 | NV_PRINTK(debug, cli, "unknown parameter %lld\n", getparam->param); |
229 | return -EINVAL; | 222 | return -EINVAL; |
230 | } | 223 | } |
231 | 224 | ||
@@ -246,10 +239,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) | |||
246 | struct nouveau_drm *drm = nouveau_drm(dev); | 239 | struct nouveau_drm *drm = nouveau_drm(dev); |
247 | struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); | 240 | struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); |
248 | struct nouveau_abi16_chan *chan; | 241 | struct nouveau_abi16_chan *chan; |
249 | struct nouveau_client *client; | 242 | struct nvif_device *device; |
250 | struct nouveau_device *device; | ||
251 | struct nouveau_instmem *imem; | ||
252 | struct nouveau_fb *pfb; | ||
253 | int ret; | 243 | int ret; |
254 | 244 | ||
255 | if (unlikely(!abi16)) | 245 | if (unlikely(!abi16)) |
@@ -258,21 +248,18 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) | |||
258 | if (!drm->channel) | 248 | if (!drm->channel) |
259 | return nouveau_abi16_put(abi16, -ENODEV); | 249 | return nouveau_abi16_put(abi16, -ENODEV); |
260 | 250 | ||
261 | client = nv_client(abi16->client); | 251 | device = &abi16->device; |
262 | device = nv_device(abi16->device); | ||
263 | imem = nouveau_instmem(device); | ||
264 | pfb = nouveau_fb(device); | ||
265 | 252 | ||
266 | /* hack to allow channel engine type specification on kepler */ | 253 | /* hack to allow channel engine type specification on kepler */ |
267 | if (device->card_type >= NV_E0) { | 254 | if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) { |
268 | if (init->fb_ctxdma_handle != ~0) | 255 | if (init->fb_ctxdma_handle != ~0) |
269 | init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR; | 256 | init->fb_ctxdma_handle = KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR; |
270 | else | 257 | else |
271 | init->fb_ctxdma_handle = init->tt_ctxdma_handle; | 258 | init->fb_ctxdma_handle = init->tt_ctxdma_handle; |
272 | 259 | ||
273 | /* allow flips to be executed if this is a graphics channel */ | 260 | /* allow flips to be executed if this is a graphics channel */ |
274 | init->tt_ctxdma_handle = 0; | 261 | init->tt_ctxdma_handle = 0; |
275 | if (init->fb_ctxdma_handle == NVE0_CHANNEL_IND_ENGINE_GR) | 262 | if (init->fb_ctxdma_handle == KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR) |
276 | init->tt_ctxdma_handle = 1; | 263 | init->tt_ctxdma_handle = 1; |
277 | } | 264 | } |
278 | 265 | ||
@@ -293,13 +280,14 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) | |||
293 | abi16->handles |= (1ULL << init->channel); | 280 | abi16->handles |= (1ULL << init->channel); |
294 | 281 | ||
295 | /* create channel object and initialise dma and fence management */ | 282 | /* create channel object and initialise dma and fence management */ |
296 | ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN | | 283 | ret = nouveau_channel_new(drm, device, |
297 | init->channel, init->fb_ctxdma_handle, | 284 | NOUVEAU_ABI16_CHAN(init->channel), |
285 | init->fb_ctxdma_handle, | ||
298 | init->tt_ctxdma_handle, &chan->chan); | 286 | init->tt_ctxdma_handle, &chan->chan); |
299 | if (ret) | 287 | if (ret) |
300 | goto done; | 288 | goto done; |
301 | 289 | ||
302 | if (device->card_type >= NV_50) | 290 | if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) |
303 | init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM | | 291 | init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM | |
304 | NOUVEAU_GEM_DOMAIN_GART; | 292 | NOUVEAU_GEM_DOMAIN_GART; |
305 | else | 293 | else |
@@ -308,10 +296,10 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) | |||
308 | else | 296 | else |
309 | init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART; | 297 | init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART; |
310 | 298 | ||
311 | if (device->card_type < NV_10) { | 299 | if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) { |
312 | init->subchan[0].handle = 0x00000000; | 300 | init->subchan[0].handle = 0x00000000; |
313 | init->subchan[0].grclass = 0x0000; | 301 | init->subchan[0].grclass = 0x0000; |
314 | init->subchan[1].handle = NvSw; | 302 | init->subchan[1].handle = chan->chan->nvsw.handle; |
315 | init->subchan[1].grclass = 0x506e; | 303 | init->subchan[1].grclass = 0x506e; |
316 | init->nr_subchan = 2; | 304 | init->nr_subchan = 2; |
317 | } | 305 | } |
@@ -324,8 +312,8 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) | |||
324 | if (ret) | 312 | if (ret) |
325 | goto done; | 313 | goto done; |
326 | 314 | ||
327 | if (device->card_type >= NV_50) { | 315 | if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) { |
328 | ret = nouveau_bo_vma_add(chan->ntfy, client->vm, | 316 | ret = nouveau_bo_vma_add(chan->ntfy, cli->vm, |
329 | &chan->ntfy_vma); | 317 | &chan->ntfy_vma); |
330 | if (ret) | 318 | if (ret) |
331 | goto done; | 319 | goto done; |
@@ -343,6 +331,18 @@ done: | |||
343 | return nouveau_abi16_put(abi16, ret); | 331 | return nouveau_abi16_put(abi16, ret); |
344 | } | 332 | } |
345 | 333 | ||
334 | static struct nouveau_abi16_chan * | ||
335 | nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel) | ||
336 | { | ||
337 | struct nouveau_abi16_chan *chan; | ||
338 | |||
339 | list_for_each_entry(chan, &abi16->channels, head) { | ||
340 | if (chan->chan->object->handle == NOUVEAU_ABI16_CHAN(channel)) | ||
341 | return chan; | ||
342 | } | ||
343 | |||
344 | return NULL; | ||
345 | } | ||
346 | 346 | ||
347 | int | 347 | int |
348 | nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS) | 348 | nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS) |
@@ -350,28 +350,38 @@ nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS) | |||
350 | struct drm_nouveau_channel_free *req = data; | 350 | struct drm_nouveau_channel_free *req = data; |
351 | struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); | 351 | struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); |
352 | struct nouveau_abi16_chan *chan; | 352 | struct nouveau_abi16_chan *chan; |
353 | int ret = -ENOENT; | ||
354 | 353 | ||
355 | if (unlikely(!abi16)) | 354 | if (unlikely(!abi16)) |
356 | return -ENOMEM; | 355 | return -ENOMEM; |
357 | 356 | ||
358 | list_for_each_entry(chan, &abi16->channels, head) { | 357 | chan = nouveau_abi16_chan(abi16, req->channel); |
359 | if (chan->chan->handle == (NVDRM_CHAN | req->channel)) { | 358 | if (!chan) |
360 | nouveau_abi16_chan_fini(abi16, chan); | 359 | return nouveau_abi16_put(abi16, -ENOENT); |
361 | return nouveau_abi16_put(abi16, 0); | 360 | nouveau_abi16_chan_fini(abi16, chan); |
362 | } | 361 | return nouveau_abi16_put(abi16, 0); |
363 | } | ||
364 | |||
365 | return nouveau_abi16_put(abi16, ret); | ||
366 | } | 362 | } |
367 | 363 | ||
368 | int | 364 | int |
369 | nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS) | 365 | nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS) |
370 | { | 366 | { |
371 | struct drm_nouveau_grobj_alloc *init = data; | 367 | struct drm_nouveau_grobj_alloc *init = data; |
368 | struct { | ||
369 | struct nvif_ioctl_v0 ioctl; | ||
370 | struct nvif_ioctl_new_v0 new; | ||
371 | } args = { | ||
372 | .ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY, | ||
373 | .ioctl.type = NVIF_IOCTL_V0_NEW, | ||
374 | .ioctl.path_nr = 3, | ||
375 | .ioctl.path[2] = NOUVEAU_ABI16_CLIENT, | ||
376 | .ioctl.path[1] = NOUVEAU_ABI16_DEVICE, | ||
377 | .ioctl.path[0] = NOUVEAU_ABI16_CHAN(init->channel), | ||
378 | .new.route = NVDRM_OBJECT_ABI16, | ||
379 | .new.handle = init->handle, | ||
380 | .new.oclass = init->class, | ||
381 | }; | ||
372 | struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); | 382 | struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); |
373 | struct nouveau_drm *drm = nouveau_drm(dev); | 383 | struct nouveau_drm *drm = nouveau_drm(dev); |
374 | struct nouveau_object *object; | 384 | struct nvif_client *client; |
375 | int ret; | 385 | int ret; |
376 | 386 | ||
377 | if (unlikely(!abi16)) | 387 | if (unlikely(!abi16)) |
@@ -379,6 +389,7 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS) | |||
379 | 389 | ||
380 | if (init->handle == ~0) | 390 | if (init->handle == ~0) |
381 | return nouveau_abi16_put(abi16, -EINVAL); | 391 | return nouveau_abi16_put(abi16, -EINVAL); |
392 | client = nvif_client(nvif_object(&abi16->device)); | ||
382 | 393 | ||
383 | /* compatibility with userspace that assumes 506e for all chipsets */ | 394 | /* compatibility with userspace that assumes 506e for all chipsets */ |
384 | if (init->class == 0x506e) { | 395 | if (init->class == 0x506e) { |
@@ -387,8 +398,7 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS) | |||
387 | return nouveau_abi16_put(abi16, 0); | 398 | return nouveau_abi16_put(abi16, 0); |
388 | } | 399 | } |
389 | 400 | ||
390 | ret = nouveau_object_new(abi16->client, NVDRM_CHAN | init->channel, | 401 | ret = nvif_client_ioctl(client, &args, sizeof(args)); |
391 | init->handle, init->class, NULL, 0, &object); | ||
392 | return nouveau_abi16_put(abi16, ret); | 402 | return nouveau_abi16_put(abi16, ret); |
393 | } | 403 | } |
394 | 404 | ||
@@ -396,29 +406,38 @@ int | |||
396 | nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) | 406 | nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) |
397 | { | 407 | { |
398 | struct drm_nouveau_notifierobj_alloc *info = data; | 408 | struct drm_nouveau_notifierobj_alloc *info = data; |
409 | struct { | ||
410 | struct nvif_ioctl_v0 ioctl; | ||
411 | struct nvif_ioctl_new_v0 new; | ||
412 | struct nv_dma_v0 ctxdma; | ||
413 | } args = { | ||
414 | .ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY, | ||
415 | .ioctl.type = NVIF_IOCTL_V0_NEW, | ||
416 | .ioctl.path_nr = 3, | ||
417 | .ioctl.path[2] = NOUVEAU_ABI16_CLIENT, | ||
418 | .ioctl.path[1] = NOUVEAU_ABI16_DEVICE, | ||
419 | .ioctl.path[0] = NOUVEAU_ABI16_CHAN(info->channel), | ||
420 | .new.route = NVDRM_OBJECT_ABI16, | ||
421 | .new.handle = info->handle, | ||
422 | .new.oclass = NV_DMA_IN_MEMORY, | ||
423 | }; | ||
399 | struct nouveau_drm *drm = nouveau_drm(dev); | 424 | struct nouveau_drm *drm = nouveau_drm(dev); |
400 | struct nouveau_device *device = nv_device(drm->device); | ||
401 | struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); | 425 | struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); |
402 | struct nouveau_abi16_chan *chan = NULL, *temp; | 426 | struct nouveau_abi16_chan *chan; |
403 | struct nouveau_abi16_ntfy *ntfy; | 427 | struct nouveau_abi16_ntfy *ntfy; |
404 | struct nouveau_object *object; | 428 | struct nvif_device *device = &abi16->device; |
405 | struct nv_dma_class args = {}; | 429 | struct nvif_client *client; |
406 | int ret; | 430 | int ret; |
407 | 431 | ||
408 | if (unlikely(!abi16)) | 432 | if (unlikely(!abi16)) |
409 | return -ENOMEM; | 433 | return -ENOMEM; |
410 | 434 | ||
411 | /* completely unnecessary for these chipsets... */ | 435 | /* completely unnecessary for these chipsets... */ |
412 | if (unlikely(nv_device(abi16->device)->card_type >= NV_C0)) | 436 | if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI)) |
413 | return nouveau_abi16_put(abi16, -EINVAL); | 437 | return nouveau_abi16_put(abi16, -EINVAL); |
438 | client = nvif_client(nvif_object(&abi16->device)); | ||
414 | 439 | ||
415 | list_for_each_entry(temp, &abi16->channels, head) { | 440 | chan = nouveau_abi16_chan(abi16, info->channel); |
416 | if (temp->chan->handle == (NVDRM_CHAN | info->channel)) { | ||
417 | chan = temp; | ||
418 | break; | ||
419 | } | ||
420 | } | ||
421 | |||
422 | if (!chan) | 441 | if (!chan) |
423 | return nouveau_abi16_put(abi16, -ENOENT); | 442 | return nouveau_abi16_put(abi16, -ENOENT); |
424 | 443 | ||
@@ -434,26 +453,29 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) | |||
434 | if (ret) | 453 | if (ret) |
435 | goto done; | 454 | goto done; |
436 | 455 | ||
437 | args.start = ntfy->node->offset; | 456 | args.ctxdma.start = ntfy->node->offset; |
438 | args.limit = ntfy->node->offset + ntfy->node->length - 1; | 457 | args.ctxdma.limit = ntfy->node->offset + ntfy->node->length - 1; |
439 | if (device->card_type >= NV_50) { | 458 | if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) { |
440 | args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM; | 459 | args.ctxdma.target = NV_DMA_V0_TARGET_VM; |
441 | args.start += chan->ntfy_vma.offset; | 460 | args.ctxdma.access = NV_DMA_V0_ACCESS_VM; |
442 | args.limit += chan->ntfy_vma.offset; | 461 | args.ctxdma.start += chan->ntfy_vma.offset; |
462 | args.ctxdma.limit += chan->ntfy_vma.offset; | ||
443 | } else | 463 | } else |
444 | if (drm->agp.stat == ENABLED) { | 464 | if (drm->agp.stat == ENABLED) { |
445 | args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR; | 465 | args.ctxdma.target = NV_DMA_V0_TARGET_AGP; |
446 | args.start += drm->agp.base + chan->ntfy->bo.offset; | 466 | args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR; |
447 | args.limit += drm->agp.base + chan->ntfy->bo.offset; | 467 | args.ctxdma.start += drm->agp.base + chan->ntfy->bo.offset; |
468 | args.ctxdma.limit += drm->agp.base + chan->ntfy->bo.offset; | ||
469 | client->super = true; | ||
448 | } else { | 470 | } else { |
449 | args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR; | 471 | args.ctxdma.target = NV_DMA_V0_TARGET_VM; |
450 | args.start += chan->ntfy->bo.offset; | 472 | args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR; |
451 | args.limit += chan->ntfy->bo.offset; | 473 | args.ctxdma.start += chan->ntfy->bo.offset; |
474 | args.ctxdma.limit += chan->ntfy->bo.offset; | ||
452 | } | 475 | } |
453 | 476 | ||
454 | ret = nouveau_object_new(abi16->client, chan->chan->handle, | 477 | ret = nvif_client_ioctl(client, &args, sizeof(args)); |
455 | ntfy->handle, 0x003d, &args, | 478 | client->super = false; |
456 | sizeof(args), &object); | ||
457 | if (ret) | 479 | if (ret) |
458 | goto done; | 480 | goto done; |
459 | 481 | ||
@@ -469,28 +491,36 @@ int | |||
469 | nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS) | 491 | nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS) |
470 | { | 492 | { |
471 | struct drm_nouveau_gpuobj_free *fini = data; | 493 | struct drm_nouveau_gpuobj_free *fini = data; |
494 | struct { | ||
495 | struct nvif_ioctl_v0 ioctl; | ||
496 | struct nvif_ioctl_del del; | ||
497 | } args = { | ||
498 | .ioctl.owner = NVDRM_OBJECT_ABI16, | ||
499 | .ioctl.type = NVIF_IOCTL_V0_DEL, | ||
500 | .ioctl.path_nr = 4, | ||
501 | .ioctl.path[3] = NOUVEAU_ABI16_CLIENT, | ||
502 | .ioctl.path[2] = NOUVEAU_ABI16_DEVICE, | ||
503 | .ioctl.path[1] = NOUVEAU_ABI16_CHAN(fini->channel), | ||
504 | .ioctl.path[0] = fini->handle, | ||
505 | }; | ||
472 | struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); | 506 | struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); |
473 | struct nouveau_abi16_chan *chan = NULL, *temp; | 507 | struct nouveau_abi16_chan *chan; |
474 | struct nouveau_abi16_ntfy *ntfy; | 508 | struct nouveau_abi16_ntfy *ntfy; |
509 | struct nvif_client *client; | ||
475 | int ret; | 510 | int ret; |
476 | 511 | ||
477 | if (unlikely(!abi16)) | 512 | if (unlikely(!abi16)) |
478 | return -ENOMEM; | 513 | return -ENOMEM; |
479 | 514 | ||
480 | list_for_each_entry(temp, &abi16->channels, head) { | 515 | chan = nouveau_abi16_chan(abi16, fini->channel); |
481 | if (temp->chan->handle == (NVDRM_CHAN | fini->channel)) { | ||
482 | chan = temp; | ||
483 | break; | ||
484 | } | ||
485 | } | ||
486 | |||
487 | if (!chan) | 516 | if (!chan) |
488 | return nouveau_abi16_put(abi16, -ENOENT); | 517 | return nouveau_abi16_put(abi16, -ENOENT); |
518 | client = nvif_client(nvif_object(&abi16->device)); | ||
489 | 519 | ||
490 | /* synchronize with the user channel and destroy the gpu object */ | 520 | /* synchronize with the user channel and destroy the gpu object */ |
491 | nouveau_channel_idle(chan->chan); | 521 | nouveau_channel_idle(chan->chan); |
492 | 522 | ||
493 | ret = nouveau_object_del(abi16->client, chan->chan->handle, fini->handle); | 523 | ret = nvif_client_ioctl(client, &args, sizeof(args)); |
494 | if (ret) | 524 | if (ret) |
495 | return nouveau_abi16_put(abi16, ret); | 525 | return nouveau_abi16_put(abi16, ret); |
496 | 526 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h index 90004081a501..39844e6bfbff 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.h +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h | |||
@@ -28,8 +28,7 @@ struct nouveau_abi16_chan { | |||
28 | }; | 28 | }; |
29 | 29 | ||
30 | struct nouveau_abi16 { | 30 | struct nouveau_abi16 { |
31 | struct nouveau_object *client; | 31 | struct nvif_device device; |
32 | struct nouveau_object *device; | ||
33 | struct list_head channels; | 32 | struct list_head channels; |
34 | u64 handles; | 33 | u64 handles; |
35 | }; | 34 | }; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c index 51666daddb94..1f6f6ba6847a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_agp.c +++ b/drivers/gpu/drm/nouveau/nouveau_agp.c | |||
@@ -1,7 +1,5 @@ | |||
1 | #include <linux/module.h> | 1 | #include <linux/module.h> |
2 | 2 | ||
3 | #include <core/device.h> | ||
4 | |||
5 | #include "nouveau_drm.h" | 3 | #include "nouveau_drm.h" |
6 | #include "nouveau_agp.h" | 4 | #include "nouveau_agp.h" |
7 | #include "nouveau_reg.h" | 5 | #include "nouveau_reg.h" |
@@ -29,7 +27,7 @@ static struct nouveau_agpmode_quirk nouveau_agpmode_quirk_list[] = { | |||
29 | static unsigned long | 27 | static unsigned long |
30 | get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info) | 28 | get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info) |
31 | { | 29 | { |
32 | struct nouveau_device *device = nv_device(drm->device); | 30 | struct nvif_device *device = &drm->device; |
33 | struct nouveau_agpmode_quirk *quirk = nouveau_agpmode_quirk_list; | 31 | struct nouveau_agpmode_quirk *quirk = nouveau_agpmode_quirk_list; |
34 | int agpmode = nouveau_agpmode; | 32 | int agpmode = nouveau_agpmode; |
35 | unsigned long mode = info->mode; | 33 | unsigned long mode = info->mode; |
@@ -38,7 +36,7 @@ get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info) | |||
38 | * FW seems to be broken on nv18, it makes the card lock up | 36 | * FW seems to be broken on nv18, it makes the card lock up |
39 | * randomly. | 37 | * randomly. |
40 | */ | 38 | */ |
41 | if (device->chipset == 0x18) | 39 | if (device->info.chipset == 0x18) |
42 | mode &= ~PCI_AGP_COMMAND_FW; | 40 | mode &= ~PCI_AGP_COMMAND_FW; |
43 | 41 | ||
44 | /* | 42 | /* |
@@ -47,10 +45,10 @@ get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info) | |||
47 | while (agpmode == -1 && quirk->hostbridge_vendor) { | 45 | while (agpmode == -1 && quirk->hostbridge_vendor) { |
48 | if (info->id_vendor == quirk->hostbridge_vendor && | 46 | if (info->id_vendor == quirk->hostbridge_vendor && |
49 | info->id_device == quirk->hostbridge_device && | 47 | info->id_device == quirk->hostbridge_device && |
50 | device->pdev->vendor == quirk->chip_vendor && | 48 | nvkm_device(device)->pdev->vendor == quirk->chip_vendor && |
51 | device->pdev->device == quirk->chip_device) { | 49 | nvkm_device(device)->pdev->device == quirk->chip_device) { |
52 | agpmode = quirk->mode; | 50 | agpmode = quirk->mode; |
53 | nv_info(device, "Forcing agp mode to %dX. Use agpmode to override.\n", | 51 | NV_INFO(drm, "Forcing agp mode to %dX. Use agpmode to override.\n", |
54 | agpmode); | 52 | agpmode); |
55 | break; | 53 | break; |
56 | } | 54 | } |
@@ -104,7 +102,7 @@ void | |||
104 | nouveau_agp_reset(struct nouveau_drm *drm) | 102 | nouveau_agp_reset(struct nouveau_drm *drm) |
105 | { | 103 | { |
106 | #if __OS_HAS_AGP | 104 | #if __OS_HAS_AGP |
107 | struct nouveau_device *device = nv_device(drm->device); | 105 | struct nvif_device *device = &drm->device; |
108 | struct drm_device *dev = drm->dev; | 106 | struct drm_device *dev = drm->dev; |
109 | u32 save[2]; | 107 | u32 save[2]; |
110 | int ret; | 108 | int ret; |
@@ -115,7 +113,7 @@ nouveau_agp_reset(struct nouveau_drm *drm) | |||
115 | /* First of all, disable fast writes, otherwise if it's | 113 | /* First of all, disable fast writes, otherwise if it's |
116 | * already enabled in the AGP bridge and we disable the card's | 114 | * already enabled in the AGP bridge and we disable the card's |
117 | * AGP controller we might be locking ourselves out of it. */ | 115 | * AGP controller we might be locking ourselves out of it. */ |
118 | if ((nv_rd32(device, NV04_PBUS_PCI_NV_19) | | 116 | if ((nvif_rd32(device, NV04_PBUS_PCI_NV_19) | |
119 | dev->agp->mode) & PCI_AGP_COMMAND_FW) { | 117 | dev->agp->mode) & PCI_AGP_COMMAND_FW) { |
120 | struct drm_agp_info info; | 118 | struct drm_agp_info info; |
121 | struct drm_agp_mode mode; | 119 | struct drm_agp_mode mode; |
@@ -134,15 +132,15 @@ nouveau_agp_reset(struct nouveau_drm *drm) | |||
134 | 132 | ||
135 | 133 | ||
136 | /* clear busmaster bit, and disable AGP */ | 134 | /* clear busmaster bit, and disable AGP */ |
137 | save[0] = nv_mask(device, NV04_PBUS_PCI_NV_1, 0x00000004, 0x00000000); | 135 | save[0] = nvif_mask(device, NV04_PBUS_PCI_NV_1, 0x00000004, 0x00000000); |
138 | nv_wr32(device, NV04_PBUS_PCI_NV_19, 0); | 136 | nvif_wr32(device, NV04_PBUS_PCI_NV_19, 0); |
139 | 137 | ||
140 | /* reset PGRAPH, PFIFO and PTIMER */ | 138 | /* reset PGRAPH, PFIFO and PTIMER */ |
141 | save[1] = nv_mask(device, 0x000200, 0x00011100, 0x00000000); | 139 | save[1] = nvif_mask(device, 0x000200, 0x00011100, 0x00000000); |
142 | nv_mask(device, 0x000200, 0x00011100, save[1]); | 140 | nvif_mask(device, 0x000200, 0x00011100, save[1]); |
143 | 141 | ||
144 | /* and restore bustmaster bit (gives effect of resetting AGP) */ | 142 | /* and restore bustmaster bit (gives effect of resetting AGP) */ |
145 | nv_wr32(device, NV04_PBUS_PCI_NV_1, save[0]); | 143 | nvif_wr32(device, NV04_PBUS_PCI_NV_1, save[0]); |
146 | #endif | 144 | #endif |
147 | } | 145 | } |
148 | 146 | ||
@@ -150,7 +148,6 @@ void | |||
150 | nouveau_agp_init(struct nouveau_drm *drm) | 148 | nouveau_agp_init(struct nouveau_drm *drm) |
151 | { | 149 | { |
152 | #if __OS_HAS_AGP | 150 | #if __OS_HAS_AGP |
153 | struct nouveau_device *device = nv_device(drm->device); | ||
154 | struct drm_device *dev = drm->dev; | 151 | struct drm_device *dev = drm->dev; |
155 | struct drm_agp_info info; | 152 | struct drm_agp_info info; |
156 | struct drm_agp_mode mode; | 153 | struct drm_agp_mode mode; |
@@ -162,13 +159,13 @@ nouveau_agp_init(struct nouveau_drm *drm) | |||
162 | 159 | ||
163 | ret = drm_agp_acquire(dev); | 160 | ret = drm_agp_acquire(dev); |
164 | if (ret) { | 161 | if (ret) { |
165 | nv_error(device, "unable to acquire AGP: %d\n", ret); | 162 | NV_ERROR(drm, "unable to acquire AGP: %d\n", ret); |
166 | return; | 163 | return; |
167 | } | 164 | } |
168 | 165 | ||
169 | ret = drm_agp_info(dev, &info); | 166 | ret = drm_agp_info(dev, &info); |
170 | if (ret) { | 167 | if (ret) { |
171 | nv_error(device, "unable to get AGP info: %d\n", ret); | 168 | NV_ERROR(drm, "unable to get AGP info: %d\n", ret); |
172 | return; | 169 | return; |
173 | } | 170 | } |
174 | 171 | ||
@@ -177,7 +174,7 @@ nouveau_agp_init(struct nouveau_drm *drm) | |||
177 | 174 | ||
178 | ret = drm_agp_enable(dev, mode); | 175 | ret = drm_agp_enable(dev, mode); |
179 | if (ret) { | 176 | if (ret) { |
180 | nv_error(device, "unable to enable AGP: %d\n", ret); | 177 | NV_ERROR(drm, "unable to enable AGP: %d\n", ret); |
181 | return; | 178 | return; |
182 | } | 179 | } |
183 | 180 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 2c1e4aad7da3..e566c5b53651 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c | |||
@@ -40,8 +40,8 @@ static int | |||
40 | nv40_get_intensity(struct backlight_device *bd) | 40 | nv40_get_intensity(struct backlight_device *bd) |
41 | { | 41 | { |
42 | struct nouveau_drm *drm = bl_get_data(bd); | 42 | struct nouveau_drm *drm = bl_get_data(bd); |
43 | struct nouveau_device *device = nv_device(drm->device); | 43 | struct nvif_device *device = &drm->device; |
44 | int val = (nv_rd32(device, NV40_PMC_BACKLIGHT) & | 44 | int val = (nvif_rd32(device, NV40_PMC_BACKLIGHT) & |
45 | NV40_PMC_BACKLIGHT_MASK) >> 16; | 45 | NV40_PMC_BACKLIGHT_MASK) >> 16; |
46 | 46 | ||
47 | return val; | 47 | return val; |
@@ -51,11 +51,11 @@ static int | |||
51 | nv40_set_intensity(struct backlight_device *bd) | 51 | nv40_set_intensity(struct backlight_device *bd) |
52 | { | 52 | { |
53 | struct nouveau_drm *drm = bl_get_data(bd); | 53 | struct nouveau_drm *drm = bl_get_data(bd); |
54 | struct nouveau_device *device = nv_device(drm->device); | 54 | struct nvif_device *device = &drm->device; |
55 | int val = bd->props.brightness; | 55 | int val = bd->props.brightness; |
56 | int reg = nv_rd32(device, NV40_PMC_BACKLIGHT); | 56 | int reg = nvif_rd32(device, NV40_PMC_BACKLIGHT); |
57 | 57 | ||
58 | nv_wr32(device, NV40_PMC_BACKLIGHT, | 58 | nvif_wr32(device, NV40_PMC_BACKLIGHT, |
59 | (val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK)); | 59 | (val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK)); |
60 | 60 | ||
61 | return 0; | 61 | return 0; |
@@ -71,11 +71,11 @@ static int | |||
71 | nv40_backlight_init(struct drm_connector *connector) | 71 | nv40_backlight_init(struct drm_connector *connector) |
72 | { | 72 | { |
73 | struct nouveau_drm *drm = nouveau_drm(connector->dev); | 73 | struct nouveau_drm *drm = nouveau_drm(connector->dev); |
74 | struct nouveau_device *device = nv_device(drm->device); | 74 | struct nvif_device *device = &drm->device; |
75 | struct backlight_properties props; | 75 | struct backlight_properties props; |
76 | struct backlight_device *bd; | 76 | struct backlight_device *bd; |
77 | 77 | ||
78 | if (!(nv_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)) | 78 | if (!(nvif_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)) |
79 | return 0; | 79 | return 0; |
80 | 80 | ||
81 | memset(&props, 0, sizeof(struct backlight_properties)); | 81 | memset(&props, 0, sizeof(struct backlight_properties)); |
@@ -97,12 +97,12 @@ nv50_get_intensity(struct backlight_device *bd) | |||
97 | { | 97 | { |
98 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); | 98 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); |
99 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); | 99 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); |
100 | struct nouveau_device *device = nv_device(drm->device); | 100 | struct nvif_device *device = &drm->device; |
101 | int or = nv_encoder->or; | 101 | int or = nv_encoder->or; |
102 | u32 div = 1025; | 102 | u32 div = 1025; |
103 | u32 val; | 103 | u32 val; |
104 | 104 | ||
105 | val = nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(or)); | 105 | val = nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(or)); |
106 | val &= NV50_PDISP_SOR_PWM_CTL_VAL; | 106 | val &= NV50_PDISP_SOR_PWM_CTL_VAL; |
107 | return ((val * 100) + (div / 2)) / div; | 107 | return ((val * 100) + (div / 2)) / div; |
108 | } | 108 | } |
@@ -112,12 +112,12 @@ nv50_set_intensity(struct backlight_device *bd) | |||
112 | { | 112 | { |
113 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); | 113 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); |
114 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); | 114 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); |
115 | struct nouveau_device *device = nv_device(drm->device); | 115 | struct nvif_device *device = &drm->device; |
116 | int or = nv_encoder->or; | 116 | int or = nv_encoder->or; |
117 | u32 div = 1025; | 117 | u32 div = 1025; |
118 | u32 val = (bd->props.brightness * div) / 100; | 118 | u32 val = (bd->props.brightness * div) / 100; |
119 | 119 | ||
120 | nv_wr32(device, NV50_PDISP_SOR_PWM_CTL(or), | 120 | nvif_wr32(device, NV50_PDISP_SOR_PWM_CTL(or), |
121 | NV50_PDISP_SOR_PWM_CTL_NEW | val); | 121 | NV50_PDISP_SOR_PWM_CTL_NEW | val); |
122 | return 0; | 122 | return 0; |
123 | } | 123 | } |
@@ -133,12 +133,12 @@ nva3_get_intensity(struct backlight_device *bd) | |||
133 | { | 133 | { |
134 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); | 134 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); |
135 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); | 135 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); |
136 | struct nouveau_device *device = nv_device(drm->device); | 136 | struct nvif_device *device = &drm->device; |
137 | int or = nv_encoder->or; | 137 | int or = nv_encoder->or; |
138 | u32 div, val; | 138 | u32 div, val; |
139 | 139 | ||
140 | div = nv_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); | 140 | div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); |
141 | val = nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(or)); | 141 | val = nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(or)); |
142 | val &= NVA3_PDISP_SOR_PWM_CTL_VAL; | 142 | val &= NVA3_PDISP_SOR_PWM_CTL_VAL; |
143 | if (div && div >= val) | 143 | if (div && div >= val) |
144 | return ((val * 100) + (div / 2)) / div; | 144 | return ((val * 100) + (div / 2)) / div; |
@@ -151,14 +151,14 @@ nva3_set_intensity(struct backlight_device *bd) | |||
151 | { | 151 | { |
152 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); | 152 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); |
153 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); | 153 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); |
154 | struct nouveau_device *device = nv_device(drm->device); | 154 | struct nvif_device *device = &drm->device; |
155 | int or = nv_encoder->or; | 155 | int or = nv_encoder->or; |
156 | u32 div, val; | 156 | u32 div, val; |
157 | 157 | ||
158 | div = nv_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); | 158 | div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); |
159 | val = (bd->props.brightness * div) / 100; | 159 | val = (bd->props.brightness * div) / 100; |
160 | if (div) { | 160 | if (div) { |
161 | nv_wr32(device, NV50_PDISP_SOR_PWM_CTL(or), val | | 161 | nvif_wr32(device, NV50_PDISP_SOR_PWM_CTL(or), val | |
162 | NV50_PDISP_SOR_PWM_CTL_NEW | | 162 | NV50_PDISP_SOR_PWM_CTL_NEW | |
163 | NVA3_PDISP_SOR_PWM_CTL_UNK); | 163 | NVA3_PDISP_SOR_PWM_CTL_UNK); |
164 | return 0; | 164 | return 0; |
@@ -177,7 +177,7 @@ static int | |||
177 | nv50_backlight_init(struct drm_connector *connector) | 177 | nv50_backlight_init(struct drm_connector *connector) |
178 | { | 178 | { |
179 | struct nouveau_drm *drm = nouveau_drm(connector->dev); | 179 | struct nouveau_drm *drm = nouveau_drm(connector->dev); |
180 | struct nouveau_device *device = nv_device(drm->device); | 180 | struct nvif_device *device = &drm->device; |
181 | struct nouveau_encoder *nv_encoder; | 181 | struct nouveau_encoder *nv_encoder; |
182 | struct backlight_properties props; | 182 | struct backlight_properties props; |
183 | struct backlight_device *bd; | 183 | struct backlight_device *bd; |
@@ -190,12 +190,12 @@ nv50_backlight_init(struct drm_connector *connector) | |||
190 | return -ENODEV; | 190 | return -ENODEV; |
191 | } | 191 | } |
192 | 192 | ||
193 | if (!nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or))) | 193 | if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or))) |
194 | return 0; | 194 | return 0; |
195 | 195 | ||
196 | if (device->chipset <= 0xa0 || | 196 | if (device->info.chipset <= 0xa0 || |
197 | device->chipset == 0xaa || | 197 | device->info.chipset == 0xaa || |
198 | device->chipset == 0xac) | 198 | device->info.chipset == 0xac) |
199 | ops = &nv50_bl_ops; | 199 | ops = &nv50_bl_ops; |
200 | else | 200 | else |
201 | ops = &nva3_bl_ops; | 201 | ops = &nva3_bl_ops; |
@@ -218,7 +218,7 @@ int | |||
218 | nouveau_backlight_init(struct drm_device *dev) | 218 | nouveau_backlight_init(struct drm_device *dev) |
219 | { | 219 | { |
220 | struct nouveau_drm *drm = nouveau_drm(dev); | 220 | struct nouveau_drm *drm = nouveau_drm(dev); |
221 | struct nouveau_device *device = nv_device(drm->device); | 221 | struct nvif_device *device = &drm->device; |
222 | struct drm_connector *connector; | 222 | struct drm_connector *connector; |
223 | 223 | ||
224 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 224 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
@@ -226,13 +226,12 @@ nouveau_backlight_init(struct drm_device *dev) | |||
226 | connector->connector_type != DRM_MODE_CONNECTOR_eDP) | 226 | connector->connector_type != DRM_MODE_CONNECTOR_eDP) |
227 | continue; | 227 | continue; |
228 | 228 | ||
229 | switch (device->card_type) { | 229 | switch (device->info.family) { |
230 | case NV_40: | 230 | case NV_DEVICE_INFO_V0_CURIE: |
231 | return nv40_backlight_init(connector); | 231 | return nv40_backlight_init(connector); |
232 | case NV_50: | 232 | case NV_DEVICE_INFO_V0_TESLA: |
233 | case NV_C0: | 233 | case NV_DEVICE_INFO_V0_FERMI: |
234 | case NV_D0: | 234 | case NV_DEVICE_INFO_V0_KEPLER: |
235 | case NV_E0: | ||
236 | return nv50_backlight_init(connector); | 235 | return nv50_backlight_init(connector); |
237 | default: | 236 | default: |
238 | break; | 237 | break; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 8268a4ccac15..dae2c96deef8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
@@ -22,8 +22,6 @@ | |||
22 | * SOFTWARE. | 22 | * SOFTWARE. |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <subdev/bios.h> | ||
26 | |||
27 | #include <drm/drmP.h> | 25 | #include <drm/drmP.h> |
28 | 26 | ||
29 | #include "nouveau_drm.h" | 27 | #include "nouveau_drm.h" |
@@ -217,7 +215,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head | |||
217 | */ | 215 | */ |
218 | 216 | ||
219 | struct nouveau_drm *drm = nouveau_drm(dev); | 217 | struct nouveau_drm *drm = nouveau_drm(dev); |
220 | struct nouveau_device *device = nv_device(drm->device); | 218 | struct nvif_device *device = &drm->device; |
221 | struct nvbios *bios = &drm->vbios; | 219 | struct nvbios *bios = &drm->vbios; |
222 | uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer]; | 220 | uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer]; |
223 | uint32_t sel_clk_binding, sel_clk; | 221 | uint32_t sel_clk_binding, sel_clk; |
@@ -240,7 +238,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head | |||
240 | NV_INFO(drm, "Calling LVDS script %d:\n", script); | 238 | NV_INFO(drm, "Calling LVDS script %d:\n", script); |
241 | 239 | ||
242 | /* don't let script change pll->head binding */ | 240 | /* don't let script change pll->head binding */ |
243 | sel_clk_binding = nv_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000; | 241 | sel_clk_binding = nvif_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000; |
244 | 242 | ||
245 | if (lvds_ver < 0x30) | 243 | if (lvds_ver < 0x30) |
246 | ret = call_lvds_manufacturer_script(dev, dcbent, head, script); | 244 | ret = call_lvds_manufacturer_script(dev, dcbent, head, script); |
@@ -252,7 +250,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head | |||
252 | sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000; | 250 | sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000; |
253 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding); | 251 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding); |
254 | /* some scripts set a value in NV_PBUS_POWERCTRL_2 and break video overlay */ | 252 | /* some scripts set a value in NV_PBUS_POWERCTRL_2 and break video overlay */ |
255 | nv_wr32(device, NV_PBUS_POWERCTRL_2, 0); | 253 | nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0); |
256 | 254 | ||
257 | return ret; | 255 | return ret; |
258 | } | 256 | } |
@@ -320,7 +318,7 @@ static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct n | |||
320 | static int | 318 | static int |
321 | get_fp_strap(struct drm_device *dev, struct nvbios *bios) | 319 | get_fp_strap(struct drm_device *dev, struct nvbios *bios) |
322 | { | 320 | { |
323 | struct nouveau_device *device = nouveau_dev(dev); | 321 | struct nvif_device *device = &nouveau_drm(dev)->device; |
324 | 322 | ||
325 | /* | 323 | /* |
326 | * The fp strap is normally dictated by the "User Strap" in | 324 | * The fp strap is normally dictated by the "User Strap" in |
@@ -334,10 +332,10 @@ get_fp_strap(struct drm_device *dev, struct nvbios *bios) | |||
334 | if (bios->major_version < 5 && bios->data[0x48] & 0x4) | 332 | if (bios->major_version < 5 && bios->data[0x48] & 0x4) |
335 | return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf; | 333 | return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf; |
336 | 334 | ||
337 | if (device->card_type >= NV_50) | 335 | if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) |
338 | return (nv_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf; | 336 | return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf; |
339 | else | 337 | else |
340 | return (nv_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf; | 338 | return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf; |
341 | } | 339 | } |
342 | 340 | ||
343 | static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios) | 341 | static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios) |
@@ -636,7 +634,7 @@ int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, | |||
636 | */ | 634 | */ |
637 | 635 | ||
638 | struct nouveau_drm *drm = nouveau_drm(dev); | 636 | struct nouveau_drm *drm = nouveau_drm(dev); |
639 | struct nouveau_device *device = nv_device(drm->device); | 637 | struct nvif_device *device = &drm->device; |
640 | struct nvbios *bios = &drm->vbios; | 638 | struct nvbios *bios = &drm->vbios; |
641 | int cv = bios->chip_version; | 639 | int cv = bios->chip_version; |
642 | uint16_t clktable = 0, scriptptr; | 640 | uint16_t clktable = 0, scriptptr; |
@@ -670,7 +668,7 @@ int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, | |||
670 | } | 668 | } |
671 | 669 | ||
672 | /* don't let script change pll->head binding */ | 670 | /* don't let script change pll->head binding */ |
673 | sel_clk_binding = nv_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000; | 671 | sel_clk_binding = nvif_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000; |
674 | run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000); | 672 | run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000); |
675 | sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000; | 673 | sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000; |
676 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding); | 674 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding); |
@@ -1253,7 +1251,7 @@ olddcb_table(struct drm_device *dev) | |||
1253 | struct nouveau_drm *drm = nouveau_drm(dev); | 1251 | struct nouveau_drm *drm = nouveau_drm(dev); |
1254 | u8 *dcb = NULL; | 1252 | u8 *dcb = NULL; |
1255 | 1253 | ||
1256 | if (nv_device(drm->device)->card_type > NV_04) | 1254 | if (drm->device.info.family > NV_DEVICE_INFO_V0_TNT) |
1257 | dcb = ROMPTR(dev, drm->vbios.data[0x36]); | 1255 | dcb = ROMPTR(dev, drm->vbios.data[0x36]); |
1258 | if (!dcb) { | 1256 | if (!dcb) { |
1259 | NV_WARN(drm, "No DCB data found in VBIOS\n"); | 1257 | NV_WARN(drm, "No DCB data found in VBIOS\n"); |
@@ -1399,6 +1397,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, | |||
1399 | uint32_t conn, uint32_t conf, struct dcb_output *entry) | 1397 | uint32_t conn, uint32_t conf, struct dcb_output *entry) |
1400 | { | 1398 | { |
1401 | struct nouveau_drm *drm = nouveau_drm(dev); | 1399 | struct nouveau_drm *drm = nouveau_drm(dev); |
1400 | int link = 0; | ||
1402 | 1401 | ||
1403 | entry->type = conn & 0xf; | 1402 | entry->type = conn & 0xf; |
1404 | entry->i2c_index = (conn >> 4) & 0xf; | 1403 | entry->i2c_index = (conn >> 4) & 0xf; |
@@ -1444,6 +1443,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, | |||
1444 | if (conf & 0x4) | 1443 | if (conf & 0x4) |
1445 | entry->lvdsconf.use_power_scripts = true; | 1444 | entry->lvdsconf.use_power_scripts = true; |
1446 | entry->lvdsconf.sor.link = (conf & 0x00000030) >> 4; | 1445 | entry->lvdsconf.sor.link = (conf & 0x00000030) >> 4; |
1446 | link = entry->lvdsconf.sor.link; | ||
1447 | } | 1447 | } |
1448 | if (conf & mask) { | 1448 | if (conf & mask) { |
1449 | /* | 1449 | /* |
@@ -1492,17 +1492,18 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, | |||
1492 | entry->dpconf.link_nr = 1; | 1492 | entry->dpconf.link_nr = 1; |
1493 | break; | 1493 | break; |
1494 | } | 1494 | } |
1495 | link = entry->dpconf.sor.link; | ||
1495 | break; | 1496 | break; |
1496 | case DCB_OUTPUT_TMDS: | 1497 | case DCB_OUTPUT_TMDS: |
1497 | if (dcb->version >= 0x40) { | 1498 | if (dcb->version >= 0x40) { |
1498 | entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4; | 1499 | entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4; |
1499 | entry->extdev = (conf & 0x0000ff00) >> 8; | 1500 | entry->extdev = (conf & 0x0000ff00) >> 8; |
1501 | link = entry->tmdsconf.sor.link; | ||
1500 | } | 1502 | } |
1501 | else if (dcb->version >= 0x30) | 1503 | else if (dcb->version >= 0x30) |
1502 | entry->tmdsconf.slave_addr = (conf & 0x00000700) >> 8; | 1504 | entry->tmdsconf.slave_addr = (conf & 0x00000700) >> 8; |
1503 | else if (dcb->version >= 0x22) | 1505 | else if (dcb->version >= 0x22) |
1504 | entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4; | 1506 | entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4; |
1505 | |||
1506 | break; | 1507 | break; |
1507 | case DCB_OUTPUT_EOL: | 1508 | case DCB_OUTPUT_EOL: |
1508 | /* weird g80 mobile type that "nv" treats as a terminator */ | 1509 | /* weird g80 mobile type that "nv" treats as a terminator */ |
@@ -1526,6 +1527,8 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, | |||
1526 | if (conf & 0x100000) | 1527 | if (conf & 0x100000) |
1527 | entry->i2c_upper_default = true; | 1528 | entry->i2c_upper_default = true; |
1528 | 1529 | ||
1530 | entry->hasht = (entry->location << 4) | entry->type; | ||
1531 | entry->hashm = (entry->heads << 8) | (link << 6) | entry->or; | ||
1529 | return true; | 1532 | return true; |
1530 | } | 1533 | } |
1531 | 1534 | ||
@@ -1908,7 +1911,7 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio | |||
1908 | */ | 1911 | */ |
1909 | 1912 | ||
1910 | struct nouveau_drm *drm = nouveau_drm(dev); | 1913 | struct nouveau_drm *drm = nouveau_drm(dev); |
1911 | struct nouveau_device *device = nv_device(drm->device); | 1914 | struct nvif_device *device = &drm->device; |
1912 | uint8_t bytes_to_write; | 1915 | uint8_t bytes_to_write; |
1913 | uint16_t hwsq_entry_offset; | 1916 | uint16_t hwsq_entry_offset; |
1914 | int i; | 1917 | int i; |
@@ -1931,15 +1934,15 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio | |||
1931 | hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write; | 1934 | hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write; |
1932 | 1935 | ||
1933 | /* set sequencer control */ | 1936 | /* set sequencer control */ |
1934 | nv_wr32(device, 0x00001304, ROM32(bios->data[hwsq_entry_offset])); | 1937 | nvif_wr32(device, 0x00001304, ROM32(bios->data[hwsq_entry_offset])); |
1935 | bytes_to_write -= 4; | 1938 | bytes_to_write -= 4; |
1936 | 1939 | ||
1937 | /* write ucode */ | 1940 | /* write ucode */ |
1938 | for (i = 0; i < bytes_to_write; i += 4) | 1941 | for (i = 0; i < bytes_to_write; i += 4) |
1939 | nv_wr32(device, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4])); | 1942 | nvif_wr32(device, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4])); |
1940 | 1943 | ||
1941 | /* twiddle NV_PBUS_DEBUG_4 */ | 1944 | /* twiddle NV_PBUS_DEBUG_4 */ |
1942 | nv_wr32(device, NV_PBUS_DEBUG_4, nv_rd32(device, NV_PBUS_DEBUG_4) | 0x18); | 1945 | nvif_wr32(device, NV_PBUS_DEBUG_4, nvif_rd32(device, NV_PBUS_DEBUG_4) | 0x18); |
1943 | 1946 | ||
1944 | return 0; | 1947 | return 0; |
1945 | } | 1948 | } |
@@ -2002,7 +2005,7 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev) | |||
2002 | static bool NVInitVBIOS(struct drm_device *dev) | 2005 | static bool NVInitVBIOS(struct drm_device *dev) |
2003 | { | 2006 | { |
2004 | struct nouveau_drm *drm = nouveau_drm(dev); | 2007 | struct nouveau_drm *drm = nouveau_drm(dev); |
2005 | struct nouveau_bios *bios = nouveau_bios(drm->device); | 2008 | struct nouveau_bios *bios = nvkm_bios(&drm->device); |
2006 | struct nvbios *legacy = &drm->vbios; | 2009 | struct nvbios *legacy = &drm->vbios; |
2007 | 2010 | ||
2008 | memset(legacy, 0, sizeof(struct nvbios)); | 2011 | memset(legacy, 0, sizeof(struct nvbios)); |
@@ -2054,7 +2057,7 @@ nouveau_bios_posted(struct drm_device *dev) | |||
2054 | struct nouveau_drm *drm = nouveau_drm(dev); | 2057 | struct nouveau_drm *drm = nouveau_drm(dev); |
2055 | unsigned htotal; | 2058 | unsigned htotal; |
2056 | 2059 | ||
2057 | if (nv_device(drm->device)->card_type >= NV_50) | 2060 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) |
2058 | return true; | 2061 | return true; |
2059 | 2062 | ||
2060 | htotal = NVReadVgaCrtc(dev, 0, 0x06); | 2063 | htotal = NVReadVgaCrtc(dev, 0, 0x06); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index ba29a701ca1d..da5d631aa5b9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -27,13 +27,9 @@ | |||
27 | * Jeremy Kolb <jkolb@brandeis.edu> | 27 | * Jeremy Kolb <jkolb@brandeis.edu> |
28 | */ | 28 | */ |
29 | 29 | ||
30 | #include <core/engine.h> | 30 | #include <linux/dma-mapping.h> |
31 | #include <linux/swiotlb.h> | 31 | #include <linux/swiotlb.h> |
32 | 32 | ||
33 | #include <subdev/fb.h> | ||
34 | #include <subdev/vm.h> | ||
35 | #include <subdev/bar.h> | ||
36 | |||
37 | #include "nouveau_drm.h" | 33 | #include "nouveau_drm.h" |
38 | #include "nouveau_dma.h" | 34 | #include "nouveau_dma.h" |
39 | #include "nouveau_fence.h" | 35 | #include "nouveau_fence.h" |
@@ -52,7 +48,7 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg, | |||
52 | { | 48 | { |
53 | struct nouveau_drm *drm = nouveau_drm(dev); | 49 | struct nouveau_drm *drm = nouveau_drm(dev); |
54 | int i = reg - drm->tile.reg; | 50 | int i = reg - drm->tile.reg; |
55 | struct nouveau_fb *pfb = nouveau_fb(drm->device); | 51 | struct nouveau_fb *pfb = nvkm_fb(&drm->device); |
56 | struct nouveau_fb_tile *tile = &pfb->tile.region[i]; | 52 | struct nouveau_fb_tile *tile = &pfb->tile.region[i]; |
57 | struct nouveau_engine *engine; | 53 | struct nouveau_engine *engine; |
58 | 54 | ||
@@ -109,7 +105,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr, | |||
109 | u32 size, u32 pitch, u32 flags) | 105 | u32 size, u32 pitch, u32 flags) |
110 | { | 106 | { |
111 | struct nouveau_drm *drm = nouveau_drm(dev); | 107 | struct nouveau_drm *drm = nouveau_drm(dev); |
112 | struct nouveau_fb *pfb = nouveau_fb(drm->device); | 108 | struct nouveau_fb *pfb = nvkm_fb(&drm->device); |
113 | struct nouveau_drm_tile *tile, *found = NULL; | 109 | struct nouveau_drm_tile *tile, *found = NULL; |
114 | int i; | 110 | int i; |
115 | 111 | ||
@@ -153,23 +149,23 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, | |||
153 | int *align, int *size) | 149 | int *align, int *size) |
154 | { | 150 | { |
155 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); | 151 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
156 | struct nouveau_device *device = nv_device(drm->device); | 152 | struct nvif_device *device = &drm->device; |
157 | 153 | ||
158 | if (device->card_type < NV_50) { | 154 | if (device->info.family < NV_DEVICE_INFO_V0_TESLA) { |
159 | if (nvbo->tile_mode) { | 155 | if (nvbo->tile_mode) { |
160 | if (device->chipset >= 0x40) { | 156 | if (device->info.chipset >= 0x40) { |
161 | *align = 65536; | 157 | *align = 65536; |
162 | *size = roundup(*size, 64 * nvbo->tile_mode); | 158 | *size = roundup(*size, 64 * nvbo->tile_mode); |
163 | 159 | ||
164 | } else if (device->chipset >= 0x30) { | 160 | } else if (device->info.chipset >= 0x30) { |
165 | *align = 32768; | 161 | *align = 32768; |
166 | *size = roundup(*size, 64 * nvbo->tile_mode); | 162 | *size = roundup(*size, 64 * nvbo->tile_mode); |
167 | 163 | ||
168 | } else if (device->chipset >= 0x20) { | 164 | } else if (device->info.chipset >= 0x20) { |
169 | *align = 16384; | 165 | *align = 16384; |
170 | *size = roundup(*size, 64 * nvbo->tile_mode); | 166 | *size = roundup(*size, 64 * nvbo->tile_mode); |
171 | 167 | ||
172 | } else if (device->chipset >= 0x10) { | 168 | } else if (device->info.chipset >= 0x10) { |
173 | *align = 16384; | 169 | *align = 16384; |
174 | *size = roundup(*size, 32 * nvbo->tile_mode); | 170 | *size = roundup(*size, 32 * nvbo->tile_mode); |
175 | } | 171 | } |
@@ -196,12 +192,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align, | |||
196 | int lpg_shift = 12; | 192 | int lpg_shift = 12; |
197 | int max_size; | 193 | int max_size; |
198 | 194 | ||
199 | if (drm->client.base.vm) | 195 | if (drm->client.vm) |
200 | lpg_shift = drm->client.base.vm->vmm->lpg_shift; | 196 | lpg_shift = drm->client.vm->vmm->lpg_shift; |
201 | max_size = INT_MAX & ~((1 << lpg_shift) - 1); | 197 | max_size = INT_MAX & ~((1 << lpg_shift) - 1); |
202 | 198 | ||
203 | if (size <= 0 || size > max_size) { | 199 | if (size <= 0 || size > max_size) { |
204 | nv_warn(drm, "skipped size %x\n", (u32)size); | 200 | NV_WARN(drm, "skipped size %x\n", (u32)size); |
205 | return -EINVAL; | 201 | return -EINVAL; |
206 | } | 202 | } |
207 | 203 | ||
@@ -219,9 +215,9 @@ nouveau_bo_new(struct drm_device *dev, int size, int align, | |||
219 | nvbo->bo.bdev = &drm->ttm.bdev; | 215 | nvbo->bo.bdev = &drm->ttm.bdev; |
220 | 216 | ||
221 | nvbo->page_shift = 12; | 217 | nvbo->page_shift = 12; |
222 | if (drm->client.base.vm) { | 218 | if (drm->client.vm) { |
223 | if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024) | 219 | if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024) |
224 | nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift; | 220 | nvbo->page_shift = drm->client.vm->vmm->lpg_shift; |
225 | } | 221 | } |
226 | 222 | ||
227 | nouveau_bo_fixup_align(nvbo, flags, &align, &size); | 223 | nouveau_bo_fixup_align(nvbo, flags, &align, &size); |
@@ -261,11 +257,9 @@ static void | |||
261 | set_placement_range(struct nouveau_bo *nvbo, uint32_t type) | 257 | set_placement_range(struct nouveau_bo *nvbo, uint32_t type) |
262 | { | 258 | { |
263 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); | 259 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
264 | struct nouveau_fb *pfb = nouveau_fb(drm->device); | 260 | u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT; |
265 | u32 vram_pages = pfb->ram->size >> PAGE_SHIFT; | ||
266 | 261 | ||
267 | if ((nv_device(drm->device)->card_type == NV_10 || | 262 | if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS && |
268 | nv_device(drm->device)->card_type == NV_11) && | ||
269 | nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && | 263 | nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && |
270 | nvbo->bo.mem.num_pages < vram_pages / 4) { | 264 | nvbo->bo.mem.num_pages < vram_pages / 4) { |
271 | /* | 265 | /* |
@@ -500,21 +494,28 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
500 | man->default_caching = TTM_PL_FLAG_CACHED; | 494 | man->default_caching = TTM_PL_FLAG_CACHED; |
501 | break; | 495 | break; |
502 | case TTM_PL_VRAM: | 496 | case TTM_PL_VRAM: |
503 | if (nv_device(drm->device)->card_type >= NV_50) { | 497 | man->flags = TTM_MEMTYPE_FLAG_FIXED | |
498 | TTM_MEMTYPE_FLAG_MAPPABLE; | ||
499 | man->available_caching = TTM_PL_FLAG_UNCACHED | | ||
500 | TTM_PL_FLAG_WC; | ||
501 | man->default_caching = TTM_PL_FLAG_WC; | ||
502 | |||
503 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { | ||
504 | /* Some BARs do not support being ioremapped WC */ | ||
505 | if (nvkm_bar(&drm->device)->iomap_uncached) { | ||
506 | man->available_caching = TTM_PL_FLAG_UNCACHED; | ||
507 | man->default_caching = TTM_PL_FLAG_UNCACHED; | ||
508 | } | ||
509 | |||
504 | man->func = &nouveau_vram_manager; | 510 | man->func = &nouveau_vram_manager; |
505 | man->io_reserve_fastpath = false; | 511 | man->io_reserve_fastpath = false; |
506 | man->use_io_reserve_lru = true; | 512 | man->use_io_reserve_lru = true; |
507 | } else { | 513 | } else { |
508 | man->func = &ttm_bo_manager_func; | 514 | man->func = &ttm_bo_manager_func; |
509 | } | 515 | } |
510 | man->flags = TTM_MEMTYPE_FLAG_FIXED | | ||
511 | TTM_MEMTYPE_FLAG_MAPPABLE; | ||
512 | man->available_caching = TTM_PL_FLAG_UNCACHED | | ||
513 | TTM_PL_FLAG_WC; | ||
514 | man->default_caching = TTM_PL_FLAG_WC; | ||
515 | break; | 516 | break; |
516 | case TTM_PL_TT: | 517 | case TTM_PL_TT: |
517 | if (nv_device(drm->device)->card_type >= NV_50) | 518 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) |
518 | man->func = &nouveau_gart_manager; | 519 | man->func = &nouveau_gart_manager; |
519 | else | 520 | else |
520 | if (drm->agp.stat != ENABLED) | 521 | if (drm->agp.stat != ENABLED) |
@@ -763,9 +764,9 @@ nv50_bo_move_init(struct nouveau_channel *chan, u32 handle) | |||
763 | BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); | 764 | BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); |
764 | OUT_RING (chan, handle); | 765 | OUT_RING (chan, handle); |
765 | BEGIN_NV04(chan, NvSubCopy, 0x0180, 3); | 766 | BEGIN_NV04(chan, NvSubCopy, 0x0180, 3); |
766 | OUT_RING (chan, NvNotify0); | 767 | OUT_RING (chan, chan->drm->ntfy.handle); |
767 | OUT_RING (chan, NvDmaFB); | 768 | OUT_RING (chan, chan->vram.handle); |
768 | OUT_RING (chan, NvDmaFB); | 769 | OUT_RING (chan, chan->vram.handle); |
769 | } | 770 | } |
770 | 771 | ||
771 | return ret; | 772 | return ret; |
@@ -852,7 +853,7 @@ nv04_bo_move_init(struct nouveau_channel *chan, u32 handle) | |||
852 | BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); | 853 | BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); |
853 | OUT_RING (chan, handle); | 854 | OUT_RING (chan, handle); |
854 | BEGIN_NV04(chan, NvSubCopy, 0x0180, 1); | 855 | BEGIN_NV04(chan, NvSubCopy, 0x0180, 1); |
855 | OUT_RING (chan, NvNotify0); | 856 | OUT_RING (chan, chan->drm->ntfy.handle); |
856 | } | 857 | } |
857 | 858 | ||
858 | return ret; | 859 | return ret; |
@@ -864,7 +865,7 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, | |||
864 | { | 865 | { |
865 | if (mem->mem_type == TTM_PL_TT) | 866 | if (mem->mem_type == TTM_PL_TT) |
866 | return NvDmaTT; | 867 | return NvDmaTT; |
867 | return NvDmaFB; | 868 | return chan->vram.handle; |
868 | } | 869 | } |
869 | 870 | ||
870 | static int | 871 | static int |
@@ -922,12 +923,12 @@ nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo, | |||
922 | u64 size = (u64)mem->num_pages << PAGE_SHIFT; | 923 | u64 size = (u64)mem->num_pages << PAGE_SHIFT; |
923 | int ret; | 924 | int ret; |
924 | 925 | ||
925 | ret = nouveau_vm_get(nv_client(drm)->vm, size, old_node->page_shift, | 926 | ret = nouveau_vm_get(drm->client.vm, size, old_node->page_shift, |
926 | NV_MEM_ACCESS_RW, &old_node->vma[0]); | 927 | NV_MEM_ACCESS_RW, &old_node->vma[0]); |
927 | if (ret) | 928 | if (ret) |
928 | return ret; | 929 | return ret; |
929 | 930 | ||
930 | ret = nouveau_vm_get(nv_client(drm)->vm, size, new_node->page_shift, | 931 | ret = nouveau_vm_get(drm->client.vm, size, new_node->page_shift, |
931 | NV_MEM_ACCESS_RW, &old_node->vma[1]); | 932 | NV_MEM_ACCESS_RW, &old_node->vma[1]); |
932 | if (ret) { | 933 | if (ret) { |
933 | nouveau_vm_put(&old_node->vma[0]); | 934 | nouveau_vm_put(&old_node->vma[0]); |
@@ -945,6 +946,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, | |||
945 | { | 946 | { |
946 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); | 947 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
947 | struct nouveau_channel *chan = drm->ttm.chan; | 948 | struct nouveau_channel *chan = drm->ttm.chan; |
949 | struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base); | ||
948 | struct nouveau_fence *fence; | 950 | struct nouveau_fence *fence; |
949 | int ret; | 951 | int ret; |
950 | 952 | ||
@@ -952,13 +954,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, | |||
952 | * old nouveau_mem node, these will get cleaned up after ttm has | 954 | * old nouveau_mem node, these will get cleaned up after ttm has |
953 | * destroyed the ttm_mem_reg | 955 | * destroyed the ttm_mem_reg |
954 | */ | 956 | */ |
955 | if (nv_device(drm->device)->card_type >= NV_50) { | 957 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { |
956 | ret = nouveau_bo_move_prep(drm, bo, new_mem); | 958 | ret = nouveau_bo_move_prep(drm, bo, new_mem); |
957 | if (ret) | 959 | if (ret) |
958 | return ret; | 960 | return ret; |
959 | } | 961 | } |
960 | 962 | ||
961 | mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING); | 963 | mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING); |
962 | ret = nouveau_fence_sync(bo->sync_obj, chan); | 964 | ret = nouveau_fence_sync(bo->sync_obj, chan); |
963 | if (ret == 0) { | 965 | if (ret == 0) { |
964 | ret = drm->ttm.move(chan, bo, &bo->mem, new_mem); | 966 | ret = drm->ttm.move(chan, bo, &bo->mem, new_mem); |
@@ -973,7 +975,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, | |||
973 | } | 975 | } |
974 | } | 976 | } |
975 | } | 977 | } |
976 | mutex_unlock(&chan->cli->mutex); | 978 | mutex_unlock(&cli->mutex); |
977 | return ret; | 979 | return ret; |
978 | } | 980 | } |
979 | 981 | ||
@@ -1005,9 +1007,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm) | |||
1005 | int ret; | 1007 | int ret; |
1006 | 1008 | ||
1007 | do { | 1009 | do { |
1008 | struct nouveau_object *object; | ||
1009 | struct nouveau_channel *chan; | 1010 | struct nouveau_channel *chan; |
1010 | u32 handle = (mthd->engine << 16) | mthd->oclass; | ||
1011 | 1011 | ||
1012 | if (mthd->engine) | 1012 | if (mthd->engine) |
1013 | chan = drm->cechan; | 1013 | chan = drm->cechan; |
@@ -1016,13 +1016,14 @@ nouveau_bo_move_init(struct nouveau_drm *drm) | |||
1016 | if (chan == NULL) | 1016 | if (chan == NULL) |
1017 | continue; | 1017 | continue; |
1018 | 1018 | ||
1019 | ret = nouveau_object_new(nv_object(drm), chan->handle, handle, | 1019 | ret = nvif_object_init(chan->object, NULL, |
1020 | mthd->oclass, NULL, 0, &object); | 1020 | mthd->oclass | (mthd->engine << 16), |
1021 | mthd->oclass, NULL, 0, | ||
1022 | &drm->ttm.copy); | ||
1021 | if (ret == 0) { | 1023 | if (ret == 0) { |
1022 | ret = mthd->init(chan, handle); | 1024 | ret = mthd->init(chan, drm->ttm.copy.handle); |
1023 | if (ret) { | 1025 | if (ret) { |
1024 | nouveau_object_del(nv_object(drm), | 1026 | nvif_object_fini(&drm->ttm.copy); |
1025 | chan->handle, handle); | ||
1026 | continue; | 1027 | continue; |
1027 | } | 1028 | } |
1028 | 1029 | ||
@@ -1135,7 +1136,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, | |||
1135 | if (new_mem->mem_type != TTM_PL_VRAM) | 1136 | if (new_mem->mem_type != TTM_PL_VRAM) |
1136 | return 0; | 1137 | return 0; |
1137 | 1138 | ||
1138 | if (nv_device(drm->device)->card_type >= NV_10) { | 1139 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { |
1139 | *new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size, | 1140 | *new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size, |
1140 | nvbo->tile_mode, | 1141 | nvbo->tile_mode, |
1141 | nvbo->tile_flags); | 1142 | nvbo->tile_flags); |
@@ -1166,7 +1167,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, | |||
1166 | struct nouveau_drm_tile *new_tile = NULL; | 1167 | struct nouveau_drm_tile *new_tile = NULL; |
1167 | int ret = 0; | 1168 | int ret = 0; |
1168 | 1169 | ||
1169 | if (nv_device(drm->device)->card_type < NV_50) { | 1170 | if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { |
1170 | ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile); | 1171 | ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile); |
1171 | if (ret) | 1172 | if (ret) |
1172 | return ret; | 1173 | return ret; |
@@ -1203,7 +1204,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, | |||
1203 | ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); | 1204 | ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); |
1204 | 1205 | ||
1205 | out: | 1206 | out: |
1206 | if (nv_device(drm->device)->card_type < NV_50) { | 1207 | if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { |
1207 | if (ret) | 1208 | if (ret) |
1208 | nouveau_bo_vm_cleanup(bo, NULL, &new_tile); | 1209 | nouveau_bo_vm_cleanup(bo, NULL, &new_tile); |
1209 | else | 1210 | else |
@@ -1249,16 +1250,16 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | |||
1249 | mem->bus.is_iomem = !dev->agp->cant_use_aperture; | 1250 | mem->bus.is_iomem = !dev->agp->cant_use_aperture; |
1250 | } | 1251 | } |
1251 | #endif | 1252 | #endif |
1252 | if (nv_device(drm->device)->card_type < NV_50 || !node->memtype) | 1253 | if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype) |
1253 | /* untiled */ | 1254 | /* untiled */ |
1254 | break; | 1255 | break; |
1255 | /* fallthrough, tiled memory */ | 1256 | /* fallthrough, tiled memory */ |
1256 | case TTM_PL_VRAM: | 1257 | case TTM_PL_VRAM: |
1257 | mem->bus.offset = mem->start << PAGE_SHIFT; | 1258 | mem->bus.offset = mem->start << PAGE_SHIFT; |
1258 | mem->bus.base = nv_device_resource_start(nouveau_dev(dev), 1); | 1259 | mem->bus.base = nv_device_resource_start(nvkm_device(&drm->device), 1); |
1259 | mem->bus.is_iomem = true; | 1260 | mem->bus.is_iomem = true; |
1260 | if (nv_device(drm->device)->card_type >= NV_50) { | 1261 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { |
1261 | struct nouveau_bar *bar = nouveau_bar(drm->device); | 1262 | struct nouveau_bar *bar = nvkm_bar(&drm->device); |
1262 | 1263 | ||
1263 | ret = bar->umap(bar, node, NV_MEM_ACCESS_RW, | 1264 | ret = bar->umap(bar, node, NV_MEM_ACCESS_RW, |
1264 | &node->bar_vma); | 1265 | &node->bar_vma); |
@@ -1278,7 +1279,7 @@ static void | |||
1278 | nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | 1279 | nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) |
1279 | { | 1280 | { |
1280 | struct nouveau_drm *drm = nouveau_bdev(bdev); | 1281 | struct nouveau_drm *drm = nouveau_bdev(bdev); |
1281 | struct nouveau_bar *bar = nouveau_bar(drm->device); | 1282 | struct nouveau_bar *bar = nvkm_bar(&drm->device); |
1282 | struct nouveau_mem *node = mem->mm_node; | 1283 | struct nouveau_mem *node = mem->mm_node; |
1283 | 1284 | ||
1284 | if (!node->bar_vma.node) | 1285 | if (!node->bar_vma.node) |
@@ -1292,15 +1293,15 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
1292 | { | 1293 | { |
1293 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); | 1294 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
1294 | struct nouveau_bo *nvbo = nouveau_bo(bo); | 1295 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
1295 | struct nouveau_device *device = nv_device(drm->device); | 1296 | struct nvif_device *device = &drm->device; |
1296 | u32 mappable = nv_device_resource_len(device, 1) >> PAGE_SHIFT; | 1297 | u32 mappable = nv_device_resource_len(nvkm_device(device), 1) >> PAGE_SHIFT; |
1297 | int ret; | 1298 | int ret; |
1298 | 1299 | ||
1299 | /* as long as the bo isn't in vram, and isn't tiled, we've got | 1300 | /* as long as the bo isn't in vram, and isn't tiled, we've got |
1300 | * nothing to do here. | 1301 | * nothing to do here. |
1301 | */ | 1302 | */ |
1302 | if (bo->mem.mem_type != TTM_PL_VRAM) { | 1303 | if (bo->mem.mem_type != TTM_PL_VRAM) { |
1303 | if (nv_device(drm->device)->card_type < NV_50 || | 1304 | if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || |
1304 | !nouveau_bo_tile_layout(nvbo)) | 1305 | !nouveau_bo_tile_layout(nvbo)) |
1305 | return 0; | 1306 | return 0; |
1306 | 1307 | ||
@@ -1315,7 +1316,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
1315 | } | 1316 | } |
1316 | 1317 | ||
1317 | /* make sure bo is in mappable vram */ | 1318 | /* make sure bo is in mappable vram */ |
1318 | if (nv_device(drm->device)->card_type >= NV_50 || | 1319 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA || |
1319 | bo->mem.start + bo->mem.num_pages < mappable) | 1320 | bo->mem.start + bo->mem.num_pages < mappable) |
1320 | return 0; | 1321 | return 0; |
1321 | 1322 | ||
@@ -1333,6 +1334,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm) | |||
1333 | struct nouveau_drm *drm; | 1334 | struct nouveau_drm *drm; |
1334 | struct nouveau_device *device; | 1335 | struct nouveau_device *device; |
1335 | struct drm_device *dev; | 1336 | struct drm_device *dev; |
1337 | struct device *pdev; | ||
1336 | unsigned i; | 1338 | unsigned i; |
1337 | int r; | 1339 | int r; |
1338 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); | 1340 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); |
@@ -1349,8 +1351,9 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm) | |||
1349 | } | 1351 | } |
1350 | 1352 | ||
1351 | drm = nouveau_bdev(ttm->bdev); | 1353 | drm = nouveau_bdev(ttm->bdev); |
1352 | device = nv_device(drm->device); | 1354 | device = nvkm_device(&drm->device); |
1353 | dev = drm->dev; | 1355 | dev = drm->dev; |
1356 | pdev = nv_device_base(device); | ||
1354 | 1357 | ||
1355 | #if __OS_HAS_AGP | 1358 | #if __OS_HAS_AGP |
1356 | if (drm->agp.stat == ENABLED) { | 1359 | if (drm->agp.stat == ENABLED) { |
@@ -1370,17 +1373,22 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm) | |||
1370 | } | 1373 | } |
1371 | 1374 | ||
1372 | for (i = 0; i < ttm->num_pages; i++) { | 1375 | for (i = 0; i < ttm->num_pages; i++) { |
1373 | ttm_dma->dma_address[i] = nv_device_map_page(device, | 1376 | dma_addr_t addr; |
1374 | ttm->pages[i]); | 1377 | |
1375 | if (!ttm_dma->dma_address[i]) { | 1378 | addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE, |
1379 | DMA_BIDIRECTIONAL); | ||
1380 | |||
1381 | if (dma_mapping_error(pdev, addr)) { | ||
1376 | while (--i) { | 1382 | while (--i) { |
1377 | nv_device_unmap_page(device, | 1383 | dma_unmap_page(pdev, ttm_dma->dma_address[i], |
1378 | ttm_dma->dma_address[i]); | 1384 | PAGE_SIZE, DMA_BIDIRECTIONAL); |
1379 | ttm_dma->dma_address[i] = 0; | 1385 | ttm_dma->dma_address[i] = 0; |
1380 | } | 1386 | } |
1381 | ttm_pool_unpopulate(ttm); | 1387 | ttm_pool_unpopulate(ttm); |
1382 | return -EFAULT; | 1388 | return -EFAULT; |
1383 | } | 1389 | } |
1390 | |||
1391 | ttm_dma->dma_address[i] = addr; | ||
1384 | } | 1392 | } |
1385 | return 0; | 1393 | return 0; |
1386 | } | 1394 | } |
@@ -1392,6 +1400,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) | |||
1392 | struct nouveau_drm *drm; | 1400 | struct nouveau_drm *drm; |
1393 | struct nouveau_device *device; | 1401 | struct nouveau_device *device; |
1394 | struct drm_device *dev; | 1402 | struct drm_device *dev; |
1403 | struct device *pdev; | ||
1395 | unsigned i; | 1404 | unsigned i; |
1396 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); | 1405 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); |
1397 | 1406 | ||
@@ -1399,8 +1408,9 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) | |||
1399 | return; | 1408 | return; |
1400 | 1409 | ||
1401 | drm = nouveau_bdev(ttm->bdev); | 1410 | drm = nouveau_bdev(ttm->bdev); |
1402 | device = nv_device(drm->device); | 1411 | device = nvkm_device(&drm->device); |
1403 | dev = drm->dev; | 1412 | dev = drm->dev; |
1413 | pdev = nv_device_base(device); | ||
1404 | 1414 | ||
1405 | #if __OS_HAS_AGP | 1415 | #if __OS_HAS_AGP |
1406 | if (drm->agp.stat == ENABLED) { | 1416 | if (drm->agp.stat == ENABLED) { |
@@ -1418,7 +1428,8 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) | |||
1418 | 1428 | ||
1419 | for (i = 0; i < ttm->num_pages; i++) { | 1429 | for (i = 0; i < ttm->num_pages; i++) { |
1420 | if (ttm_dma->dma_address[i]) { | 1430 | if (ttm_dma->dma_address[i]) { |
1421 | nv_device_unmap_page(device, ttm_dma->dma_address[i]); | 1431 | dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE, |
1432 | DMA_BIDIRECTIONAL); | ||
1422 | } | 1433 | } |
1423 | } | 1434 | } |
1424 | 1435 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index ccb6b452d6d0..99cd9e4a2aa6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c | |||
@@ -22,16 +22,11 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/object.h> | 25 | #include <nvif/os.h> |
26 | #include <core/client.h> | 26 | #include <nvif/class.h> |
27 | #include <core/device.h> | ||
28 | #include <core/class.h> | ||
29 | |||
30 | #include <subdev/fb.h> | ||
31 | #include <subdev/vm.h> | ||
32 | #include <subdev/instmem.h> | ||
33 | 27 | ||
34 | #include <engine/software.h> | 28 | /*XXX*/ |
29 | #include <core/client.h> | ||
35 | 30 | ||
36 | #include "nouveau_drm.h" | 31 | #include "nouveau_drm.h" |
37 | #include "nouveau_dma.h" | 32 | #include "nouveau_dma.h" |
@@ -47,7 +42,7 @@ module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); | |||
47 | int | 42 | int |
48 | nouveau_channel_idle(struct nouveau_channel *chan) | 43 | nouveau_channel_idle(struct nouveau_channel *chan) |
49 | { | 44 | { |
50 | struct nouveau_cli *cli = chan->cli; | 45 | struct nouveau_cli *cli = (void *)nvif_client(chan->object); |
51 | struct nouveau_fence *fence = NULL; | 46 | struct nouveau_fence *fence = NULL; |
52 | int ret; | 47 | int ret; |
53 | 48 | ||
@@ -58,8 +53,8 @@ nouveau_channel_idle(struct nouveau_channel *chan) | |||
58 | } | 53 | } |
59 | 54 | ||
60 | if (ret) | 55 | if (ret) |
61 | NV_ERROR(cli, "failed to idle channel 0x%08x [%s]\n", | 56 | NV_PRINTK(error, cli, "failed to idle channel 0x%08x [%s]\n", |
62 | chan->handle, cli->base.name); | 57 | chan->object->handle, nvkm_client(&cli->base)->name); |
63 | return ret; | 58 | return ret; |
64 | } | 59 | } |
65 | 60 | ||
@@ -68,36 +63,34 @@ nouveau_channel_del(struct nouveau_channel **pchan) | |||
68 | { | 63 | { |
69 | struct nouveau_channel *chan = *pchan; | 64 | struct nouveau_channel *chan = *pchan; |
70 | if (chan) { | 65 | if (chan) { |
71 | struct nouveau_object *client = nv_object(chan->cli); | ||
72 | if (chan->fence) { | 66 | if (chan->fence) { |
73 | nouveau_channel_idle(chan); | 67 | nouveau_channel_idle(chan); |
74 | nouveau_fence(chan->drm)->context_del(chan); | 68 | nouveau_fence(chan->drm)->context_del(chan); |
75 | } | 69 | } |
76 | nouveau_object_del(client, NVDRM_DEVICE, chan->handle); | 70 | nvif_object_fini(&chan->nvsw); |
77 | nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle); | 71 | nvif_object_fini(&chan->gart); |
72 | nvif_object_fini(&chan->vram); | ||
73 | nvif_object_ref(NULL, &chan->object); | ||
74 | nvif_object_fini(&chan->push.ctxdma); | ||
78 | nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma); | 75 | nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma); |
79 | nouveau_bo_unmap(chan->push.buffer); | 76 | nouveau_bo_unmap(chan->push.buffer); |
80 | if (chan->push.buffer && chan->push.buffer->pin_refcnt) | 77 | if (chan->push.buffer && chan->push.buffer->pin_refcnt) |
81 | nouveau_bo_unpin(chan->push.buffer); | 78 | nouveau_bo_unpin(chan->push.buffer); |
82 | nouveau_bo_ref(NULL, &chan->push.buffer); | 79 | nouveau_bo_ref(NULL, &chan->push.buffer); |
80 | nvif_device_ref(NULL, &chan->device); | ||
83 | kfree(chan); | 81 | kfree(chan); |
84 | } | 82 | } |
85 | *pchan = NULL; | 83 | *pchan = NULL; |
86 | } | 84 | } |
87 | 85 | ||
88 | static int | 86 | static int |
89 | nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli, | 87 | nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device, |
90 | u32 parent, u32 handle, u32 size, | 88 | u32 handle, u32 size, struct nouveau_channel **pchan) |
91 | struct nouveau_channel **pchan) | ||
92 | { | 89 | { |
93 | struct nouveau_device *device = nv_device(drm->device); | 90 | struct nouveau_cli *cli = (void *)nvif_client(&device->base); |
94 | struct nouveau_instmem *imem = nouveau_instmem(device); | 91 | struct nouveau_vmmgr *vmm = nvkm_vmmgr(device); |
95 | struct nouveau_vmmgr *vmm = nouveau_vmmgr(device); | 92 | struct nv_dma_v0 args = {}; |
96 | struct nouveau_fb *pfb = nouveau_fb(device); | ||
97 | struct nouveau_client *client = &cli->base; | ||
98 | struct nv_dma_class args = {}; | ||
99 | struct nouveau_channel *chan; | 93 | struct nouveau_channel *chan; |
100 | struct nouveau_object *push; | ||
101 | u32 target; | 94 | u32 target; |
102 | int ret; | 95 | int ret; |
103 | 96 | ||
@@ -105,9 +98,8 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli, | |||
105 | if (!chan) | 98 | if (!chan) |
106 | return -ENOMEM; | 99 | return -ENOMEM; |
107 | 100 | ||
108 | chan->cli = cli; | 101 | nvif_device_ref(device, &chan->device); |
109 | chan->drm = drm; | 102 | chan->drm = drm; |
110 | chan->handle = handle; | ||
111 | 103 | ||
112 | /* allocate memory for dma push buffer */ | 104 | /* allocate memory for dma push buffer */ |
113 | target = TTM_PL_FLAG_TT; | 105 | target = TTM_PL_FLAG_TT; |
@@ -132,51 +124,54 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli, | |||
132 | * we be able to call out to other (indirect) push buffers | 124 | * we be able to call out to other (indirect) push buffers |
133 | */ | 125 | */ |
134 | chan->push.vma.offset = chan->push.buffer->bo.offset; | 126 | chan->push.vma.offset = chan->push.buffer->bo.offset; |
135 | chan->push.handle = NVDRM_PUSH | (handle & 0xffff); | ||
136 | 127 | ||
137 | if (device->card_type >= NV_50) { | 128 | if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) { |
138 | ret = nouveau_bo_vma_add(chan->push.buffer, client->vm, | 129 | ret = nouveau_bo_vma_add(chan->push.buffer, cli->vm, |
139 | &chan->push.vma); | 130 | &chan->push.vma); |
140 | if (ret) { | 131 | if (ret) { |
141 | nouveau_channel_del(pchan); | 132 | nouveau_channel_del(pchan); |
142 | return ret; | 133 | return ret; |
143 | } | 134 | } |
144 | 135 | ||
145 | args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM; | 136 | args.target = NV_DMA_V0_TARGET_VM; |
137 | args.access = NV_DMA_V0_ACCESS_VM; | ||
146 | args.start = 0; | 138 | args.start = 0; |
147 | args.limit = client->vm->vmm->limit - 1; | 139 | args.limit = cli->vm->vmm->limit - 1; |
148 | } else | 140 | } else |
149 | if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) { | 141 | if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) { |
150 | u64 limit = pfb->ram->size - imem->reserved - 1; | 142 | if (device->info.family == NV_DEVICE_INFO_V0_TNT) { |
151 | if (device->card_type == NV_04) { | ||
152 | /* nv04 vram pushbuf hack, retarget to its location in | 143 | /* nv04 vram pushbuf hack, retarget to its location in |
153 | * the framebuffer bar rather than direct vram access.. | 144 | * the framebuffer bar rather than direct vram access.. |
154 | * nfi why this exists, it came from the -nv ddx. | 145 | * nfi why this exists, it came from the -nv ddx. |
155 | */ | 146 | */ |
156 | args.flags = NV_DMA_TARGET_PCI | NV_DMA_ACCESS_RDWR; | 147 | args.target = NV_DMA_V0_TARGET_PCI; |
157 | args.start = nv_device_resource_start(device, 1); | 148 | args.access = NV_DMA_V0_ACCESS_RDWR; |
158 | args.limit = args.start + limit; | 149 | args.start = nv_device_resource_start(nvkm_device(device), 1); |
150 | args.limit = args.start + device->info.ram_user - 1; | ||
159 | } else { | 151 | } else { |
160 | args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR; | 152 | args.target = NV_DMA_V0_TARGET_VRAM; |
153 | args.access = NV_DMA_V0_ACCESS_RDWR; | ||
161 | args.start = 0; | 154 | args.start = 0; |
162 | args.limit = limit; | 155 | args.limit = device->info.ram_user - 1; |
163 | } | 156 | } |
164 | } else { | 157 | } else { |
165 | if (chan->drm->agp.stat == ENABLED) { | 158 | if (chan->drm->agp.stat == ENABLED) { |
166 | args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR; | 159 | args.target = NV_DMA_V0_TARGET_AGP; |
160 | args.access = NV_DMA_V0_ACCESS_RDWR; | ||
167 | args.start = chan->drm->agp.base; | 161 | args.start = chan->drm->agp.base; |
168 | args.limit = chan->drm->agp.base + | 162 | args.limit = chan->drm->agp.base + |
169 | chan->drm->agp.size - 1; | 163 | chan->drm->agp.size - 1; |
170 | } else { | 164 | } else { |
171 | args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR; | 165 | args.target = NV_DMA_V0_TARGET_VM; |
166 | args.access = NV_DMA_V0_ACCESS_RDWR; | ||
172 | args.start = 0; | 167 | args.start = 0; |
173 | args.limit = vmm->limit - 1; | 168 | args.limit = vmm->limit - 1; |
174 | } | 169 | } |
175 | } | 170 | } |
176 | 171 | ||
177 | ret = nouveau_object_new(nv_object(chan->cli), parent, | 172 | ret = nvif_object_init(nvif_object(device), NULL, NVDRM_PUSH | |
178 | chan->push.handle, 0x0002, | 173 | (handle & 0xffff), NV_DMA_FROM_MEMORY, |
179 | &args, sizeof(args), &push); | 174 | &args, sizeof(args), &chan->push.ctxdma); |
180 | if (ret) { | 175 | if (ret) { |
181 | nouveau_channel_del(pchan); | 176 | nouveau_channel_del(pchan); |
182 | return ret; | 177 | return ret; |
@@ -186,38 +181,56 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli, | |||
186 | } | 181 | } |
187 | 182 | ||
188 | static int | 183 | static int |
189 | nouveau_channel_ind(struct nouveau_drm *drm, struct nouveau_cli *cli, | 184 | nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device, |
190 | u32 parent, u32 handle, u32 engine, | 185 | u32 handle, u32 engine, struct nouveau_channel **pchan) |
191 | struct nouveau_channel **pchan) | ||
192 | { | 186 | { |
193 | static const u16 oclasses[] = { NVE0_CHANNEL_IND_CLASS, | 187 | static const u16 oclasses[] = { KEPLER_CHANNEL_GPFIFO_A, |
194 | NVC0_CHANNEL_IND_CLASS, | 188 | FERMI_CHANNEL_GPFIFO, |
195 | NV84_CHANNEL_IND_CLASS, | 189 | G82_CHANNEL_GPFIFO, |
196 | NV50_CHANNEL_IND_CLASS, | 190 | NV50_CHANNEL_GPFIFO, |
197 | 0 }; | 191 | 0 }; |
198 | const u16 *oclass = oclasses; | 192 | const u16 *oclass = oclasses; |
199 | struct nve0_channel_ind_class args; | 193 | union { |
194 | struct nv50_channel_gpfifo_v0 nv50; | ||
195 | struct kepler_channel_gpfifo_a_v0 kepler; | ||
196 | } args, *retn; | ||
200 | struct nouveau_channel *chan; | 197 | struct nouveau_channel *chan; |
198 | u32 size; | ||
201 | int ret; | 199 | int ret; |
202 | 200 | ||
203 | /* allocate dma push buffer */ | 201 | /* allocate dma push buffer */ |
204 | ret = nouveau_channel_prep(drm, cli, parent, handle, 0x12000, &chan); | 202 | ret = nouveau_channel_prep(drm, device, handle, 0x12000, &chan); |
205 | *pchan = chan; | 203 | *pchan = chan; |
206 | if (ret) | 204 | if (ret) |
207 | return ret; | 205 | return ret; |
208 | 206 | ||
209 | /* create channel object */ | 207 | /* create channel object */ |
210 | args.pushbuf = chan->push.handle; | ||
211 | args.ioffset = 0x10000 + chan->push.vma.offset; | ||
212 | args.ilength = 0x02000; | ||
213 | args.engine = engine; | ||
214 | |||
215 | do { | 208 | do { |
216 | ret = nouveau_object_new(nv_object(cli), parent, handle, | 209 | if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) { |
217 | *oclass++, &args, sizeof(args), | 210 | args.kepler.version = 0; |
218 | &chan->object); | 211 | args.kepler.engine = engine; |
219 | if (ret == 0) | 212 | args.kepler.pushbuf = chan->push.ctxdma.handle; |
213 | args.kepler.ilength = 0x02000; | ||
214 | args.kepler.ioffset = 0x10000 + chan->push.vma.offset; | ||
215 | size = sizeof(args.kepler); | ||
216 | } else { | ||
217 | args.nv50.version = 0; | ||
218 | args.nv50.pushbuf = chan->push.ctxdma.handle; | ||
219 | args.nv50.ilength = 0x02000; | ||
220 | args.nv50.ioffset = 0x10000 + chan->push.vma.offset; | ||
221 | size = sizeof(args.nv50); | ||
222 | } | ||
223 | |||
224 | ret = nvif_object_new(nvif_object(device), handle, *oclass++, | ||
225 | &args, size, &chan->object); | ||
226 | if (ret == 0) { | ||
227 | retn = chan->object->data; | ||
228 | if (chan->object->oclass >= KEPLER_CHANNEL_GPFIFO_A) | ||
229 | chan->chid = retn->kepler.chid; | ||
230 | else | ||
231 | chan->chid = retn->nv50.chid; | ||
220 | return ret; | 232 | return ret; |
233 | } | ||
221 | } while (*oclass); | 234 | } while (*oclass); |
222 | 235 | ||
223 | nouveau_channel_del(pchan); | 236 | nouveau_channel_del(pchan); |
@@ -225,35 +238,38 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nouveau_cli *cli, | |||
225 | } | 238 | } |
226 | 239 | ||
227 | static int | 240 | static int |
228 | nouveau_channel_dma(struct nouveau_drm *drm, struct nouveau_cli *cli, | 241 | nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device, |
229 | u32 parent, u32 handle, struct nouveau_channel **pchan) | 242 | u32 handle, struct nouveau_channel **pchan) |
230 | { | 243 | { |
231 | static const u16 oclasses[] = { NV40_CHANNEL_DMA_CLASS, | 244 | static const u16 oclasses[] = { NV40_CHANNEL_DMA, |
232 | NV17_CHANNEL_DMA_CLASS, | 245 | NV17_CHANNEL_DMA, |
233 | NV10_CHANNEL_DMA_CLASS, | 246 | NV10_CHANNEL_DMA, |
234 | NV03_CHANNEL_DMA_CLASS, | 247 | NV03_CHANNEL_DMA, |
235 | 0 }; | 248 | 0 }; |
236 | const u16 *oclass = oclasses; | 249 | const u16 *oclass = oclasses; |
237 | struct nv03_channel_dma_class args; | 250 | struct nv03_channel_dma_v0 args, *retn; |
238 | struct nouveau_channel *chan; | 251 | struct nouveau_channel *chan; |
239 | int ret; | 252 | int ret; |
240 | 253 | ||
241 | /* allocate dma push buffer */ | 254 | /* allocate dma push buffer */ |
242 | ret = nouveau_channel_prep(drm, cli, parent, handle, 0x10000, &chan); | 255 | ret = nouveau_channel_prep(drm, device, handle, 0x10000, &chan); |
243 | *pchan = chan; | 256 | *pchan = chan; |
244 | if (ret) | 257 | if (ret) |
245 | return ret; | 258 | return ret; |
246 | 259 | ||
247 | /* create channel object */ | 260 | /* create channel object */ |
248 | args.pushbuf = chan->push.handle; | 261 | args.version = 0; |
262 | args.pushbuf = chan->push.ctxdma.handle; | ||
249 | args.offset = chan->push.vma.offset; | 263 | args.offset = chan->push.vma.offset; |
250 | 264 | ||
251 | do { | 265 | do { |
252 | ret = nouveau_object_new(nv_object(cli), parent, handle, | 266 | ret = nvif_object_new(nvif_object(device), handle, *oclass++, |
253 | *oclass++, &args, sizeof(args), | 267 | &args, sizeof(args), &chan->object); |
254 | &chan->object); | 268 | if (ret == 0) { |
255 | if (ret == 0) | 269 | retn = chan->object->data; |
270 | chan->chid = retn->chid; | ||
256 | return ret; | 271 | return ret; |
272 | } | ||
257 | } while (ret && *oclass); | 273 | } while (ret && *oclass); |
258 | 274 | ||
259 | nouveau_channel_del(pchan); | 275 | nouveau_channel_del(pchan); |
@@ -263,60 +279,63 @@ nouveau_channel_dma(struct nouveau_drm *drm, struct nouveau_cli *cli, | |||
263 | static int | 279 | static int |
264 | nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) | 280 | nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) |
265 | { | 281 | { |
266 | struct nouveau_client *client = nv_client(chan->cli); | 282 | struct nvif_device *device = chan->device; |
267 | struct nouveau_device *device = nv_device(chan->drm->device); | 283 | struct nouveau_cli *cli = (void *)nvif_client(&device->base); |
268 | struct nouveau_instmem *imem = nouveau_instmem(device); | 284 | struct nouveau_vmmgr *vmm = nvkm_vmmgr(device); |
269 | struct nouveau_vmmgr *vmm = nouveau_vmmgr(device); | ||
270 | struct nouveau_fb *pfb = nouveau_fb(device); | ||
271 | struct nouveau_software_chan *swch; | 285 | struct nouveau_software_chan *swch; |
272 | struct nouveau_object *object; | 286 | struct nv_dma_v0 args = {}; |
273 | struct nv_dma_class args = {}; | ||
274 | int ret, i; | 287 | int ret, i; |
275 | 288 | ||
289 | nvif_object_map(chan->object); | ||
290 | |||
276 | /* allocate dma objects to cover all allowed vram, and gart */ | 291 | /* allocate dma objects to cover all allowed vram, and gart */ |
277 | if (device->card_type < NV_C0) { | 292 | if (device->info.family < NV_DEVICE_INFO_V0_FERMI) { |
278 | if (device->card_type >= NV_50) { | 293 | if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) { |
279 | args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM; | 294 | args.target = NV_DMA_V0_TARGET_VM; |
295 | args.access = NV_DMA_V0_ACCESS_VM; | ||
280 | args.start = 0; | 296 | args.start = 0; |
281 | args.limit = client->vm->vmm->limit - 1; | 297 | args.limit = cli->vm->vmm->limit - 1; |
282 | } else { | 298 | } else { |
283 | args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR; | 299 | args.target = NV_DMA_V0_TARGET_VRAM; |
300 | args.access = NV_DMA_V0_ACCESS_RDWR; | ||
284 | args.start = 0; | 301 | args.start = 0; |
285 | args.limit = pfb->ram->size - imem->reserved - 1; | 302 | args.limit = device->info.ram_user - 1; |
286 | } | 303 | } |
287 | 304 | ||
288 | ret = nouveau_object_new(nv_object(client), chan->handle, vram, | 305 | ret = nvif_object_init(chan->object, NULL, vram, |
289 | 0x003d, &args, sizeof(args), &object); | 306 | NV_DMA_IN_MEMORY, &args, |
307 | sizeof(args), &chan->vram); | ||
290 | if (ret) | 308 | if (ret) |
291 | return ret; | 309 | return ret; |
292 | 310 | ||
293 | if (device->card_type >= NV_50) { | 311 | if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) { |
294 | args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM; | 312 | args.target = NV_DMA_V0_TARGET_VM; |
313 | args.access = NV_DMA_V0_ACCESS_VM; | ||
295 | args.start = 0; | 314 | args.start = 0; |
296 | args.limit = client->vm->vmm->limit - 1; | 315 | args.limit = cli->vm->vmm->limit - 1; |
297 | } else | 316 | } else |
298 | if (chan->drm->agp.stat == ENABLED) { | 317 | if (chan->drm->agp.stat == ENABLED) { |
299 | args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR; | 318 | args.target = NV_DMA_V0_TARGET_AGP; |
319 | args.access = NV_DMA_V0_ACCESS_RDWR; | ||
300 | args.start = chan->drm->agp.base; | 320 | args.start = chan->drm->agp.base; |
301 | args.limit = chan->drm->agp.base + | 321 | args.limit = chan->drm->agp.base + |
302 | chan->drm->agp.size - 1; | 322 | chan->drm->agp.size - 1; |
303 | } else { | 323 | } else { |
304 | args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR; | 324 | args.target = NV_DMA_V0_TARGET_VM; |
325 | args.access = NV_DMA_V0_ACCESS_RDWR; | ||
305 | args.start = 0; | 326 | args.start = 0; |
306 | args.limit = vmm->limit - 1; | 327 | args.limit = vmm->limit - 1; |
307 | } | 328 | } |
308 | 329 | ||
309 | ret = nouveau_object_new(nv_object(client), chan->handle, gart, | 330 | ret = nvif_object_init(chan->object, NULL, gart, |
310 | 0x003d, &args, sizeof(args), &object); | 331 | NV_DMA_IN_MEMORY, &args, |
332 | sizeof(args), &chan->gart); | ||
311 | if (ret) | 333 | if (ret) |
312 | return ret; | 334 | return ret; |
313 | |||
314 | chan->vram = vram; | ||
315 | chan->gart = gart; | ||
316 | } | 335 | } |
317 | 336 | ||
318 | /* initialise dma tracking parameters */ | 337 | /* initialise dma tracking parameters */ |
319 | switch (nv_hclass(chan->object) & 0x00ff) { | 338 | switch (chan->object->oclass & 0x00ff) { |
320 | case 0x006b: | 339 | case 0x006b: |
321 | case 0x006e: | 340 | case 0x006e: |
322 | chan->user_put = 0x40; | 341 | chan->user_put = 0x40; |
@@ -347,13 +366,13 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) | |||
347 | OUT_RING(chan, 0x00000000); | 366 | OUT_RING(chan, 0x00000000); |
348 | 367 | ||
349 | /* allocate software object class (used for fences on <= nv05) */ | 368 | /* allocate software object class (used for fences on <= nv05) */ |
350 | if (device->card_type < NV_10) { | 369 | if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) { |
351 | ret = nouveau_object_new(nv_object(client), chan->handle, | 370 | ret = nvif_object_init(chan->object, NULL, 0x006e, 0x006e, |
352 | NvSw, 0x006e, NULL, 0, &object); | 371 | NULL, 0, &chan->nvsw); |
353 | if (ret) | 372 | if (ret) |
354 | return ret; | 373 | return ret; |
355 | 374 | ||
356 | swch = (void *)object->parent; | 375 | swch = (void *)nvkm_object(&chan->nvsw)->parent; |
357 | swch->flip = nouveau_flip_complete; | 376 | swch->flip = nouveau_flip_complete; |
358 | swch->flip_data = chan; | 377 | swch->flip_data = chan; |
359 | 378 | ||
@@ -362,7 +381,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) | |||
362 | return ret; | 381 | return ret; |
363 | 382 | ||
364 | BEGIN_NV04(chan, NvSubSw, 0x0000, 1); | 383 | BEGIN_NV04(chan, NvSubSw, 0x0000, 1); |
365 | OUT_RING (chan, NvSw); | 384 | OUT_RING (chan, chan->nvsw.handle); |
366 | FIRE_RING (chan); | 385 | FIRE_RING (chan); |
367 | } | 386 | } |
368 | 387 | ||
@@ -371,25 +390,26 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) | |||
371 | } | 390 | } |
372 | 391 | ||
373 | int | 392 | int |
374 | nouveau_channel_new(struct nouveau_drm *drm, struct nouveau_cli *cli, | 393 | nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device, |
375 | u32 parent, u32 handle, u32 arg0, u32 arg1, | 394 | u32 handle, u32 arg0, u32 arg1, |
376 | struct nouveau_channel **pchan) | 395 | struct nouveau_channel **pchan) |
377 | { | 396 | { |
397 | struct nouveau_cli *cli = (void *)nvif_client(&device->base); | ||
378 | int ret; | 398 | int ret; |
379 | 399 | ||
380 | ret = nouveau_channel_ind(drm, cli, parent, handle, arg0, pchan); | 400 | ret = nouveau_channel_ind(drm, device, handle, arg0, pchan); |
381 | if (ret) { | 401 | if (ret) { |
382 | NV_DEBUG(cli, "ib channel create, %d\n", ret); | 402 | NV_PRINTK(debug, cli, "ib channel create, %d\n", ret); |
383 | ret = nouveau_channel_dma(drm, cli, parent, handle, pchan); | 403 | ret = nouveau_channel_dma(drm, device, handle, pchan); |
384 | if (ret) { | 404 | if (ret) { |
385 | NV_DEBUG(cli, "dma channel create, %d\n", ret); | 405 | NV_PRINTK(debug, cli, "dma channel create, %d\n", ret); |
386 | return ret; | 406 | return ret; |
387 | } | 407 | } |
388 | } | 408 | } |
389 | 409 | ||
390 | ret = nouveau_channel_init(*pchan, arg0, arg1); | 410 | ret = nouveau_channel_init(*pchan, arg0, arg1); |
391 | if (ret) { | 411 | if (ret) { |
392 | NV_ERROR(cli, "channel failed to initialise, %d\n", ret); | 412 | NV_PRINTK(error, cli, "channel failed to initialise, %d\n", ret); |
393 | nouveau_channel_del(pchan); | 413 | nouveau_channel_del(pchan); |
394 | return ret; | 414 | return ret; |
395 | } | 415 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h index 40f97e2c47b6..20163709d608 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.h +++ b/drivers/gpu/drm/nouveau/nouveau_chan.h | |||
@@ -1,20 +1,23 @@ | |||
1 | #ifndef __NOUVEAU_CHAN_H__ | 1 | #ifndef __NOUVEAU_CHAN_H__ |
2 | #define __NOUVEAU_CHAN_H__ | 2 | #define __NOUVEAU_CHAN_H__ |
3 | 3 | ||
4 | struct nouveau_cli; | 4 | #include <nvif/object.h> |
5 | struct nvif_device; | ||
5 | 6 | ||
6 | struct nouveau_channel { | 7 | struct nouveau_channel { |
7 | struct nouveau_cli *cli; | 8 | struct nvif_device *device; |
8 | struct nouveau_drm *drm; | 9 | struct nouveau_drm *drm; |
9 | 10 | ||
10 | u32 handle; | 11 | int chid; |
11 | u32 vram; | 12 | |
12 | u32 gart; | 13 | struct nvif_object vram; |
14 | struct nvif_object gart; | ||
15 | struct nvif_object nvsw; | ||
13 | 16 | ||
14 | struct { | 17 | struct { |
15 | struct nouveau_bo *buffer; | 18 | struct nouveau_bo *buffer; |
16 | struct nouveau_vma vma; | 19 | struct nouveau_vma vma; |
17 | u32 handle; | 20 | struct nvif_object ctxdma; |
18 | } push; | 21 | } push; |
19 | 22 | ||
20 | /* TODO: this will be reworked in the near future */ | 23 | /* TODO: this will be reworked in the near future */ |
@@ -34,12 +37,12 @@ struct nouveau_channel { | |||
34 | u32 user_get; | 37 | u32 user_get; |
35 | u32 user_put; | 38 | u32 user_put; |
36 | 39 | ||
37 | struct nouveau_object *object; | 40 | struct nvif_object *object; |
38 | }; | 41 | }; |
39 | 42 | ||
40 | 43 | ||
41 | int nouveau_channel_new(struct nouveau_drm *, struct nouveau_cli *, | 44 | int nouveau_channel_new(struct nouveau_drm *, struct nvif_device *, |
42 | u32 parent, u32 handle, u32 arg0, u32 arg1, | 45 | u32 handle, u32 arg0, u32 arg1, |
43 | struct nouveau_channel **); | 46 | struct nouveau_channel **); |
44 | void nouveau_channel_del(struct nouveau_channel **); | 47 | void nouveau_channel_del(struct nouveau_channel **); |
45 | int nouveau_channel_idle(struct nouveau_channel *); | 48 | int nouveau_channel_idle(struct nouveau_channel *); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index dbdc9ad59546..1ec44c83e919 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
@@ -42,9 +42,7 @@ | |||
42 | #include "nouveau_encoder.h" | 42 | #include "nouveau_encoder.h" |
43 | #include "nouveau_crtc.h" | 43 | #include "nouveau_crtc.h" |
44 | 44 | ||
45 | #include <subdev/i2c.h> | 45 | #include <nvif/event.h> |
46 | #include <subdev/gpio.h> | ||
47 | #include <engine/disp.h> | ||
48 | 46 | ||
49 | MODULE_PARM_DESC(tv_disable, "Disable TV-out detection"); | 47 | MODULE_PARM_DESC(tv_disable, "Disable TV-out detection"); |
50 | static int nouveau_tv_disable = 0; | 48 | static int nouveau_tv_disable = 0; |
@@ -102,7 +100,7 @@ static void | |||
102 | nouveau_connector_destroy(struct drm_connector *connector) | 100 | nouveau_connector_destroy(struct drm_connector *connector) |
103 | { | 101 | { |
104 | struct nouveau_connector *nv_connector = nouveau_connector(connector); | 102 | struct nouveau_connector *nv_connector = nouveau_connector(connector); |
105 | nouveau_event_ref(NULL, &nv_connector->hpd); | 103 | nvif_notify_fini(&nv_connector->hpd); |
106 | kfree(nv_connector->edid); | 104 | kfree(nv_connector->edid); |
107 | drm_connector_unregister(connector); | 105 | drm_connector_unregister(connector); |
108 | drm_connector_cleanup(connector); | 106 | drm_connector_cleanup(connector); |
@@ -117,7 +115,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector) | |||
117 | struct drm_device *dev = connector->dev; | 115 | struct drm_device *dev = connector->dev; |
118 | struct nouveau_connector *nv_connector = nouveau_connector(connector); | 116 | struct nouveau_connector *nv_connector = nouveau_connector(connector); |
119 | struct nouveau_drm *drm = nouveau_drm(dev); | 117 | struct nouveau_drm *drm = nouveau_drm(dev); |
120 | struct nouveau_gpio *gpio = nouveau_gpio(drm->device); | 118 | struct nouveau_gpio *gpio = nvkm_gpio(&drm->device); |
121 | struct nouveau_encoder *nv_encoder; | 119 | struct nouveau_encoder *nv_encoder; |
122 | struct drm_encoder *encoder; | 120 | struct drm_encoder *encoder; |
123 | int i, panel = -ENODEV; | 121 | int i, panel = -ENODEV; |
@@ -206,7 +204,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector, | |||
206 | return; | 204 | return; |
207 | nv_connector->detected_encoder = nv_encoder; | 205 | nv_connector->detected_encoder = nv_encoder; |
208 | 206 | ||
209 | if (nv_device(drm->device)->card_type >= NV_50) { | 207 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { |
210 | connector->interlace_allowed = true; | 208 | connector->interlace_allowed = true; |
211 | connector->doublescan_allowed = true; | 209 | connector->doublescan_allowed = true; |
212 | } else | 210 | } else |
@@ -216,9 +214,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector, | |||
216 | connector->interlace_allowed = false; | 214 | connector->interlace_allowed = false; |
217 | } else { | 215 | } else { |
218 | connector->doublescan_allowed = true; | 216 | connector->doublescan_allowed = true; |
219 | if (nv_device(drm->device)->card_type == NV_20 || | 217 | if (drm->device.info.family == NV_DEVICE_INFO_V0_KELVIN || |
220 | ((nv_device(drm->device)->card_type == NV_10 || | 218 | (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS && |
221 | nv_device(drm->device)->card_type == NV_11) && | ||
222 | (dev->pdev->device & 0x0ff0) != 0x0100 && | 219 | (dev->pdev->device & 0x0ff0) != 0x0100 && |
223 | (dev->pdev->device & 0x0ff0) != 0x0150)) | 220 | (dev->pdev->device & 0x0ff0) != 0x0150)) |
224 | /* HW is broken */ | 221 | /* HW is broken */ |
@@ -802,11 +799,11 @@ get_tmds_link_bandwidth(struct drm_connector *connector) | |||
802 | struct dcb_output *dcb = nv_connector->detected_encoder->dcb; | 799 | struct dcb_output *dcb = nv_connector->detected_encoder->dcb; |
803 | 800 | ||
804 | if (dcb->location != DCB_LOC_ON_CHIP || | 801 | if (dcb->location != DCB_LOC_ON_CHIP || |
805 | nv_device(drm->device)->chipset >= 0x46) | 802 | drm->device.info.chipset >= 0x46) |
806 | return 165000; | 803 | return 165000; |
807 | else if (nv_device(drm->device)->chipset >= 0x40) | 804 | else if (drm->device.info.chipset >= 0x40) |
808 | return 155000; | 805 | return 155000; |
809 | else if (nv_device(drm->device)->chipset >= 0x18) | 806 | else if (drm->device.info.chipset >= 0x18) |
810 | return 135000; | 807 | return 135000; |
811 | else | 808 | else |
812 | return 112000; | 809 | return 112000; |
@@ -939,18 +936,19 @@ nouveau_connector_funcs_dp = { | |||
939 | .force = nouveau_connector_force | 936 | .force = nouveau_connector_force |
940 | }; | 937 | }; |
941 | 938 | ||
942 | static void | 939 | static int |
943 | nouveau_connector_hotplug_work(struct work_struct *work) | 940 | nouveau_connector_hotplug(struct nvif_notify *notify) |
944 | { | 941 | { |
945 | struct nouveau_connector *nv_connector = | 942 | struct nouveau_connector *nv_connector = |
946 | container_of(work, typeof(*nv_connector), work); | 943 | container_of(notify, typeof(*nv_connector), hpd); |
947 | struct drm_connector *connector = &nv_connector->base; | 944 | struct drm_connector *connector = &nv_connector->base; |
948 | struct nouveau_drm *drm = nouveau_drm(connector->dev); | 945 | struct nouveau_drm *drm = nouveau_drm(connector->dev); |
946 | const struct nvif_notify_conn_rep_v0 *rep = notify->data; | ||
949 | const char *name = connector->name; | 947 | const char *name = connector->name; |
950 | 948 | ||
951 | if (nv_connector->status & NVKM_HPD_IRQ) { | 949 | if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) { |
952 | } else { | 950 | } else { |
953 | bool plugged = (nv_connector->status != NVKM_HPD_UNPLUG); | 951 | bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG); |
954 | 952 | ||
955 | NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name); | 953 | NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name); |
956 | 954 | ||
@@ -961,16 +959,7 @@ nouveau_connector_hotplug_work(struct work_struct *work) | |||
961 | drm_helper_hpd_irq_event(connector->dev); | 959 | drm_helper_hpd_irq_event(connector->dev); |
962 | } | 960 | } |
963 | 961 | ||
964 | nouveau_event_get(nv_connector->hpd); | 962 | return NVIF_NOTIFY_KEEP; |
965 | } | ||
966 | |||
967 | static int | ||
968 | nouveau_connector_hotplug(void *data, u32 type, int index) | ||
969 | { | ||
970 | struct nouveau_connector *nv_connector = data; | ||
971 | nv_connector->status = type; | ||
972 | schedule_work(&nv_connector->work); | ||
973 | return NVKM_EVENT_DROP; | ||
974 | } | 963 | } |
975 | 964 | ||
976 | static ssize_t | 965 | static ssize_t |
@@ -1040,7 +1029,6 @@ nouveau_connector_create(struct drm_device *dev, int index) | |||
1040 | struct nouveau_drm *drm = nouveau_drm(dev); | 1029 | struct nouveau_drm *drm = nouveau_drm(dev); |
1041 | struct nouveau_display *disp = nouveau_display(dev); | 1030 | struct nouveau_display *disp = nouveau_display(dev); |
1042 | struct nouveau_connector *nv_connector = NULL; | 1031 | struct nouveau_connector *nv_connector = NULL; |
1043 | struct nouveau_disp *pdisp = nouveau_disp(drm->device); | ||
1044 | struct drm_connector *connector; | 1032 | struct drm_connector *connector; |
1045 | int type, ret = 0; | 1033 | int type, ret = 0; |
1046 | bool dummy; | 1034 | bool dummy; |
@@ -1194,7 +1182,7 @@ nouveau_connector_create(struct drm_device *dev, int index) | |||
1194 | 1182 | ||
1195 | switch (nv_connector->type) { | 1183 | switch (nv_connector->type) { |
1196 | case DCB_CONNECTOR_VGA: | 1184 | case DCB_CONNECTOR_VGA: |
1197 | if (nv_device(drm->device)->card_type >= NV_50) { | 1185 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { |
1198 | drm_object_attach_property(&connector->base, | 1186 | drm_object_attach_property(&connector->base, |
1199 | dev->mode_config.scaling_mode_property, | 1187 | dev->mode_config.scaling_mode_property, |
1200 | nv_connector->scaling_mode); | 1188 | nv_connector->scaling_mode); |
@@ -1226,16 +1214,20 @@ nouveau_connector_create(struct drm_device *dev, int index) | |||
1226 | break; | 1214 | break; |
1227 | } | 1215 | } |
1228 | 1216 | ||
1229 | ret = nouveau_event_new(pdisp->hpd, NVKM_HPD, index, | 1217 | ret = nvif_notify_init(&disp->disp, NULL, nouveau_connector_hotplug, |
1230 | nouveau_connector_hotplug, | 1218 | true, NV04_DISP_NTFY_CONN, |
1231 | nv_connector, &nv_connector->hpd); | 1219 | &(struct nvif_notify_conn_req_v0) { |
1220 | .mask = NVIF_NOTIFY_CONN_V0_ANY, | ||
1221 | .conn = index, | ||
1222 | }, | ||
1223 | sizeof(struct nvif_notify_conn_req_v0), | ||
1224 | sizeof(struct nvif_notify_conn_rep_v0), | ||
1225 | &nv_connector->hpd); | ||
1232 | if (ret) | 1226 | if (ret) |
1233 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | 1227 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; |
1234 | else | 1228 | else |
1235 | connector->polled = DRM_CONNECTOR_POLL_HPD; | 1229 | connector->polled = DRM_CONNECTOR_POLL_HPD; |
1236 | 1230 | ||
1237 | INIT_WORK(&nv_connector->work, nouveau_connector_hotplug_work); | ||
1238 | |||
1239 | drm_connector_register(connector); | 1231 | drm_connector_register(connector); |
1240 | return connector; | 1232 | return connector; |
1241 | } | 1233 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h index 8861b6c579ad..68029d041dd2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.h +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h | |||
@@ -27,14 +27,12 @@ | |||
27 | #ifndef __NOUVEAU_CONNECTOR_H__ | 27 | #ifndef __NOUVEAU_CONNECTOR_H__ |
28 | #define __NOUVEAU_CONNECTOR_H__ | 28 | #define __NOUVEAU_CONNECTOR_H__ |
29 | 29 | ||
30 | #include <nvif/notify.h> | ||
31 | |||
30 | #include <drm/drm_edid.h> | 32 | #include <drm/drm_edid.h> |
31 | #include <drm/drm_dp_helper.h> | 33 | #include <drm/drm_dp_helper.h> |
32 | #include "nouveau_crtc.h" | 34 | #include "nouveau_crtc.h" |
33 | 35 | ||
34 | #include <core/event.h> | ||
35 | |||
36 | #include <subdev/bios.h> | ||
37 | |||
38 | struct nouveau_i2c_port; | 36 | struct nouveau_i2c_port; |
39 | 37 | ||
40 | enum nouveau_underscan_type { | 38 | enum nouveau_underscan_type { |
@@ -67,9 +65,7 @@ struct nouveau_connector { | |||
67 | u8 index; | 65 | u8 index; |
68 | u8 *dcb; | 66 | u8 *dcb; |
69 | 67 | ||
70 | struct nouveau_eventh *hpd; | 68 | struct nvif_notify hpd; |
71 | u32 status; | ||
72 | struct work_struct work; | ||
73 | 69 | ||
74 | struct drm_dp_aux aux; | 70 | struct drm_dp_aux aux; |
75 | 71 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h index a0534489d23f..f19cb1c5fc5a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_crtc.h +++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h | |||
@@ -27,10 +27,13 @@ | |||
27 | #ifndef __NOUVEAU_CRTC_H__ | 27 | #ifndef __NOUVEAU_CRTC_H__ |
28 | #define __NOUVEAU_CRTC_H__ | 28 | #define __NOUVEAU_CRTC_H__ |
29 | 29 | ||
30 | #include <nvif/notify.h> | ||
31 | |||
30 | struct nouveau_crtc { | 32 | struct nouveau_crtc { |
31 | struct drm_crtc base; | 33 | struct drm_crtc base; |
32 | 34 | ||
33 | int index; | 35 | int index; |
36 | struct nvif_notify vblank; | ||
34 | 37 | ||
35 | uint32_t dpms_saved_fp_control; | 38 | uint32_t dpms_saved_fp_control; |
36 | uint32_t fp_users; | 39 | uint32_t fp_users; |
@@ -46,7 +49,7 @@ struct nouveau_crtc { | |||
46 | int cpp; | 49 | int cpp; |
47 | bool blanked; | 50 | bool blanked; |
48 | uint32_t offset; | 51 | uint32_t offset; |
49 | uint32_t tile_flags; | 52 | uint32_t handle; |
50 | } fb; | 53 | } fb; |
51 | 54 | ||
52 | struct { | 55 | struct { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 47ad74255bf1..1cc7b603c753 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
@@ -27,6 +27,8 @@ | |||
27 | #include <drm/drmP.h> | 27 | #include <drm/drmP.h> |
28 | #include <drm/drm_crtc_helper.h> | 28 | #include <drm/drm_crtc_helper.h> |
29 | 29 | ||
30 | #include <nvif/class.h> | ||
31 | |||
30 | #include "nouveau_fbcon.h" | 32 | #include "nouveau_fbcon.h" |
31 | #include "dispnv04/hw.h" | 33 | #include "dispnv04/hw.h" |
32 | #include "nouveau_crtc.h" | 34 | #include "nouveau_crtc.h" |
@@ -37,35 +39,42 @@ | |||
37 | 39 | ||
38 | #include "nouveau_fence.h" | 40 | #include "nouveau_fence.h" |
39 | 41 | ||
40 | #include <engine/disp.h> | 42 | #include <nvif/event.h> |
41 | |||
42 | #include <core/class.h> | ||
43 | 43 | ||
44 | static int | 44 | static int |
45 | nouveau_display_vblank_handler(void *data, u32 type, int head) | 45 | nouveau_display_vblank_handler(struct nvif_notify *notify) |
46 | { | 46 | { |
47 | struct nouveau_drm *drm = data; | 47 | struct nouveau_crtc *nv_crtc = |
48 | drm_handle_vblank(drm->dev, head); | 48 | container_of(notify, typeof(*nv_crtc), vblank); |
49 | return NVKM_EVENT_KEEP; | 49 | drm_handle_vblank(nv_crtc->base.dev, nv_crtc->index); |
50 | return NVIF_NOTIFY_KEEP; | ||
50 | } | 51 | } |
51 | 52 | ||
52 | int | 53 | int |
53 | nouveau_display_vblank_enable(struct drm_device *dev, int head) | 54 | nouveau_display_vblank_enable(struct drm_device *dev, int head) |
54 | { | 55 | { |
55 | struct nouveau_display *disp = nouveau_display(dev); | 56 | struct drm_crtc *crtc; |
56 | if (disp) { | 57 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
57 | nouveau_event_get(disp->vblank[head]); | 58 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
58 | return 0; | 59 | if (nv_crtc->index == head) { |
60 | nvif_notify_get(&nv_crtc->vblank); | ||
61 | return 0; | ||
62 | } | ||
59 | } | 63 | } |
60 | return -EIO; | 64 | return -EINVAL; |
61 | } | 65 | } |
62 | 66 | ||
63 | void | 67 | void |
64 | nouveau_display_vblank_disable(struct drm_device *dev, int head) | 68 | nouveau_display_vblank_disable(struct drm_device *dev, int head) |
65 | { | 69 | { |
66 | struct nouveau_display *disp = nouveau_display(dev); | 70 | struct drm_crtc *crtc; |
67 | if (disp) | 71 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
68 | nouveau_event_put(disp->vblank[head]); | 72 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
73 | if (nv_crtc->index == head) { | ||
74 | nvif_notify_put(&nv_crtc->vblank); | ||
75 | return; | ||
76 | } | ||
77 | } | ||
69 | } | 78 | } |
70 | 79 | ||
71 | static inline int | 80 | static inline int |
@@ -86,17 +95,22 @@ int | |||
86 | nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos, | 95 | nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos, |
87 | ktime_t *stime, ktime_t *etime) | 96 | ktime_t *stime, ktime_t *etime) |
88 | { | 97 | { |
89 | const u32 mthd = NV04_DISP_SCANOUTPOS + nouveau_crtc(crtc)->index; | 98 | struct { |
99 | struct nv04_disp_mthd_v0 base; | ||
100 | struct nv04_disp_scanoutpos_v0 scan; | ||
101 | } args = { | ||
102 | .base.method = NV04_DISP_SCANOUTPOS, | ||
103 | .base.head = nouveau_crtc(crtc)->index, | ||
104 | }; | ||
90 | struct nouveau_display *disp = nouveau_display(crtc->dev); | 105 | struct nouveau_display *disp = nouveau_display(crtc->dev); |
91 | struct nv04_display_scanoutpos args; | ||
92 | int ret, retry = 1; | 106 | int ret, retry = 1; |
93 | 107 | ||
94 | do { | 108 | do { |
95 | ret = nv_exec(disp->core, mthd, &args, sizeof(args)); | 109 | ret = nvif_mthd(&disp->disp, 0, &args, sizeof(args)); |
96 | if (ret != 0) | 110 | if (ret != 0) |
97 | return 0; | 111 | return 0; |
98 | 112 | ||
99 | if (args.vline) { | 113 | if (args.scan.vline) { |
100 | ret |= DRM_SCANOUTPOS_ACCURATE; | 114 | ret |= DRM_SCANOUTPOS_ACCURATE; |
101 | ret |= DRM_SCANOUTPOS_VALID; | 115 | ret |= DRM_SCANOUTPOS_VALID; |
102 | break; | 116 | break; |
@@ -105,10 +119,11 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos, | |||
105 | if (retry) ndelay(crtc->linedur_ns); | 119 | if (retry) ndelay(crtc->linedur_ns); |
106 | } while (retry--); | 120 | } while (retry--); |
107 | 121 | ||
108 | *hpos = args.hline; | 122 | *hpos = args.scan.hline; |
109 | *vpos = calc(args.vblanks, args.vblanke, args.vtotal, args.vline); | 123 | *vpos = calc(args.scan.vblanks, args.scan.vblanke, |
110 | if (stime) *stime = ns_to_ktime(args.time[0]); | 124 | args.scan.vtotal, args.scan.vline); |
111 | if (etime) *etime = ns_to_ktime(args.time[1]); | 125 | if (stime) *stime = ns_to_ktime(args.scan.time[0]); |
126 | if (etime) *etime = ns_to_ktime(args.scan.time[1]); | ||
112 | 127 | ||
113 | if (*vpos < 0) | 128 | if (*vpos < 0) |
114 | ret |= DRM_SCANOUTPOS_INVBL; | 129 | ret |= DRM_SCANOUTPOS_INVBL; |
@@ -151,16 +166,13 @@ nouveau_display_vblstamp(struct drm_device *dev, int head, int *max_error, | |||
151 | static void | 166 | static void |
152 | nouveau_display_vblank_fini(struct drm_device *dev) | 167 | nouveau_display_vblank_fini(struct drm_device *dev) |
153 | { | 168 | { |
154 | struct nouveau_display *disp = nouveau_display(dev); | 169 | struct drm_crtc *crtc; |
155 | int i; | ||
156 | 170 | ||
157 | drm_vblank_cleanup(dev); | 171 | drm_vblank_cleanup(dev); |
158 | 172 | ||
159 | if (disp->vblank) { | 173 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
160 | for (i = 0; i < dev->mode_config.num_crtc; i++) | 174 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
161 | nouveau_event_ref(NULL, &disp->vblank[i]); | 175 | nvif_notify_fini(&nv_crtc->vblank); |
162 | kfree(disp->vblank); | ||
163 | disp->vblank = NULL; | ||
164 | } | 176 | } |
165 | } | 177 | } |
166 | 178 | ||
@@ -168,19 +180,20 @@ static int | |||
168 | nouveau_display_vblank_init(struct drm_device *dev) | 180 | nouveau_display_vblank_init(struct drm_device *dev) |
169 | { | 181 | { |
170 | struct nouveau_display *disp = nouveau_display(dev); | 182 | struct nouveau_display *disp = nouveau_display(dev); |
171 | struct nouveau_drm *drm = nouveau_drm(dev); | 183 | struct drm_crtc *crtc; |
172 | struct nouveau_disp *pdisp = nouveau_disp(drm->device); | 184 | int ret; |
173 | int ret, i; | ||
174 | |||
175 | disp->vblank = kzalloc(dev->mode_config.num_crtc * | ||
176 | sizeof(*disp->vblank), GFP_KERNEL); | ||
177 | if (!disp->vblank) | ||
178 | return -ENOMEM; | ||
179 | 185 | ||
180 | for (i = 0; i < dev->mode_config.num_crtc; i++) { | 186 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
181 | ret = nouveau_event_new(pdisp->vblank, 1, i, | 187 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
182 | nouveau_display_vblank_handler, | 188 | ret = nvif_notify_init(&disp->disp, NULL, |
183 | drm, &disp->vblank[i]); | 189 | nouveau_display_vblank_handler, false, |
190 | NV04_DISP_NTFY_VBLANK, | ||
191 | &(struct nvif_notify_head_req_v0) { | ||
192 | .head = nv_crtc->index, | ||
193 | }, | ||
194 | sizeof(struct nvif_notify_head_req_v0), | ||
195 | sizeof(struct nvif_notify_head_rep_v0), | ||
196 | &nv_crtc->vblank); | ||
184 | if (ret) { | 197 | if (ret) { |
185 | nouveau_display_vblank_fini(dev); | 198 | nouveau_display_vblank_fini(dev); |
186 | return ret; | 199 | return ret; |
@@ -200,6 +213,10 @@ static void | |||
200 | nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) | 213 | nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) |
201 | { | 214 | { |
202 | struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); | 215 | struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); |
216 | struct nouveau_display *disp = nouveau_display(drm_fb->dev); | ||
217 | |||
218 | if (disp->fb_dtor) | ||
219 | disp->fb_dtor(drm_fb); | ||
203 | 220 | ||
204 | if (fb->nvbo) | 221 | if (fb->nvbo) |
205 | drm_gem_object_unreference_unlocked(&fb->nvbo->gem); | 222 | drm_gem_object_unreference_unlocked(&fb->nvbo->gem); |
@@ -229,63 +246,24 @@ nouveau_framebuffer_init(struct drm_device *dev, | |||
229 | struct drm_mode_fb_cmd2 *mode_cmd, | 246 | struct drm_mode_fb_cmd2 *mode_cmd, |
230 | struct nouveau_bo *nvbo) | 247 | struct nouveau_bo *nvbo) |
231 | { | 248 | { |
232 | struct nouveau_drm *drm = nouveau_drm(dev); | 249 | struct nouveau_display *disp = nouveau_display(dev); |
233 | struct drm_framebuffer *fb = &nv_fb->base; | 250 | struct drm_framebuffer *fb = &nv_fb->base; |
234 | int ret; | 251 | int ret; |
235 | 252 | ||
236 | drm_helper_mode_fill_fb_struct(fb, mode_cmd); | 253 | drm_helper_mode_fill_fb_struct(fb, mode_cmd); |
237 | nv_fb->nvbo = nvbo; | 254 | nv_fb->nvbo = nvbo; |
238 | 255 | ||
239 | if (nv_device(drm->device)->card_type >= NV_50) { | ||
240 | u32 tile_flags = nouveau_bo_tile_layout(nvbo); | ||
241 | if (tile_flags == 0x7a00 || | ||
242 | tile_flags == 0xfe00) | ||
243 | nv_fb->r_dma = NvEvoFB32; | ||
244 | else | ||
245 | if (tile_flags == 0x7000) | ||
246 | nv_fb->r_dma = NvEvoFB16; | ||
247 | else | ||
248 | nv_fb->r_dma = NvEvoVRAM_LP; | ||
249 | |||
250 | switch (fb->depth) { | ||
251 | case 8: nv_fb->r_format = 0x1e00; break; | ||
252 | case 15: nv_fb->r_format = 0xe900; break; | ||
253 | case 16: nv_fb->r_format = 0xe800; break; | ||
254 | case 24: | ||
255 | case 32: nv_fb->r_format = 0xcf00; break; | ||
256 | case 30: nv_fb->r_format = 0xd100; break; | ||
257 | default: | ||
258 | NV_ERROR(drm, "unknown depth %d\n", fb->depth); | ||
259 | return -EINVAL; | ||
260 | } | ||
261 | |||
262 | if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) { | ||
263 | NV_ERROR(drm, "framebuffer requires contiguous bo\n"); | ||
264 | return -EINVAL; | ||
265 | } | ||
266 | |||
267 | if (nv_device(drm->device)->chipset == 0x50) | ||
268 | nv_fb->r_format |= (tile_flags << 8); | ||
269 | |||
270 | if (!tile_flags) { | ||
271 | if (nv_device(drm->device)->card_type < NV_D0) | ||
272 | nv_fb->r_pitch = 0x00100000 | fb->pitches[0]; | ||
273 | else | ||
274 | nv_fb->r_pitch = 0x01000000 | fb->pitches[0]; | ||
275 | } else { | ||
276 | u32 mode = nvbo->tile_mode; | ||
277 | if (nv_device(drm->device)->card_type >= NV_C0) | ||
278 | mode >>= 4; | ||
279 | nv_fb->r_pitch = ((fb->pitches[0] / 4) << 4) | mode; | ||
280 | } | ||
281 | } | ||
282 | |||
283 | ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs); | 256 | ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs); |
284 | if (ret) { | 257 | if (ret) |
285 | return ret; | 258 | return ret; |
259 | |||
260 | if (disp->fb_ctor) { | ||
261 | ret = disp->fb_ctor(fb); | ||
262 | if (ret) | ||
263 | disp->fb_dtor(fb); | ||
286 | } | 264 | } |
287 | 265 | ||
288 | return 0; | 266 | return ret; |
289 | } | 267 | } |
290 | 268 | ||
291 | static struct drm_framebuffer * | 269 | static struct drm_framebuffer * |
@@ -393,7 +371,7 @@ nouveau_display_init(struct drm_device *dev) | |||
393 | /* enable hotplug interrupts */ | 371 | /* enable hotplug interrupts */ |
394 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 372 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
395 | struct nouveau_connector *conn = nouveau_connector(connector); | 373 | struct nouveau_connector *conn = nouveau_connector(connector); |
396 | if (conn->hpd) nouveau_event_get(conn->hpd); | 374 | nvif_notify_get(&conn->hpd); |
397 | } | 375 | } |
398 | 376 | ||
399 | return ret; | 377 | return ret; |
@@ -404,37 +382,32 @@ nouveau_display_fini(struct drm_device *dev) | |||
404 | { | 382 | { |
405 | struct nouveau_display *disp = nouveau_display(dev); | 383 | struct nouveau_display *disp = nouveau_display(dev); |
406 | struct drm_connector *connector; | 384 | struct drm_connector *connector; |
385 | int head; | ||
386 | |||
387 | /* Make sure that drm and hw vblank irqs get properly disabled. */ | ||
388 | for (head = 0; head < dev->mode_config.num_crtc; head++) | ||
389 | drm_vblank_off(dev, head); | ||
407 | 390 | ||
408 | /* disable hotplug interrupts */ | 391 | /* disable hotplug interrupts */ |
409 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 392 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
410 | struct nouveau_connector *conn = nouveau_connector(connector); | 393 | struct nouveau_connector *conn = nouveau_connector(connector); |
411 | if (conn->hpd) nouveau_event_put(conn->hpd); | 394 | nvif_notify_put(&conn->hpd); |
412 | } | 395 | } |
413 | 396 | ||
414 | drm_kms_helper_poll_disable(dev); | 397 | drm_kms_helper_poll_disable(dev); |
415 | disp->fini(dev); | 398 | disp->fini(dev); |
416 | } | 399 | } |
417 | 400 | ||
418 | int | 401 | static void |
419 | nouveau_display_create(struct drm_device *dev) | 402 | nouveau_display_create_properties(struct drm_device *dev) |
420 | { | 403 | { |
421 | struct nouveau_drm *drm = nouveau_drm(dev); | 404 | struct nouveau_display *disp = nouveau_display(dev); |
422 | struct nouveau_device *device = nouveau_dev(dev); | 405 | int gen; |
423 | struct nouveau_display *disp; | ||
424 | int ret, gen; | ||
425 | |||
426 | disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL); | ||
427 | if (!disp) | ||
428 | return -ENOMEM; | ||
429 | |||
430 | drm_mode_config_init(dev); | ||
431 | drm_mode_create_scaling_mode_property(dev); | ||
432 | drm_mode_create_dvi_i_properties(dev); | ||
433 | 406 | ||
434 | if (nv_device(drm->device)->card_type < NV_50) | 407 | if (disp->disp.oclass < NV50_DISP) |
435 | gen = 0; | 408 | gen = 0; |
436 | else | 409 | else |
437 | if (nv_device(drm->device)->card_type < NV_D0) | 410 | if (disp->disp.oclass < GF110_DISP) |
438 | gen = 1; | 411 | gen = 1; |
439 | else | 412 | else |
440 | gen = 2; | 413 | gen = 2; |
@@ -449,26 +422,43 @@ nouveau_display_create(struct drm_device *dev) | |||
449 | disp->underscan_vborder_property = | 422 | disp->underscan_vborder_property = |
450 | drm_property_create_range(dev, 0, "underscan vborder", 0, 128); | 423 | drm_property_create_range(dev, 0, "underscan vborder", 0, 128); |
451 | 424 | ||
452 | if (gen >= 1) { | 425 | if (gen < 1) |
453 | /* -90..+90 */ | 426 | return; |
454 | disp->vibrant_hue_property = | ||
455 | drm_property_create_range(dev, 0, "vibrant hue", 0, 180); | ||
456 | 427 | ||
457 | /* -100..+100 */ | 428 | /* -90..+90 */ |
458 | disp->color_vibrance_property = | 429 | disp->vibrant_hue_property = |
459 | drm_property_create_range(dev, 0, "color vibrance", 0, 200); | 430 | drm_property_create_range(dev, 0, "vibrant hue", 0, 180); |
460 | } | 431 | |
432 | /* -100..+100 */ | ||
433 | disp->color_vibrance_property = | ||
434 | drm_property_create_range(dev, 0, "color vibrance", 0, 200); | ||
435 | } | ||
436 | |||
437 | int | ||
438 | nouveau_display_create(struct drm_device *dev) | ||
439 | { | ||
440 | struct nouveau_drm *drm = nouveau_drm(dev); | ||
441 | struct nouveau_display *disp; | ||
442 | int ret; | ||
443 | |||
444 | disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL); | ||
445 | if (!disp) | ||
446 | return -ENOMEM; | ||
447 | |||
448 | drm_mode_config_init(dev); | ||
449 | drm_mode_create_scaling_mode_property(dev); | ||
450 | drm_mode_create_dvi_i_properties(dev); | ||
461 | 451 | ||
462 | dev->mode_config.funcs = &nouveau_mode_config_funcs; | 452 | dev->mode_config.funcs = &nouveau_mode_config_funcs; |
463 | dev->mode_config.fb_base = nv_device_resource_start(device, 1); | 453 | dev->mode_config.fb_base = nv_device_resource_start(nvkm_device(&drm->device), 1); |
464 | 454 | ||
465 | dev->mode_config.min_width = 0; | 455 | dev->mode_config.min_width = 0; |
466 | dev->mode_config.min_height = 0; | 456 | dev->mode_config.min_height = 0; |
467 | if (nv_device(drm->device)->card_type < NV_10) { | 457 | if (drm->device.info.family < NV_DEVICE_INFO_V0_CELSIUS) { |
468 | dev->mode_config.max_width = 2048; | 458 | dev->mode_config.max_width = 2048; |
469 | dev->mode_config.max_height = 2048; | 459 | dev->mode_config.max_height = 2048; |
470 | } else | 460 | } else |
471 | if (nv_device(drm->device)->card_type < NV_50) { | 461 | if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { |
472 | dev->mode_config.max_width = 4096; | 462 | dev->mode_config.max_width = 4096; |
473 | dev->mode_config.max_height = 4096; | 463 | dev->mode_config.max_height = 4096; |
474 | } else { | 464 | } else { |
@@ -479,7 +469,7 @@ nouveau_display_create(struct drm_device *dev) | |||
479 | dev->mode_config.preferred_depth = 24; | 469 | dev->mode_config.preferred_depth = 24; |
480 | dev->mode_config.prefer_shadow = 1; | 470 | dev->mode_config.prefer_shadow = 1; |
481 | 471 | ||
482 | if (nv_device(drm->device)->chipset < 0x11) | 472 | if (drm->device.info.chipset < 0x11) |
483 | dev->mode_config.async_page_flip = false; | 473 | dev->mode_config.async_page_flip = false; |
484 | else | 474 | else |
485 | dev->mode_config.async_page_flip = true; | 475 | dev->mode_config.async_page_flip = true; |
@@ -487,29 +477,30 @@ nouveau_display_create(struct drm_device *dev) | |||
487 | drm_kms_helper_poll_init(dev); | 477 | drm_kms_helper_poll_init(dev); |
488 | drm_kms_helper_poll_disable(dev); | 478 | drm_kms_helper_poll_disable(dev); |
489 | 479 | ||
490 | if (drm->vbios.dcb.entries) { | 480 | if (nouveau_modeset != 2 && drm->vbios.dcb.entries) { |
491 | static const u16 oclass[] = { | 481 | static const u16 oclass[] = { |
492 | GM107_DISP_CLASS, | 482 | GM107_DISP, |
493 | NVF0_DISP_CLASS, | 483 | GK110_DISP, |
494 | NVE0_DISP_CLASS, | 484 | GK104_DISP, |
495 | NVD0_DISP_CLASS, | 485 | GF110_DISP, |
496 | NVA3_DISP_CLASS, | 486 | GT214_DISP, |
497 | NV94_DISP_CLASS, | 487 | GT206_DISP, |
498 | NVA0_DISP_CLASS, | 488 | GT200_DISP, |
499 | NV84_DISP_CLASS, | 489 | G82_DISP, |
500 | NV50_DISP_CLASS, | 490 | NV50_DISP, |
501 | NV04_DISP_CLASS, | 491 | NV04_DISP, |
502 | }; | 492 | }; |
503 | int i; | 493 | int i; |
504 | 494 | ||
505 | for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) { | 495 | for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) { |
506 | ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, | 496 | ret = nvif_object_init(nvif_object(&drm->device), NULL, |
507 | NVDRM_DISPLAY, oclass[i], | 497 | NVDRM_DISPLAY, oclass[i], |
508 | NULL, 0, &disp->core); | 498 | NULL, 0, &disp->disp); |
509 | } | 499 | } |
510 | 500 | ||
511 | if (ret == 0) { | 501 | if (ret == 0) { |
512 | if (nv_mclass(disp->core) < NV50_DISP_CLASS) | 502 | nouveau_display_create_properties(dev); |
503 | if (disp->disp.oclass < NV50_DISP) | ||
513 | ret = nv04_display_create(dev); | 504 | ret = nv04_display_create(dev); |
514 | else | 505 | else |
515 | ret = nv50_display_create(dev); | 506 | ret = nv50_display_create(dev); |
@@ -542,7 +533,6 @@ void | |||
542 | nouveau_display_destroy(struct drm_device *dev) | 533 | nouveau_display_destroy(struct drm_device *dev) |
543 | { | 534 | { |
544 | struct nouveau_display *disp = nouveau_display(dev); | 535 | struct nouveau_display *disp = nouveau_display(dev); |
545 | struct nouveau_drm *drm = nouveau_drm(dev); | ||
546 | 536 | ||
547 | nouveau_backlight_exit(dev); | 537 | nouveau_backlight_exit(dev); |
548 | nouveau_display_vblank_fini(dev); | 538 | nouveau_display_vblank_fini(dev); |
@@ -553,7 +543,7 @@ nouveau_display_destroy(struct drm_device *dev) | |||
553 | if (disp->dtor) | 543 | if (disp->dtor) |
554 | disp->dtor(dev); | 544 | disp->dtor(dev); |
555 | 545 | ||
556 | nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_DISPLAY); | 546 | nvif_object_fini(&disp->disp); |
557 | 547 | ||
558 | nouveau_drm(dev)->display = NULL; | 548 | nouveau_drm(dev)->display = NULL; |
559 | kfree(disp); | 549 | kfree(disp); |
@@ -620,6 +610,8 @@ void | |||
620 | nouveau_display_resume(struct drm_device *dev) | 610 | nouveau_display_resume(struct drm_device *dev) |
621 | { | 611 | { |
622 | struct drm_crtc *crtc; | 612 | struct drm_crtc *crtc; |
613 | int head; | ||
614 | |||
623 | nouveau_display_init(dev); | 615 | nouveau_display_init(dev); |
624 | 616 | ||
625 | /* Force CLUT to get re-loaded during modeset */ | 617 | /* Force CLUT to get re-loaded during modeset */ |
@@ -629,6 +621,10 @@ nouveau_display_resume(struct drm_device *dev) | |||
629 | nv_crtc->lut.depth = 0; | 621 | nv_crtc->lut.depth = 0; |
630 | } | 622 | } |
631 | 623 | ||
624 | /* Make sure that drm and hw vblank irqs get resumed if needed. */ | ||
625 | for (head = 0; head < dev->mode_config.num_crtc; head++) | ||
626 | drm_vblank_on(dev, head); | ||
627 | |||
632 | drm_helper_resume_force_mode(dev); | 628 | drm_helper_resume_force_mode(dev); |
633 | 629 | ||
634 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 630 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
@@ -669,7 +665,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan, | |||
669 | if (ret) | 665 | if (ret) |
670 | goto fail; | 666 | goto fail; |
671 | 667 | ||
672 | if (nv_device(drm->device)->card_type < NV_C0) | 668 | if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI) |
673 | BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1); | 669 | BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1); |
674 | else | 670 | else |
675 | BEGIN_NVC0(chan, FermiSw, NV_SW_PAGE_FLIP, 1); | 671 | BEGIN_NVC0(chan, FermiSw, NV_SW_PAGE_FLIP, 1); |
@@ -698,12 +694,15 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
698 | struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->primary->fb)->nvbo; | 694 | struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->primary->fb)->nvbo; |
699 | struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo; | 695 | struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo; |
700 | struct nouveau_page_flip_state *s; | 696 | struct nouveau_page_flip_state *s; |
701 | struct nouveau_channel *chan = drm->channel; | 697 | struct nouveau_channel *chan; |
698 | struct nouveau_cli *cli; | ||
702 | struct nouveau_fence *fence; | 699 | struct nouveau_fence *fence; |
703 | int ret; | 700 | int ret; |
704 | 701 | ||
705 | if (!drm->channel) | 702 | chan = drm->channel; |
703 | if (!chan) | ||
706 | return -ENODEV; | 704 | return -ENODEV; |
705 | cli = (void *)nvif_client(&chan->device->base); | ||
707 | 706 | ||
708 | s = kzalloc(sizeof(*s), GFP_KERNEL); | 707 | s = kzalloc(sizeof(*s), GFP_KERNEL); |
709 | if (!s) | 708 | if (!s) |
@@ -715,7 +714,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
715 | goto fail_free; | 714 | goto fail_free; |
716 | } | 715 | } |
717 | 716 | ||
718 | mutex_lock(&chan->cli->mutex); | 717 | mutex_lock(&cli->mutex); |
719 | 718 | ||
720 | /* synchronise rendering channel with the kernel's channel */ | 719 | /* synchronise rendering channel with the kernel's channel */ |
721 | spin_lock(&new_bo->bo.bdev->fence_lock); | 720 | spin_lock(&new_bo->bo.bdev->fence_lock); |
@@ -740,7 +739,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
740 | drm_vblank_get(dev, nouveau_crtc(crtc)->index); | 739 | drm_vblank_get(dev, nouveau_crtc(crtc)->index); |
741 | 740 | ||
742 | /* Emit a page flip */ | 741 | /* Emit a page flip */ |
743 | if (nv_device(drm->device)->card_type >= NV_50) { | 742 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { |
744 | ret = nv50_display_flip_next(crtc, fb, chan, swap_interval); | 743 | ret = nv50_display_flip_next(crtc, fb, chan, swap_interval); |
745 | if (ret) | 744 | if (ret) |
746 | goto fail_unreserve; | 745 | goto fail_unreserve; |
@@ -769,7 +768,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
769 | ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); | 768 | ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); |
770 | if (ret) | 769 | if (ret) |
771 | goto fail_unreserve; | 770 | goto fail_unreserve; |
772 | mutex_unlock(&chan->cli->mutex); | 771 | mutex_unlock(&cli->mutex); |
773 | 772 | ||
774 | /* Update the crtc struct and cleanup */ | 773 | /* Update the crtc struct and cleanup */ |
775 | crtc->primary->fb = fb; | 774 | crtc->primary->fb = fb; |
@@ -785,7 +784,7 @@ fail_unreserve: | |||
785 | drm_vblank_put(dev, nouveau_crtc(crtc)->index); | 784 | drm_vblank_put(dev, nouveau_crtc(crtc)->index); |
786 | ttm_bo_unreserve(&old_bo->bo); | 785 | ttm_bo_unreserve(&old_bo->bo); |
787 | fail_unpin: | 786 | fail_unpin: |
788 | mutex_unlock(&chan->cli->mutex); | 787 | mutex_unlock(&cli->mutex); |
789 | if (old_bo != new_bo) | 788 | if (old_bo != new_bo) |
790 | nouveau_bo_unpin(new_bo); | 789 | nouveau_bo_unpin(new_bo); |
791 | fail_free: | 790 | fail_free: |
@@ -815,7 +814,7 @@ nouveau_finish_page_flip(struct nouveau_channel *chan, | |||
815 | s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); | 814 | s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); |
816 | if (s->event) { | 815 | if (s->event) { |
817 | /* Vblank timestamps/counts are only correct on >= NV-50 */ | 816 | /* Vblank timestamps/counts are only correct on >= NV-50 */ |
818 | if (nv_device(drm->device)->card_type >= NV_50) | 817 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) |
819 | crtcid = s->crtc; | 818 | crtcid = s->crtc; |
820 | 819 | ||
821 | drm_send_vblank_event(dev, crtcid, s->event); | 820 | drm_send_vblank_event(dev, crtcid, s->event); |
@@ -841,7 +840,7 @@ nouveau_flip_complete(void *data) | |||
841 | struct nouveau_page_flip_state state; | 840 | struct nouveau_page_flip_state state; |
842 | 841 | ||
843 | if (!nouveau_finish_page_flip(chan, &state)) { | 842 | if (!nouveau_finish_page_flip(chan, &state)) { |
844 | if (nv_device(drm->device)->card_type < NV_50) { | 843 | if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { |
845 | nv_set_crtc_base(drm->dev, state.crtc, state.offset + | 844 | nv_set_crtc_base(drm->dev, state.crtc, state.offset + |
846 | state.y * state.pitch + | 845 | state.y * state.pitch + |
847 | state.x * state.bpp / 8); | 846 | state.x * state.bpp / 8); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h index a71cf77e55b2..88ca177cb1c7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.h +++ b/drivers/gpu/drm/nouveau/nouveau_display.h | |||
@@ -9,9 +9,11 @@ struct nouveau_framebuffer { | |||
9 | struct drm_framebuffer base; | 9 | struct drm_framebuffer base; |
10 | struct nouveau_bo *nvbo; | 10 | struct nouveau_bo *nvbo; |
11 | struct nouveau_vma vma; | 11 | struct nouveau_vma vma; |
12 | u32 r_dma; | 12 | u32 r_handle; |
13 | u32 r_format; | 13 | u32 r_format; |
14 | u32 r_pitch; | 14 | u32 r_pitch; |
15 | struct nvif_object h_base[4]; | ||
16 | struct nvif_object h_core; | ||
15 | }; | 17 | }; |
16 | 18 | ||
17 | static inline struct nouveau_framebuffer * | 19 | static inline struct nouveau_framebuffer * |
@@ -36,8 +38,10 @@ struct nouveau_display { | |||
36 | int (*init)(struct drm_device *); | 38 | int (*init)(struct drm_device *); |
37 | void (*fini)(struct drm_device *); | 39 | void (*fini)(struct drm_device *); |
38 | 40 | ||
39 | struct nouveau_object *core; | 41 | int (*fb_ctor)(struct drm_framebuffer *); |
40 | struct nouveau_eventh **vblank; | 42 | void (*fb_dtor)(struct drm_framebuffer *); |
43 | |||
44 | struct nvif_object disp; | ||
41 | 45 | ||
42 | struct drm_property *dithering_mode; | 46 | struct drm_property *dithering_mode; |
43 | struct drm_property *dithering_depth; | 47 | struct drm_property *dithering_depth; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index c177272152e2..8508603cc8c3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c | |||
@@ -24,8 +24,6 @@ | |||
24 | * | 24 | * |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <core/client.h> | ||
28 | |||
29 | #include "nouveau_drm.h" | 27 | #include "nouveau_drm.h" |
30 | #include "nouveau_dma.h" | 28 | #include "nouveau_dma.h" |
31 | 29 | ||
@@ -54,9 +52,9 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout) | |||
54 | { | 52 | { |
55 | uint64_t val; | 53 | uint64_t val; |
56 | 54 | ||
57 | val = nv_ro32(chan->object, chan->user_get); | 55 | val = nvif_rd32(chan, chan->user_get); |
58 | if (chan->user_get_hi) | 56 | if (chan->user_get_hi) |
59 | val |= (uint64_t)nv_ro32(chan->object, chan->user_get_hi) << 32; | 57 | val |= (uint64_t)nvif_rd32(chan, chan->user_get_hi) << 32; |
60 | 58 | ||
61 | /* reset counter as long as GET is still advancing, this is | 59 | /* reset counter as long as GET is still advancing, this is |
62 | * to avoid misdetecting a GPU lockup if the GPU happens to | 60 | * to avoid misdetecting a GPU lockup if the GPU happens to |
@@ -84,12 +82,13 @@ void | |||
84 | nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, | 82 | nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, |
85 | int delta, int length) | 83 | int delta, int length) |
86 | { | 84 | { |
85 | struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base); | ||
87 | struct nouveau_bo *pb = chan->push.buffer; | 86 | struct nouveau_bo *pb = chan->push.buffer; |
88 | struct nouveau_vma *vma; | 87 | struct nouveau_vma *vma; |
89 | int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base; | 88 | int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base; |
90 | u64 offset; | 89 | u64 offset; |
91 | 90 | ||
92 | vma = nouveau_bo_vma_find(bo, nv_client(chan->cli)->vm); | 91 | vma = nouveau_bo_vma_find(bo, cli->vm); |
93 | BUG_ON(!vma); | 92 | BUG_ON(!vma); |
94 | offset = vma->offset + delta; | 93 | offset = vma->offset + delta; |
95 | 94 | ||
@@ -104,7 +103,7 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, | |||
104 | /* Flush writes. */ | 103 | /* Flush writes. */ |
105 | nouveau_bo_rd32(pb, 0); | 104 | nouveau_bo_rd32(pb, 0); |
106 | 105 | ||
107 | nv_wo32(chan->object, 0x8c, chan->dma.ib_put); | 106 | nvif_wr32(chan, 0x8c, chan->dma.ib_put); |
108 | chan->dma.ib_free--; | 107 | chan->dma.ib_free--; |
109 | } | 108 | } |
110 | 109 | ||
@@ -114,7 +113,7 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count) | |||
114 | uint32_t cnt = 0, prev_get = 0; | 113 | uint32_t cnt = 0, prev_get = 0; |
115 | 114 | ||
116 | while (chan->dma.ib_free < count) { | 115 | while (chan->dma.ib_free < count) { |
117 | uint32_t get = nv_ro32(chan->object, 0x88); | 116 | uint32_t get = nvif_rd32(chan, 0x88); |
118 | if (get != prev_get) { | 117 | if (get != prev_get) { |
119 | prev_get = get; | 118 | prev_get = get; |
120 | cnt = 0; | 119 | cnt = 0; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h index dc0e0c5cadb4..8da0a272c45a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.h +++ b/drivers/gpu/drm/nouveau/nouveau_dma.h | |||
@@ -58,31 +58,14 @@ enum { | |||
58 | FermiSw = 5, /* DO NOT CHANGE (well.. 6/7 will work...) */ | 58 | FermiSw = 5, /* DO NOT CHANGE (well.. 6/7 will work...) */ |
59 | }; | 59 | }; |
60 | 60 | ||
61 | /* Object handles. */ | 61 | /* Object handles - for stuff that's doesn't use handle == oclass. */ |
62 | enum { | 62 | enum { |
63 | NvM2MF = 0x80000001, | ||
64 | NvDmaFB = 0x80000002, | 63 | NvDmaFB = 0x80000002, |
65 | NvDmaTT = 0x80000003, | 64 | NvDmaTT = 0x80000003, |
66 | NvNotify0 = 0x80000006, | 65 | NvNotify0 = 0x80000006, |
67 | Nv2D = 0x80000007, | ||
68 | NvCtxSurf2D = 0x80000008, | ||
69 | NvRop = 0x80000009, | ||
70 | NvImagePatt = 0x8000000a, | ||
71 | NvClipRect = 0x8000000b, | ||
72 | NvGdiRect = 0x8000000c, | ||
73 | NvImageBlit = 0x8000000d, | ||
74 | NvSw = 0x8000000e, | ||
75 | NvSema = 0x8000000f, | 66 | NvSema = 0x8000000f, |
76 | NvEvoSema0 = 0x80000010, | 67 | NvEvoSema0 = 0x80000010, |
77 | NvEvoSema1 = 0x80000011, | 68 | NvEvoSema1 = 0x80000011, |
78 | NvNotify1 = 0x80000012, | ||
79 | |||
80 | /* G80+ display objects */ | ||
81 | NvEvoVRAM = 0x01000000, | ||
82 | NvEvoFB16 = 0x01000001, | ||
83 | NvEvoFB32 = 0x01000002, | ||
84 | NvEvoVRAM_LP = 0x01000003, | ||
85 | NvEvoSync = 0xcafe0000 | ||
86 | }; | 69 | }; |
87 | 70 | ||
88 | #define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039 | 71 | #define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039 |
@@ -157,7 +140,7 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data) | |||
157 | #define WRITE_PUT(val) do { \ | 140 | #define WRITE_PUT(val) do { \ |
158 | mb(); \ | 141 | mb(); \ |
159 | nouveau_bo_rd32(chan->push.buffer, 0); \ | 142 | nouveau_bo_rd32(chan->push.buffer, 0); \ |
160 | nv_wo32(chan->object, chan->user_put, ((val) << 2) + chan->push.vma.offset); \ | 143 | nvif_wr32(chan, chan->user_put, ((val) << 2) + chan->push.vma.offset); \ |
161 | } while (0) | 144 | } while (0) |
162 | 145 | ||
163 | static inline void | 146 | static inline void |
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index 5675ffc175ae..c5137cccce7d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c | |||
@@ -30,11 +30,6 @@ | |||
30 | #include "nouveau_encoder.h" | 30 | #include "nouveau_encoder.h" |
31 | #include "nouveau_crtc.h" | 31 | #include "nouveau_crtc.h" |
32 | 32 | ||
33 | #include <core/class.h> | ||
34 | |||
35 | #include <subdev/gpio.h> | ||
36 | #include <subdev/i2c.h> | ||
37 | |||
38 | static void | 33 | static void |
39 | nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_port *auxch, | 34 | nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_port *auxch, |
40 | u8 *dpcd) | 35 | u8 *dpcd) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index c9428c943afb..250a5e88c751 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
@@ -27,21 +27,14 @@ | |||
27 | #include <linux/pci.h> | 27 | #include <linux/pci.h> |
28 | #include <linux/pm_runtime.h> | 28 | #include <linux/pm_runtime.h> |
29 | #include <linux/vga_switcheroo.h> | 29 | #include <linux/vga_switcheroo.h> |
30 | |||
30 | #include "drmP.h" | 31 | #include "drmP.h" |
31 | #include "drm_crtc_helper.h" | 32 | #include "drm_crtc_helper.h" |
33 | |||
32 | #include <core/device.h> | 34 | #include <core/device.h> |
33 | #include <core/client.h> | ||
34 | #include <core/gpuobj.h> | 35 | #include <core/gpuobj.h> |
35 | #include <core/class.h> | ||
36 | #include <core/option.h> | 36 | #include <core/option.h> |
37 | 37 | ||
38 | #include <engine/device.h> | ||
39 | #include <engine/disp.h> | ||
40 | #include <engine/fifo.h> | ||
41 | #include <engine/software.h> | ||
42 | |||
43 | #include <subdev/vm.h> | ||
44 | |||
45 | #include "nouveau_drm.h" | 38 | #include "nouveau_drm.h" |
46 | #include "nouveau_dma.h" | 39 | #include "nouveau_dma.h" |
47 | #include "nouveau_ttm.h" | 40 | #include "nouveau_ttm.h" |
@@ -57,6 +50,7 @@ | |||
57 | #include "nouveau_fbcon.h" | 50 | #include "nouveau_fbcon.h" |
58 | #include "nouveau_fence.h" | 51 | #include "nouveau_fence.h" |
59 | #include "nouveau_debugfs.h" | 52 | #include "nouveau_debugfs.h" |
53 | #include "nouveau_usif.h" | ||
60 | 54 | ||
61 | MODULE_PARM_DESC(config, "option string to pass to driver core"); | 55 | MODULE_PARM_DESC(config, "option string to pass to driver core"); |
62 | static char *nouveau_config; | 56 | static char *nouveau_config; |
@@ -109,40 +103,37 @@ static int | |||
109 | nouveau_cli_create(u64 name, const char *sname, | 103 | nouveau_cli_create(u64 name, const char *sname, |
110 | int size, void **pcli) | 104 | int size, void **pcli) |
111 | { | 105 | { |
112 | struct nouveau_cli *cli; | 106 | struct nouveau_cli *cli = *pcli = kzalloc(size, GFP_KERNEL); |
113 | int ret; | 107 | if (cli) { |
114 | 108 | int ret = nvif_client_init(NULL, NULL, sname, name, | |
115 | *pcli = NULL; | 109 | nouveau_config, nouveau_debug, |
116 | ret = nouveau_client_create_(sname, name, nouveau_config, | 110 | &cli->base); |
117 | nouveau_debug, size, pcli); | 111 | if (ret == 0) { |
118 | cli = *pcli; | 112 | mutex_init(&cli->mutex); |
119 | if (ret) { | 113 | usif_client_init(cli); |
120 | if (cli) | 114 | } |
121 | nouveau_client_destroy(&cli->base); | ||
122 | *pcli = NULL; | ||
123 | return ret; | 115 | return ret; |
124 | } | 116 | } |
125 | 117 | return -ENOMEM; | |
126 | mutex_init(&cli->mutex); | ||
127 | return 0; | ||
128 | } | 118 | } |
129 | 119 | ||
130 | static void | 120 | static void |
131 | nouveau_cli_destroy(struct nouveau_cli *cli) | 121 | nouveau_cli_destroy(struct nouveau_cli *cli) |
132 | { | 122 | { |
133 | struct nouveau_object *client = nv_object(cli); | 123 | nouveau_vm_ref(NULL, &nvkm_client(&cli->base)->vm, NULL); |
134 | nouveau_vm_ref(NULL, &cli->base.vm, NULL); | 124 | nvif_client_fini(&cli->base); |
135 | nouveau_client_fini(&cli->base, false); | 125 | usif_client_fini(cli); |
136 | atomic_set(&client->refcount, 1); | ||
137 | nouveau_object_ref(NULL, &client); | ||
138 | } | 126 | } |
139 | 127 | ||
140 | static void | 128 | static void |
141 | nouveau_accel_fini(struct nouveau_drm *drm) | 129 | nouveau_accel_fini(struct nouveau_drm *drm) |
142 | { | 130 | { |
143 | nouveau_gpuobj_ref(NULL, &drm->notify); | ||
144 | nouveau_channel_del(&drm->channel); | 131 | nouveau_channel_del(&drm->channel); |
132 | nvif_object_fini(&drm->ntfy); | ||
133 | nouveau_gpuobj_ref(NULL, &drm->notify); | ||
134 | nvif_object_fini(&drm->nvsw); | ||
145 | nouveau_channel_del(&drm->cechan); | 135 | nouveau_channel_del(&drm->cechan); |
136 | nvif_object_fini(&drm->ttm.copy); | ||
146 | if (drm->fence) | 137 | if (drm->fence) |
147 | nouveau_fence(drm)->dtor(drm); | 138 | nouveau_fence(drm)->dtor(drm); |
148 | } | 139 | } |
@@ -150,46 +141,71 @@ nouveau_accel_fini(struct nouveau_drm *drm) | |||
150 | static void | 141 | static void |
151 | nouveau_accel_init(struct nouveau_drm *drm) | 142 | nouveau_accel_init(struct nouveau_drm *drm) |
152 | { | 143 | { |
153 | struct nouveau_device *device = nv_device(drm->device); | 144 | struct nvif_device *device = &drm->device; |
154 | struct nouveau_object *object; | ||
155 | u32 arg0, arg1; | 145 | u32 arg0, arg1; |
156 | int ret; | 146 | u32 sclass[16]; |
147 | int ret, i; | ||
157 | 148 | ||
158 | if (nouveau_noaccel || !nouveau_fifo(device) /*XXX*/) | 149 | if (nouveau_noaccel) |
159 | return; | 150 | return; |
160 | 151 | ||
161 | /* initialise synchronisation routines */ | 152 | /* initialise synchronisation routines */ |
162 | if (device->card_type < NV_10) ret = nv04_fence_create(drm); | 153 | /*XXX: this is crap, but the fence/channel stuff is a little |
163 | else if (device->card_type < NV_11 || | 154 | * backwards in some places. this will be fixed. |
164 | device->chipset < 0x17) ret = nv10_fence_create(drm); | 155 | */ |
165 | else if (device->card_type < NV_50) ret = nv17_fence_create(drm); | 156 | ret = nvif_object_sclass(&device->base, sclass, ARRAY_SIZE(sclass)); |
166 | else if (device->chipset < 0x84) ret = nv50_fence_create(drm); | 157 | if (ret < 0) |
167 | else if (device->card_type < NV_C0) ret = nv84_fence_create(drm); | 158 | return; |
168 | else ret = nvc0_fence_create(drm); | 159 | |
160 | for (ret = -ENOSYS, i = 0; ret && i < ARRAY_SIZE(sclass); i++) { | ||
161 | switch (sclass[i]) { | ||
162 | case NV03_CHANNEL_DMA: | ||
163 | ret = nv04_fence_create(drm); | ||
164 | break; | ||
165 | case NV10_CHANNEL_DMA: | ||
166 | ret = nv10_fence_create(drm); | ||
167 | break; | ||
168 | case NV17_CHANNEL_DMA: | ||
169 | case NV40_CHANNEL_DMA: | ||
170 | ret = nv17_fence_create(drm); | ||
171 | break; | ||
172 | case NV50_CHANNEL_GPFIFO: | ||
173 | ret = nv50_fence_create(drm); | ||
174 | break; | ||
175 | case G82_CHANNEL_GPFIFO: | ||
176 | ret = nv84_fence_create(drm); | ||
177 | break; | ||
178 | case FERMI_CHANNEL_GPFIFO: | ||
179 | case KEPLER_CHANNEL_GPFIFO_A: | ||
180 | ret = nvc0_fence_create(drm); | ||
181 | break; | ||
182 | default: | ||
183 | break; | ||
184 | } | ||
185 | } | ||
186 | |||
169 | if (ret) { | 187 | if (ret) { |
170 | NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret); | 188 | NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret); |
171 | nouveau_accel_fini(drm); | 189 | nouveau_accel_fini(drm); |
172 | return; | 190 | return; |
173 | } | 191 | } |
174 | 192 | ||
175 | if (device->card_type >= NV_E0) { | 193 | if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) { |
176 | ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, | 194 | ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN + 1, |
177 | NVDRM_CHAN + 1, | 195 | KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0| |
178 | NVE0_CHANNEL_IND_ENGINE_CE0 | | 196 | KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1, |
179 | NVE0_CHANNEL_IND_ENGINE_CE1, 0, | 197 | 0, &drm->cechan); |
180 | &drm->cechan); | ||
181 | if (ret) | 198 | if (ret) |
182 | NV_ERROR(drm, "failed to create ce channel, %d\n", ret); | 199 | NV_ERROR(drm, "failed to create ce channel, %d\n", ret); |
183 | 200 | ||
184 | arg0 = NVE0_CHANNEL_IND_ENGINE_GR; | 201 | arg0 = KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR; |
185 | arg1 = 1; | 202 | arg1 = 1; |
186 | } else | 203 | } else |
187 | if (device->chipset >= 0xa3 && | 204 | if (device->info.chipset >= 0xa3 && |
188 | device->chipset != 0xaa && | 205 | device->info.chipset != 0xaa && |
189 | device->chipset != 0xac) { | 206 | device->info.chipset != 0xac) { |
190 | ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, | 207 | ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN + 1, |
191 | NVDRM_CHAN + 1, NvDmaFB, NvDmaTT, | 208 | NvDmaFB, NvDmaTT, &drm->cechan); |
192 | &drm->cechan); | ||
193 | if (ret) | 209 | if (ret) |
194 | NV_ERROR(drm, "failed to create ce channel, %d\n", ret); | 210 | NV_ERROR(drm, "failed to create ce channel, %d\n", ret); |
195 | 211 | ||
@@ -200,30 +216,30 @@ nouveau_accel_init(struct nouveau_drm *drm) | |||
200 | arg1 = NvDmaTT; | 216 | arg1 = NvDmaTT; |
201 | } | 217 | } |
202 | 218 | ||
203 | ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, NVDRM_CHAN, | 219 | ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN, arg0, arg1, |
204 | arg0, arg1, &drm->channel); | 220 | &drm->channel); |
205 | if (ret) { | 221 | if (ret) { |
206 | NV_ERROR(drm, "failed to create kernel channel, %d\n", ret); | 222 | NV_ERROR(drm, "failed to create kernel channel, %d\n", ret); |
207 | nouveau_accel_fini(drm); | 223 | nouveau_accel_fini(drm); |
208 | return; | 224 | return; |
209 | } | 225 | } |
210 | 226 | ||
211 | ret = nouveau_object_new(nv_object(drm), NVDRM_CHAN, NVDRM_NVSW, | 227 | ret = nvif_object_init(drm->channel->object, NULL, NVDRM_NVSW, |
212 | nouveau_abi16_swclass(drm), NULL, 0, &object); | 228 | nouveau_abi16_swclass(drm), NULL, 0, &drm->nvsw); |
213 | if (ret == 0) { | 229 | if (ret == 0) { |
214 | struct nouveau_software_chan *swch = (void *)object->parent; | 230 | struct nouveau_software_chan *swch; |
215 | ret = RING_SPACE(drm->channel, 2); | 231 | ret = RING_SPACE(drm->channel, 2); |
216 | if (ret == 0) { | 232 | if (ret == 0) { |
217 | if (device->card_type < NV_C0) { | 233 | if (device->info.family < NV_DEVICE_INFO_V0_FERMI) { |
218 | BEGIN_NV04(drm->channel, NvSubSw, 0, 1); | 234 | BEGIN_NV04(drm->channel, NvSubSw, 0, 1); |
219 | OUT_RING (drm->channel, NVDRM_NVSW); | 235 | OUT_RING (drm->channel, NVDRM_NVSW); |
220 | } else | 236 | } else |
221 | if (device->card_type < NV_E0) { | 237 | if (device->info.family < NV_DEVICE_INFO_V0_KEPLER) { |
222 | BEGIN_NVC0(drm->channel, FermiSw, 0, 1); | 238 | BEGIN_NVC0(drm->channel, FermiSw, 0, 1); |
223 | OUT_RING (drm->channel, 0x001f0000); | 239 | OUT_RING (drm->channel, 0x001f0000); |
224 | } | 240 | } |
225 | } | 241 | } |
226 | swch = (void *)object->parent; | 242 | swch = (void *)nvkm_object(&drm->nvsw)->parent; |
227 | swch->flip = nouveau_flip_complete; | 243 | swch->flip = nouveau_flip_complete; |
228 | swch->flip_data = drm->channel; | 244 | swch->flip_data = drm->channel; |
229 | } | 245 | } |
@@ -234,24 +250,24 @@ nouveau_accel_init(struct nouveau_drm *drm) | |||
234 | return; | 250 | return; |
235 | } | 251 | } |
236 | 252 | ||
237 | if (device->card_type < NV_C0) { | 253 | if (device->info.family < NV_DEVICE_INFO_V0_FERMI) { |
238 | ret = nouveau_gpuobj_new(drm->device, NULL, 32, 0, 0, | 254 | ret = nouveau_gpuobj_new(nvkm_object(&drm->device), NULL, 32, |
239 | &drm->notify); | 255 | 0, 0, &drm->notify); |
240 | if (ret) { | 256 | if (ret) { |
241 | NV_ERROR(drm, "failed to allocate notifier, %d\n", ret); | 257 | NV_ERROR(drm, "failed to allocate notifier, %d\n", ret); |
242 | nouveau_accel_fini(drm); | 258 | nouveau_accel_fini(drm); |
243 | return; | 259 | return; |
244 | } | 260 | } |
245 | 261 | ||
246 | ret = nouveau_object_new(nv_object(drm), | 262 | ret = nvif_object_init(drm->channel->object, NULL, NvNotify0, |
247 | drm->channel->handle, NvNotify0, | 263 | NV_DMA_IN_MEMORY, |
248 | 0x003d, &(struct nv_dma_class) { | 264 | &(struct nv_dma_v0) { |
249 | .flags = NV_DMA_TARGET_VRAM | | 265 | .target = NV_DMA_V0_TARGET_VRAM, |
250 | NV_DMA_ACCESS_RDWR, | 266 | .access = NV_DMA_V0_ACCESS_RDWR, |
251 | .start = drm->notify->addr, | 267 | .start = drm->notify->addr, |
252 | .limit = drm->notify->addr + 31 | 268 | .limit = drm->notify->addr + 31 |
253 | }, sizeof(struct nv_dma_class), | 269 | }, sizeof(struct nv_dma_v0), |
254 | &object); | 270 | &drm->ntfy); |
255 | if (ret) { | 271 | if (ret) { |
256 | nouveau_accel_fini(drm); | 272 | nouveau_accel_fini(drm); |
257 | return; | 273 | return; |
@@ -294,7 +310,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev, | |||
294 | #ifdef CONFIG_X86 | 310 | #ifdef CONFIG_X86 |
295 | boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; | 311 | boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; |
296 | #endif | 312 | #endif |
297 | remove_conflicting_framebuffers(aper, "nouveaufb", boot); | 313 | if (nouveau_modeset != 2) |
314 | remove_conflicting_framebuffers(aper, "nouveaufb", boot); | ||
298 | kfree(aper); | 315 | kfree(aper); |
299 | 316 | ||
300 | ret = nouveau_device_create(pdev, NOUVEAU_BUS_PCI, | 317 | ret = nouveau_device_create(pdev, NOUVEAU_BUS_PCI, |
@@ -348,7 +365,6 @@ static int | |||
348 | nouveau_drm_load(struct drm_device *dev, unsigned long flags) | 365 | nouveau_drm_load(struct drm_device *dev, unsigned long flags) |
349 | { | 366 | { |
350 | struct pci_dev *pdev = dev->pdev; | 367 | struct pci_dev *pdev = dev->pdev; |
351 | struct nouveau_device *device; | ||
352 | struct nouveau_drm *drm; | 368 | struct nouveau_drm *drm; |
353 | int ret; | 369 | int ret; |
354 | 370 | ||
@@ -359,7 +375,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) | |||
359 | 375 | ||
360 | dev->dev_private = drm; | 376 | dev->dev_private = drm; |
361 | drm->dev = dev; | 377 | drm->dev = dev; |
362 | nouveau_client(drm)->debug = nouveau_dbgopt(nouveau_debug, "DRM"); | 378 | nvkm_client(&drm->client.base)->debug = |
379 | nouveau_dbgopt(nouveau_debug, "DRM"); | ||
363 | 380 | ||
364 | INIT_LIST_HEAD(&drm->clients); | 381 | INIT_LIST_HEAD(&drm->clients); |
365 | spin_lock_init(&drm->tile.lock); | 382 | spin_lock_init(&drm->tile.lock); |
@@ -370,33 +387,34 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) | |||
370 | * (possibly) execute vbios init tables (see nouveau_agp.h) | 387 | * (possibly) execute vbios init tables (see nouveau_agp.h) |
371 | */ | 388 | */ |
372 | if (pdev && drm_pci_device_is_agp(dev) && dev->agp) { | 389 | if (pdev && drm_pci_device_is_agp(dev) && dev->agp) { |
390 | const u64 enables = NV_DEVICE_V0_DISABLE_IDENTIFY | | ||
391 | NV_DEVICE_V0_DISABLE_MMIO; | ||
373 | /* dummy device object, doesn't init anything, but allows | 392 | /* dummy device object, doesn't init anything, but allows |
374 | * agp code access to registers | 393 | * agp code access to registers |
375 | */ | 394 | */ |
376 | ret = nouveau_object_new(nv_object(drm), NVDRM_CLIENT, | 395 | ret = nvif_device_init(&drm->client.base.base, NULL, |
377 | NVDRM_DEVICE, 0x0080, | 396 | NVDRM_DEVICE, NV_DEVICE, |
378 | &(struct nv_device_class) { | 397 | &(struct nv_device_v0) { |
379 | .device = ~0, | 398 | .device = ~0, |
380 | .disable = | 399 | .disable = ~enables, |
381 | ~(NV_DEVICE_DISABLE_MMIO | | ||
382 | NV_DEVICE_DISABLE_IDENTIFY), | ||
383 | .debug0 = ~0, | 400 | .debug0 = ~0, |
384 | }, sizeof(struct nv_device_class), | 401 | }, sizeof(struct nv_device_v0), |
385 | &drm->device); | 402 | &drm->device); |
386 | if (ret) | 403 | if (ret) |
387 | goto fail_device; | 404 | goto fail_device; |
388 | 405 | ||
389 | nouveau_agp_reset(drm); | 406 | nouveau_agp_reset(drm); |
390 | nouveau_object_del(nv_object(drm), NVDRM_CLIENT, NVDRM_DEVICE); | 407 | nvif_device_fini(&drm->device); |
391 | } | 408 | } |
392 | 409 | ||
393 | ret = nouveau_object_new(nv_object(drm), NVDRM_CLIENT, NVDRM_DEVICE, | 410 | ret = nvif_device_init(&drm->client.base.base, NULL, NVDRM_DEVICE, |
394 | 0x0080, &(struct nv_device_class) { | 411 | NV_DEVICE, |
412 | &(struct nv_device_v0) { | ||
395 | .device = ~0, | 413 | .device = ~0, |
396 | .disable = 0, | 414 | .disable = 0, |
397 | .debug0 = 0, | 415 | .debug0 = 0, |
398 | }, sizeof(struct nv_device_class), | 416 | }, sizeof(struct nv_device_v0), |
399 | &drm->device); | 417 | &drm->device); |
400 | if (ret) | 418 | if (ret) |
401 | goto fail_device; | 419 | goto fail_device; |
402 | 420 | ||
@@ -406,18 +424,19 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) | |||
406 | * nosnoop capability. hopefully won't cause issues until a | 424 | * nosnoop capability. hopefully won't cause issues until a |
407 | * better fix is found - assuming there is one... | 425 | * better fix is found - assuming there is one... |
408 | */ | 426 | */ |
409 | device = nv_device(drm->device); | 427 | if (drm->device.info.chipset == 0xc1) |
410 | if (nv_device(drm->device)->chipset == 0xc1) | 428 | nvif_mask(&drm->device, 0x00088080, 0x00000800, 0x00000000); |
411 | nv_mask(device, 0x00088080, 0x00000800, 0x00000000); | ||
412 | 429 | ||
413 | nouveau_vga_init(drm); | 430 | nouveau_vga_init(drm); |
414 | nouveau_agp_init(drm); | 431 | nouveau_agp_init(drm); |
415 | 432 | ||
416 | if (device->card_type >= NV_50) { | 433 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { |
417 | ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40), | 434 | ret = nouveau_vm_new(nvkm_device(&drm->device), 0, (1ULL << 40), |
418 | 0x1000, &drm->client.base.vm); | 435 | 0x1000, &drm->client.vm); |
419 | if (ret) | 436 | if (ret) |
420 | goto fail_device; | 437 | goto fail_device; |
438 | |||
439 | nvkm_client(&drm->client.base)->vm = drm->client.vm; | ||
421 | } | 440 | } |
422 | 441 | ||
423 | ret = nouveau_ttm_init(drm); | 442 | ret = nouveau_ttm_init(drm); |
@@ -463,6 +482,7 @@ fail_ttm: | |||
463 | nouveau_agp_fini(drm); | 482 | nouveau_agp_fini(drm); |
464 | nouveau_vga_fini(drm); | 483 | nouveau_vga_fini(drm); |
465 | fail_device: | 484 | fail_device: |
485 | nvif_device_fini(&drm->device); | ||
466 | nouveau_cli_destroy(&drm->client); | 486 | nouveau_cli_destroy(&drm->client); |
467 | return ret; | 487 | return ret; |
468 | } | 488 | } |
@@ -488,26 +508,37 @@ nouveau_drm_unload(struct drm_device *dev) | |||
488 | nouveau_agp_fini(drm); | 508 | nouveau_agp_fini(drm); |
489 | nouveau_vga_fini(drm); | 509 | nouveau_vga_fini(drm); |
490 | 510 | ||
511 | nvif_device_fini(&drm->device); | ||
491 | if (drm->hdmi_device) | 512 | if (drm->hdmi_device) |
492 | pci_dev_put(drm->hdmi_device); | 513 | pci_dev_put(drm->hdmi_device); |
493 | nouveau_cli_destroy(&drm->client); | 514 | nouveau_cli_destroy(&drm->client); |
494 | return 0; | 515 | return 0; |
495 | } | 516 | } |
496 | 517 | ||
497 | static void | 518 | void |
498 | nouveau_drm_remove(struct pci_dev *pdev) | 519 | nouveau_drm_device_remove(struct drm_device *dev) |
499 | { | 520 | { |
500 | struct drm_device *dev = pci_get_drvdata(pdev); | ||
501 | struct nouveau_drm *drm = nouveau_drm(dev); | 521 | struct nouveau_drm *drm = nouveau_drm(dev); |
522 | struct nouveau_client *client; | ||
502 | struct nouveau_object *device; | 523 | struct nouveau_object *device; |
503 | 524 | ||
504 | dev->irq_enabled = false; | 525 | dev->irq_enabled = false; |
505 | device = drm->client.base.device; | 526 | client = nvkm_client(&drm->client.base); |
527 | device = client->device; | ||
506 | drm_put_dev(dev); | 528 | drm_put_dev(dev); |
507 | 529 | ||
508 | nouveau_object_ref(NULL, &device); | 530 | nouveau_object_ref(NULL, &device); |
509 | nouveau_object_debug(); | 531 | nouveau_object_debug(); |
510 | } | 532 | } |
533 | EXPORT_SYMBOL(nouveau_drm_device_remove); | ||
534 | |||
535 | static void | ||
536 | nouveau_drm_remove(struct pci_dev *pdev) | ||
537 | { | ||
538 | struct drm_device *dev = pci_get_drvdata(pdev); | ||
539 | |||
540 | nouveau_drm_device_remove(dev); | ||
541 | } | ||
511 | 542 | ||
512 | static int | 543 | static int |
513 | nouveau_do_suspend(struct drm_device *dev, bool runtime) | 544 | nouveau_do_suspend(struct drm_device *dev, bool runtime) |
@@ -548,13 +579,13 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime) | |||
548 | } | 579 | } |
549 | 580 | ||
550 | list_for_each_entry(cli, &drm->clients, head) { | 581 | list_for_each_entry(cli, &drm->clients, head) { |
551 | ret = nouveau_client_fini(&cli->base, true); | 582 | ret = nvif_client_suspend(&cli->base); |
552 | if (ret) | 583 | if (ret) |
553 | goto fail_client; | 584 | goto fail_client; |
554 | } | 585 | } |
555 | 586 | ||
556 | NV_INFO(drm, "suspending kernel object tree...\n"); | 587 | NV_INFO(drm, "suspending kernel object tree...\n"); |
557 | ret = nouveau_client_fini(&drm->client.base, true); | 588 | ret = nvif_client_suspend(&drm->client.base); |
558 | if (ret) | 589 | if (ret) |
559 | goto fail_client; | 590 | goto fail_client; |
560 | 591 | ||
@@ -563,7 +594,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime) | |||
563 | 594 | ||
564 | fail_client: | 595 | fail_client: |
565 | list_for_each_entry_continue_reverse(cli, &drm->clients, head) { | 596 | list_for_each_entry_continue_reverse(cli, &drm->clients, head) { |
566 | nouveau_client_init(&cli->base); | 597 | nvif_client_resume(&cli->base); |
567 | } | 598 | } |
568 | 599 | ||
569 | if (drm->fence && nouveau_fence(drm)->resume) | 600 | if (drm->fence && nouveau_fence(drm)->resume) |
@@ -611,7 +642,7 @@ nouveau_do_resume(struct drm_device *dev) | |||
611 | nouveau_agp_reset(drm); | 642 | nouveau_agp_reset(drm); |
612 | 643 | ||
613 | NV_INFO(drm, "resuming kernel object tree...\n"); | 644 | NV_INFO(drm, "resuming kernel object tree...\n"); |
614 | nouveau_client_init(&drm->client.base); | 645 | nvif_client_resume(&drm->client.base); |
615 | nouveau_agp_init(drm); | 646 | nouveau_agp_init(drm); |
616 | 647 | ||
617 | NV_INFO(drm, "resuming client object trees...\n"); | 648 | NV_INFO(drm, "resuming client object trees...\n"); |
@@ -619,7 +650,7 @@ nouveau_do_resume(struct drm_device *dev) | |||
619 | nouveau_fence(drm)->resume(drm); | 650 | nouveau_fence(drm)->resume(drm); |
620 | 651 | ||
621 | list_for_each_entry(cli, &drm->clients, head) { | 652 | list_for_each_entry(cli, &drm->clients, head) { |
622 | nouveau_client_init(&cli->base); | 653 | nvif_client_resume(&cli->base); |
623 | } | 654 | } |
624 | 655 | ||
625 | nouveau_run_vbios_init(dev); | 656 | nouveau_run_vbios_init(dev); |
@@ -715,13 +746,17 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) | |||
715 | if (ret) | 746 | if (ret) |
716 | goto out_suspend; | 747 | goto out_suspend; |
717 | 748 | ||
718 | if (nv_device(drm->device)->card_type >= NV_50) { | 749 | cli->base.super = false; |
719 | ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40), | 750 | |
720 | 0x1000, &cli->base.vm); | 751 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { |
752 | ret = nouveau_vm_new(nvkm_device(&drm->device), 0, (1ULL << 40), | ||
753 | 0x1000, &cli->vm); | ||
721 | if (ret) { | 754 | if (ret) { |
722 | nouveau_cli_destroy(cli); | 755 | nouveau_cli_destroy(cli); |
723 | goto out_suspend; | 756 | goto out_suspend; |
724 | } | 757 | } |
758 | |||
759 | nvkm_client(&cli->base)->vm = cli->vm; | ||
725 | } | 760 | } |
726 | 761 | ||
727 | fpriv->driver_priv = cli; | 762 | fpriv->driver_priv = cli; |
@@ -779,24 +814,31 @@ nouveau_ioctls[] = { | |||
779 | DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), | 814 | DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), |
780 | }; | 815 | }; |
781 | 816 | ||
782 | long nouveau_drm_ioctl(struct file *filp, | 817 | long |
783 | unsigned int cmd, unsigned long arg) | 818 | nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
784 | { | 819 | { |
785 | struct drm_file *file_priv = filp->private_data; | 820 | struct drm_file *filp = file->private_data; |
786 | struct drm_device *dev; | 821 | struct drm_device *dev = filp->minor->dev; |
787 | long ret; | 822 | long ret; |
788 | dev = file_priv->minor->dev; | ||
789 | 823 | ||
790 | ret = pm_runtime_get_sync(dev->dev); | 824 | ret = pm_runtime_get_sync(dev->dev); |
791 | if (ret < 0 && ret != -EACCES) | 825 | if (ret < 0 && ret != -EACCES) |
792 | return ret; | 826 | return ret; |
793 | 827 | ||
794 | ret = drm_ioctl(filp, cmd, arg); | 828 | switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) { |
829 | case DRM_NOUVEAU_NVIF: | ||
830 | ret = usif_ioctl(filp, (void __user *)arg, _IOC_SIZE(cmd)); | ||
831 | break; | ||
832 | default: | ||
833 | ret = drm_ioctl(file, cmd, arg); | ||
834 | break; | ||
835 | } | ||
795 | 836 | ||
796 | pm_runtime_mark_last_busy(dev->dev); | 837 | pm_runtime_mark_last_busy(dev->dev); |
797 | pm_runtime_put_autosuspend(dev->dev); | 838 | pm_runtime_put_autosuspend(dev->dev); |
798 | return ret; | 839 | return ret; |
799 | } | 840 | } |
841 | |||
800 | static const struct file_operations | 842 | static const struct file_operations |
801 | nouveau_driver_fops = { | 843 | nouveau_driver_fops = { |
802 | .owner = THIS_MODULE, | 844 | .owner = THIS_MODULE, |
@@ -921,7 +963,7 @@ static int nouveau_pmops_runtime_resume(struct device *dev) | |||
921 | { | 963 | { |
922 | struct pci_dev *pdev = to_pci_dev(dev); | 964 | struct pci_dev *pdev = to_pci_dev(dev); |
923 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | 965 | struct drm_device *drm_dev = pci_get_drvdata(pdev); |
924 | struct nouveau_device *device = nouveau_dev(drm_dev); | 966 | struct nvif_device *device = &nouveau_drm(drm_dev)->device; |
925 | int ret; | 967 | int ret; |
926 | 968 | ||
927 | if (nouveau_runtime_pm == 0) | 969 | if (nouveau_runtime_pm == 0) |
@@ -937,7 +979,7 @@ static int nouveau_pmops_runtime_resume(struct device *dev) | |||
937 | ret = nouveau_do_resume(drm_dev); | 979 | ret = nouveau_do_resume(drm_dev); |
938 | drm_kms_helper_poll_enable(drm_dev); | 980 | drm_kms_helper_poll_enable(drm_dev); |
939 | /* do magic */ | 981 | /* do magic */ |
940 | nv_mask(device, 0x88488, (1 << 25), (1 << 25)); | 982 | nvif_mask(device, 0x88488, (1 << 25), (1 << 25)); |
941 | vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); | 983 | vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); |
942 | drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; | 984 | drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; |
943 | nv_debug_level(NORMAL); | 985 | nv_debug_level(NORMAL); |
@@ -1005,24 +1047,41 @@ nouveau_drm_pci_driver = { | |||
1005 | .driver.pm = &nouveau_pm_ops, | 1047 | .driver.pm = &nouveau_pm_ops, |
1006 | }; | 1048 | }; |
1007 | 1049 | ||
1008 | int nouveau_drm_platform_probe(struct platform_device *pdev) | 1050 | struct drm_device * |
1051 | nouveau_platform_device_create_(struct platform_device *pdev, int size, | ||
1052 | void **pobject) | ||
1009 | { | 1053 | { |
1010 | struct nouveau_device *device; | 1054 | struct drm_device *drm; |
1011 | int ret; | 1055 | int err; |
1012 | 1056 | ||
1013 | ret = nouveau_device_create(pdev, NOUVEAU_BUS_PLATFORM, | 1057 | err = nouveau_device_create_(pdev, NOUVEAU_BUS_PLATFORM, |
1014 | nouveau_platform_name(pdev), | 1058 | nouveau_platform_name(pdev), |
1015 | dev_name(&pdev->dev), nouveau_config, | 1059 | dev_name(&pdev->dev), nouveau_config, |
1016 | nouveau_debug, &device); | 1060 | nouveau_debug, size, pobject); |
1017 | 1061 | if (err) | |
1018 | ret = drm_platform_init(&driver, pdev); | 1062 | return ERR_PTR(err); |
1019 | if (ret) { | 1063 | |
1020 | nouveau_object_ref(NULL, (struct nouveau_object **)&device); | 1064 | drm = drm_dev_alloc(&driver, &pdev->dev); |
1021 | return ret; | 1065 | if (!drm) { |
1066 | err = -ENOMEM; | ||
1067 | goto err_free; | ||
1022 | } | 1068 | } |
1023 | 1069 | ||
1024 | return ret; | 1070 | err = drm_dev_set_unique(drm, "%s", dev_name(&pdev->dev)); |
1071 | if (err < 0) | ||
1072 | goto err_free; | ||
1073 | |||
1074 | drm->platformdev = pdev; | ||
1075 | platform_set_drvdata(pdev, drm); | ||
1076 | |||
1077 | return drm; | ||
1078 | |||
1079 | err_free: | ||
1080 | nouveau_object_ref(NULL, (struct nouveau_object **)pobject); | ||
1081 | |||
1082 | return ERR_PTR(err); | ||
1025 | } | 1083 | } |
1084 | EXPORT_SYMBOL(nouveau_platform_device_create_); | ||
1026 | 1085 | ||
1027 | static int __init | 1086 | static int __init |
1028 | nouveau_drm_init(void) | 1087 | nouveau_drm_init(void) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h index 7efbafaf7c1d..b02b02452c85 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.h +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h | |||
@@ -9,8 +9,8 @@ | |||
9 | #define DRIVER_DATE "20120801" | 9 | #define DRIVER_DATE "20120801" |
10 | 10 | ||
11 | #define DRIVER_MAJOR 1 | 11 | #define DRIVER_MAJOR 1 |
12 | #define DRIVER_MINOR 1 | 12 | #define DRIVER_MINOR 2 |
13 | #define DRIVER_PATCHLEVEL 1 | 13 | #define DRIVER_PATCHLEVEL 0 |
14 | 14 | ||
15 | /* | 15 | /* |
16 | * 1.1.1: | 16 | * 1.1.1: |
@@ -21,15 +21,17 @@ | |||
21 | * to control registers on the MPs to enable performance counters, | 21 | * to control registers on the MPs to enable performance counters, |
22 | * and to control the warp error enable mask (OpenGL requires out of | 22 | * and to control the warp error enable mask (OpenGL requires out of |
23 | * bounds access to local memory to be silently ignored / return 0). | 23 | * bounds access to local memory to be silently ignored / return 0). |
24 | * 1.1.2: | ||
25 | * - fixes multiple bugs in flip completion events and timestamping | ||
26 | * 1.2.0: | ||
27 | * - object api exposed to userspace | ||
28 | * - fermi,kepler,maxwell zbc | ||
24 | */ | 29 | */ |
25 | 30 | ||
26 | #include <core/client.h> | 31 | #include <nvif/client.h> |
27 | #include <core/event.h> | 32 | #include <nvif/device.h> |
28 | |||
29 | #include <subdev/vm.h> | ||
30 | 33 | ||
31 | #include <drmP.h> | 34 | #include <drmP.h> |
32 | #include <drm/nouveau_drm.h> | ||
33 | 35 | ||
34 | #include <drm/ttm/ttm_bo_api.h> | 36 | #include <drm/ttm/ttm_bo_api.h> |
35 | #include <drm/ttm/ttm_bo_driver.h> | 37 | #include <drm/ttm/ttm_bo_driver.h> |
@@ -38,7 +40,10 @@ | |||
38 | #include <drm/ttm/ttm_module.h> | 40 | #include <drm/ttm/ttm_module.h> |
39 | #include <drm/ttm/ttm_page_alloc.h> | 41 | #include <drm/ttm/ttm_page_alloc.h> |
40 | 42 | ||
43 | #include "uapi/drm/nouveau_drm.h" | ||
44 | |||
41 | struct nouveau_channel; | 45 | struct nouveau_channel; |
46 | struct platform_device; | ||
42 | 47 | ||
43 | #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) | 48 | #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) |
44 | 49 | ||
@@ -50,6 +55,17 @@ struct nouveau_drm_tile { | |||
50 | bool used; | 55 | bool used; |
51 | }; | 56 | }; |
52 | 57 | ||
58 | enum nouveau_drm_object_route { | ||
59 | NVDRM_OBJECT_NVIF = 0, | ||
60 | NVDRM_OBJECT_USIF, | ||
61 | NVDRM_OBJECT_ABI16, | ||
62 | }; | ||
63 | |||
64 | enum nouveau_drm_notify_route { | ||
65 | NVDRM_NOTIFY_NVIF = 0, | ||
66 | NVDRM_NOTIFY_USIF | ||
67 | }; | ||
68 | |||
53 | enum nouveau_drm_handle { | 69 | enum nouveau_drm_handle { |
54 | NVDRM_CLIENT = 0xffffffff, | 70 | NVDRM_CLIENT = 0xffffffff, |
55 | NVDRM_DEVICE = 0xdddddddd, | 71 | NVDRM_DEVICE = 0xdddddddd, |
@@ -61,10 +77,13 @@ enum nouveau_drm_handle { | |||
61 | }; | 77 | }; |
62 | 78 | ||
63 | struct nouveau_cli { | 79 | struct nouveau_cli { |
64 | struct nouveau_client base; | 80 | struct nvif_client base; |
81 | struct nouveau_vm *vm; /*XXX*/ | ||
65 | struct list_head head; | 82 | struct list_head head; |
66 | struct mutex mutex; | 83 | struct mutex mutex; |
67 | void *abi16; | 84 | void *abi16; |
85 | struct list_head objects; | ||
86 | struct list_head notifys; | ||
68 | }; | 87 | }; |
69 | 88 | ||
70 | static inline struct nouveau_cli * | 89 | static inline struct nouveau_cli * |
@@ -73,13 +92,16 @@ nouveau_cli(struct drm_file *fpriv) | |||
73 | return fpriv ? fpriv->driver_priv : NULL; | 92 | return fpriv ? fpriv->driver_priv : NULL; |
74 | } | 93 | } |
75 | 94 | ||
95 | #include <nvif/object.h> | ||
96 | #include <nvif/device.h> | ||
97 | |||
76 | extern int nouveau_runtime_pm; | 98 | extern int nouveau_runtime_pm; |
77 | 99 | ||
78 | struct nouveau_drm { | 100 | struct nouveau_drm { |
79 | struct nouveau_cli client; | 101 | struct nouveau_cli client; |
80 | struct drm_device *dev; | 102 | struct drm_device *dev; |
81 | 103 | ||
82 | struct nouveau_object *device; | 104 | struct nvif_device device; |
83 | struct list_head clients; | 105 | struct list_head clients; |
84 | 106 | ||
85 | struct { | 107 | struct { |
@@ -102,6 +124,7 @@ struct nouveau_drm { | |||
102 | struct ttm_buffer_object *, | 124 | struct ttm_buffer_object *, |
103 | struct ttm_mem_reg *, struct ttm_mem_reg *); | 125 | struct ttm_mem_reg *, struct ttm_mem_reg *); |
104 | struct nouveau_channel *chan; | 126 | struct nouveau_channel *chan; |
127 | struct nvif_object copy; | ||
105 | int mtrr; | 128 | int mtrr; |
106 | } ttm; | 129 | } ttm; |
107 | 130 | ||
@@ -119,6 +142,8 @@ struct nouveau_drm { | |||
119 | struct nouveau_channel *channel; | 142 | struct nouveau_channel *channel; |
120 | struct nouveau_gpuobj *notify; | 143 | struct nouveau_gpuobj *notify; |
121 | struct nouveau_fbdev *fbcon; | 144 | struct nouveau_fbdev *fbcon; |
145 | struct nvif_object nvsw; | ||
146 | struct nvif_object ntfy; | ||
122 | 147 | ||
123 | /* nv10-nv40 tiling regions */ | 148 | /* nv10-nv40 tiling regions */ |
124 | struct { | 149 | struct { |
@@ -148,20 +173,25 @@ nouveau_drm(struct drm_device *dev) | |||
148 | return dev->dev_private; | 173 | return dev->dev_private; |
149 | } | 174 | } |
150 | 175 | ||
151 | static inline struct nouveau_device * | ||
152 | nouveau_dev(struct drm_device *dev) | ||
153 | { | ||
154 | return nv_device(nouveau_drm(dev)->device); | ||
155 | } | ||
156 | |||
157 | int nouveau_pmops_suspend(struct device *); | 176 | int nouveau_pmops_suspend(struct device *); |
158 | int nouveau_pmops_resume(struct device *); | 177 | int nouveau_pmops_resume(struct device *); |
159 | 178 | ||
160 | #define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args) | 179 | #define nouveau_platform_device_create(p, u) \ |
161 | #define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args) | 180 | nouveau_platform_device_create_(p, sizeof(**u), (void **)u) |
162 | #define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args) | 181 | struct drm_device * |
163 | #define NV_INFO(cli, fmt, args...) nv_info((cli), fmt, ##args) | 182 | nouveau_platform_device_create_(struct platform_device *pdev, |
164 | #define NV_DEBUG(cli, fmt, args...) nv_debug((cli), fmt, ##args) | 183 | int size, void **pobject); |
184 | void nouveau_drm_device_remove(struct drm_device *dev); | ||
185 | |||
186 | #define NV_PRINTK(l,c,f,a...) do { \ | ||
187 | struct nouveau_cli *_cli = (c); \ | ||
188 | nv_##l(_cli->base.base.priv, f, ##a); \ | ||
189 | } while(0) | ||
190 | #define NV_FATAL(drm,f,a...) NV_PRINTK(fatal, &(drm)->client, f, ##a) | ||
191 | #define NV_ERROR(drm,f,a...) NV_PRINTK(error, &(drm)->client, f, ##a) | ||
192 | #define NV_WARN(drm,f,a...) NV_PRINTK(warn, &(drm)->client, f, ##a) | ||
193 | #define NV_INFO(drm,f,a...) NV_PRINTK(info, &(drm)->client, f, ##a) | ||
194 | #define NV_DEBUG(drm,f,a...) NV_PRINTK(debug, &(drm)->client, f, ##a) | ||
165 | 195 | ||
166 | extern int nouveau_modeset; | 196 | extern int nouveau_modeset; |
167 | 197 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 758c11cb9a9a..ebfe3180109e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
@@ -51,11 +51,6 @@ | |||
51 | 51 | ||
52 | #include "nouveau_crtc.h" | 52 | #include "nouveau_crtc.h" |
53 | 53 | ||
54 | #include <core/client.h> | ||
55 | #include <core/device.h> | ||
56 | |||
57 | #include <subdev/fb.h> | ||
58 | |||
59 | MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); | 54 | MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); |
60 | static int nouveau_nofbaccel = 0; | 55 | static int nouveau_nofbaccel = 0; |
61 | module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); | 56 | module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); |
@@ -65,7 +60,7 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | |||
65 | { | 60 | { |
66 | struct nouveau_fbdev *fbcon = info->par; | 61 | struct nouveau_fbdev *fbcon = info->par; |
67 | struct nouveau_drm *drm = nouveau_drm(fbcon->dev); | 62 | struct nouveau_drm *drm = nouveau_drm(fbcon->dev); |
68 | struct nouveau_device *device = nv_device(drm->device); | 63 | struct nvif_device *device = &drm->device; |
69 | int ret; | 64 | int ret; |
70 | 65 | ||
71 | if (info->state != FBINFO_STATE_RUNNING) | 66 | if (info->state != FBINFO_STATE_RUNNING) |
@@ -74,10 +69,10 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | |||
74 | ret = -ENODEV; | 69 | ret = -ENODEV; |
75 | if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && | 70 | if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && |
76 | mutex_trylock(&drm->client.mutex)) { | 71 | mutex_trylock(&drm->client.mutex)) { |
77 | if (device->card_type < NV_50) | 72 | if (device->info.family < NV_DEVICE_INFO_V0_TESLA) |
78 | ret = nv04_fbcon_fillrect(info, rect); | 73 | ret = nv04_fbcon_fillrect(info, rect); |
79 | else | 74 | else |
80 | if (device->card_type < NV_C0) | 75 | if (device->info.family < NV_DEVICE_INFO_V0_FERMI) |
81 | ret = nv50_fbcon_fillrect(info, rect); | 76 | ret = nv50_fbcon_fillrect(info, rect); |
82 | else | 77 | else |
83 | ret = nvc0_fbcon_fillrect(info, rect); | 78 | ret = nvc0_fbcon_fillrect(info, rect); |
@@ -97,7 +92,7 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image) | |||
97 | { | 92 | { |
98 | struct nouveau_fbdev *fbcon = info->par; | 93 | struct nouveau_fbdev *fbcon = info->par; |
99 | struct nouveau_drm *drm = nouveau_drm(fbcon->dev); | 94 | struct nouveau_drm *drm = nouveau_drm(fbcon->dev); |
100 | struct nouveau_device *device = nv_device(drm->device); | 95 | struct nvif_device *device = &drm->device; |
101 | int ret; | 96 | int ret; |
102 | 97 | ||
103 | if (info->state != FBINFO_STATE_RUNNING) | 98 | if (info->state != FBINFO_STATE_RUNNING) |
@@ -106,10 +101,10 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image) | |||
106 | ret = -ENODEV; | 101 | ret = -ENODEV; |
107 | if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && | 102 | if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && |
108 | mutex_trylock(&drm->client.mutex)) { | 103 | mutex_trylock(&drm->client.mutex)) { |
109 | if (device->card_type < NV_50) | 104 | if (device->info.family < NV_DEVICE_INFO_V0_TESLA) |
110 | ret = nv04_fbcon_copyarea(info, image); | 105 | ret = nv04_fbcon_copyarea(info, image); |
111 | else | 106 | else |
112 | if (device->card_type < NV_C0) | 107 | if (device->info.family < NV_DEVICE_INFO_V0_FERMI) |
113 | ret = nv50_fbcon_copyarea(info, image); | 108 | ret = nv50_fbcon_copyarea(info, image); |
114 | else | 109 | else |
115 | ret = nvc0_fbcon_copyarea(info, image); | 110 | ret = nvc0_fbcon_copyarea(info, image); |
@@ -129,7 +124,7 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
129 | { | 124 | { |
130 | struct nouveau_fbdev *fbcon = info->par; | 125 | struct nouveau_fbdev *fbcon = info->par; |
131 | struct nouveau_drm *drm = nouveau_drm(fbcon->dev); | 126 | struct nouveau_drm *drm = nouveau_drm(fbcon->dev); |
132 | struct nouveau_device *device = nv_device(drm->device); | 127 | struct nvif_device *device = &drm->device; |
133 | int ret; | 128 | int ret; |
134 | 129 | ||
135 | if (info->state != FBINFO_STATE_RUNNING) | 130 | if (info->state != FBINFO_STATE_RUNNING) |
@@ -138,10 +133,10 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
138 | ret = -ENODEV; | 133 | ret = -ENODEV; |
139 | if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && | 134 | if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && |
140 | mutex_trylock(&drm->client.mutex)) { | 135 | mutex_trylock(&drm->client.mutex)) { |
141 | if (device->card_type < NV_50) | 136 | if (device->info.family < NV_DEVICE_INFO_V0_TESLA) |
142 | ret = nv04_fbcon_imageblit(info, image); | 137 | ret = nv04_fbcon_imageblit(info, image); |
143 | else | 138 | else |
144 | if (device->card_type < NV_C0) | 139 | if (device->info.family < NV_DEVICE_INFO_V0_FERMI) |
145 | ret = nv50_fbcon_imageblit(info, image); | 140 | ret = nv50_fbcon_imageblit(info, image); |
146 | else | 141 | else |
147 | ret = nvc0_fbcon_imageblit(info, image); | 142 | ret = nvc0_fbcon_imageblit(info, image); |
@@ -212,6 +207,65 @@ static struct fb_ops nouveau_fbcon_sw_ops = { | |||
212 | .fb_debug_leave = drm_fb_helper_debug_leave, | 207 | .fb_debug_leave = drm_fb_helper_debug_leave, |
213 | }; | 208 | }; |
214 | 209 | ||
210 | void | ||
211 | nouveau_fbcon_accel_save_disable(struct drm_device *dev) | ||
212 | { | ||
213 | struct nouveau_drm *drm = nouveau_drm(dev); | ||
214 | if (drm->fbcon) { | ||
215 | drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags; | ||
216 | drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; | ||
217 | } | ||
218 | } | ||
219 | |||
220 | void | ||
221 | nouveau_fbcon_accel_restore(struct drm_device *dev) | ||
222 | { | ||
223 | struct nouveau_drm *drm = nouveau_drm(dev); | ||
224 | if (drm->fbcon) { | ||
225 | drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags; | ||
226 | } | ||
227 | } | ||
228 | |||
229 | void | ||
230 | nouveau_fbcon_accel_fini(struct drm_device *dev) | ||
231 | { | ||
232 | struct nouveau_drm *drm = nouveau_drm(dev); | ||
233 | struct nouveau_fbdev *fbcon = drm->fbcon; | ||
234 | if (fbcon && drm->channel) { | ||
235 | console_lock(); | ||
236 | fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; | ||
237 | console_unlock(); | ||
238 | nouveau_channel_idle(drm->channel); | ||
239 | nvif_object_fini(&fbcon->twod); | ||
240 | nvif_object_fini(&fbcon->blit); | ||
241 | nvif_object_fini(&fbcon->gdi); | ||
242 | nvif_object_fini(&fbcon->patt); | ||
243 | nvif_object_fini(&fbcon->rop); | ||
244 | nvif_object_fini(&fbcon->clip); | ||
245 | nvif_object_fini(&fbcon->surf2d); | ||
246 | } | ||
247 | } | ||
248 | |||
249 | void | ||
250 | nouveau_fbcon_accel_init(struct drm_device *dev) | ||
251 | { | ||
252 | struct nouveau_drm *drm = nouveau_drm(dev); | ||
253 | struct nouveau_fbdev *fbcon = drm->fbcon; | ||
254 | struct fb_info *info = fbcon->helper.fbdev; | ||
255 | int ret; | ||
256 | |||
257 | if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) | ||
258 | ret = nv04_fbcon_accel_init(info); | ||
259 | else | ||
260 | if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI) | ||
261 | ret = nv50_fbcon_accel_init(info); | ||
262 | else | ||
263 | ret = nvc0_fbcon_accel_init(info); | ||
264 | |||
265 | if (ret == 0) | ||
266 | info->fbops = &nouveau_fbcon_ops; | ||
267 | } | ||
268 | |||
215 | static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | 269 | static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
216 | u16 blue, int regno) | 270 | u16 blue, int regno) |
217 | { | 271 | { |
@@ -257,7 +311,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, | |||
257 | struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper; | 311 | struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper; |
258 | struct drm_device *dev = fbcon->dev; | 312 | struct drm_device *dev = fbcon->dev; |
259 | struct nouveau_drm *drm = nouveau_drm(dev); | 313 | struct nouveau_drm *drm = nouveau_drm(dev); |
260 | struct nouveau_device *device = nv_device(drm->device); | 314 | struct nvif_device *device = &drm->device; |
261 | struct fb_info *info; | 315 | struct fb_info *info; |
262 | struct drm_framebuffer *fb; | 316 | struct drm_framebuffer *fb; |
263 | struct nouveau_framebuffer *nouveau_fb; | 317 | struct nouveau_framebuffer *nouveau_fb; |
@@ -299,8 +353,8 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, | |||
299 | } | 353 | } |
300 | 354 | ||
301 | chan = nouveau_nofbaccel ? NULL : drm->channel; | 355 | chan = nouveau_nofbaccel ? NULL : drm->channel; |
302 | if (chan && device->card_type >= NV_50) { | 356 | if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) { |
303 | ret = nouveau_bo_vma_add(nvbo, nv_client(chan->cli)->vm, | 357 | ret = nouveau_bo_vma_add(nvbo, drm->client.vm, |
304 | &fbcon->nouveau_fb.vma); | 358 | &fbcon->nouveau_fb.vma); |
305 | if (ret) { | 359 | if (ret) { |
306 | NV_ERROR(drm, "failed to map fb into chan: %d\n", ret); | 360 | NV_ERROR(drm, "failed to map fb into chan: %d\n", ret); |
@@ -357,20 +411,8 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, | |||
357 | 411 | ||
358 | mutex_unlock(&dev->struct_mutex); | 412 | mutex_unlock(&dev->struct_mutex); |
359 | 413 | ||
360 | if (chan) { | 414 | if (chan) |
361 | ret = -ENODEV; | 415 | nouveau_fbcon_accel_init(dev); |
362 | if (device->card_type < NV_50) | ||
363 | ret = nv04_fbcon_accel_init(info); | ||
364 | else | ||
365 | if (device->card_type < NV_C0) | ||
366 | ret = nv50_fbcon_accel_init(info); | ||
367 | else | ||
368 | ret = nvc0_fbcon_accel_init(info); | ||
369 | |||
370 | if (ret == 0) | ||
371 | info->fbops = &nouveau_fbcon_ops; | ||
372 | } | ||
373 | |||
374 | nouveau_fbcon_zfill(dev, fbcon); | 416 | nouveau_fbcon_zfill(dev, fbcon); |
375 | 417 | ||
376 | /* To allow resizeing without swapping buffers */ | 418 | /* To allow resizeing without swapping buffers */ |
@@ -449,7 +491,6 @@ int | |||
449 | nouveau_fbcon_init(struct drm_device *dev) | 491 | nouveau_fbcon_init(struct drm_device *dev) |
450 | { | 492 | { |
451 | struct nouveau_drm *drm = nouveau_drm(dev); | 493 | struct nouveau_drm *drm = nouveau_drm(dev); |
452 | struct nouveau_fb *pfb = nouveau_fb(drm->device); | ||
453 | struct nouveau_fbdev *fbcon; | 494 | struct nouveau_fbdev *fbcon; |
454 | int preferred_bpp; | 495 | int preferred_bpp; |
455 | int ret; | 496 | int ret; |
@@ -476,10 +517,10 @@ nouveau_fbcon_init(struct drm_device *dev) | |||
476 | 517 | ||
477 | drm_fb_helper_single_add_all_connectors(&fbcon->helper); | 518 | drm_fb_helper_single_add_all_connectors(&fbcon->helper); |
478 | 519 | ||
479 | if (pfb->ram->size <= 32 * 1024 * 1024) | 520 | if (drm->device.info.ram_size <= 32 * 1024 * 1024) |
480 | preferred_bpp = 8; | 521 | preferred_bpp = 8; |
481 | else | 522 | else |
482 | if (pfb->ram->size <= 64 * 1024 * 1024) | 523 | if (drm->device.info.ram_size <= 64 * 1024 * 1024) |
483 | preferred_bpp = 16; | 524 | preferred_bpp = 16; |
484 | else | 525 | else |
485 | preferred_bpp = 32; | 526 | preferred_bpp = 32; |
@@ -499,43 +540,25 @@ nouveau_fbcon_fini(struct drm_device *dev) | |||
499 | if (!drm->fbcon) | 540 | if (!drm->fbcon) |
500 | return; | 541 | return; |
501 | 542 | ||
543 | nouveau_fbcon_accel_fini(dev); | ||
502 | nouveau_fbcon_destroy(dev, drm->fbcon); | 544 | nouveau_fbcon_destroy(dev, drm->fbcon); |
503 | kfree(drm->fbcon); | 545 | kfree(drm->fbcon); |
504 | drm->fbcon = NULL; | 546 | drm->fbcon = NULL; |
505 | } | 547 | } |
506 | 548 | ||
507 | void | 549 | void |
508 | nouveau_fbcon_save_disable_accel(struct drm_device *dev) | ||
509 | { | ||
510 | struct nouveau_drm *drm = nouveau_drm(dev); | ||
511 | if (drm->fbcon) { | ||
512 | drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags; | ||
513 | drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; | ||
514 | } | ||
515 | } | ||
516 | |||
517 | void | ||
518 | nouveau_fbcon_restore_accel(struct drm_device *dev) | ||
519 | { | ||
520 | struct nouveau_drm *drm = nouveau_drm(dev); | ||
521 | if (drm->fbcon) { | ||
522 | drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags; | ||
523 | } | ||
524 | } | ||
525 | |||
526 | void | ||
527 | nouveau_fbcon_set_suspend(struct drm_device *dev, int state) | 550 | nouveau_fbcon_set_suspend(struct drm_device *dev, int state) |
528 | { | 551 | { |
529 | struct nouveau_drm *drm = nouveau_drm(dev); | 552 | struct nouveau_drm *drm = nouveau_drm(dev); |
530 | if (drm->fbcon) { | 553 | if (drm->fbcon) { |
531 | console_lock(); | 554 | console_lock(); |
532 | if (state == 1) | ||
533 | nouveau_fbcon_save_disable_accel(dev); | ||
534 | fb_set_suspend(drm->fbcon->helper.fbdev, state); | ||
535 | if (state == 0) { | 555 | if (state == 0) { |
536 | nouveau_fbcon_restore_accel(dev); | 556 | nouveau_fbcon_accel_restore(dev); |
537 | nouveau_fbcon_zfill(dev, drm->fbcon); | 557 | nouveau_fbcon_zfill(dev, drm->fbcon); |
538 | } | 558 | } |
559 | fb_set_suspend(drm->fbcon->helper.fbdev, state); | ||
560 | if (state == 1) | ||
561 | nouveau_fbcon_accel_save_disable(dev); | ||
539 | console_unlock(); | 562 | console_unlock(); |
540 | } | 563 | } |
541 | } | 564 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h index fcff797d2084..34658cfa8f5d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h | |||
@@ -37,6 +37,13 @@ struct nouveau_fbdev { | |||
37 | struct list_head fbdev_list; | 37 | struct list_head fbdev_list; |
38 | struct drm_device *dev; | 38 | struct drm_device *dev; |
39 | unsigned int saved_flags; | 39 | unsigned int saved_flags; |
40 | struct nvif_object surf2d; | ||
41 | struct nvif_object clip; | ||
42 | struct nvif_object rop; | ||
43 | struct nvif_object patt; | ||
44 | struct nvif_object gdi; | ||
45 | struct nvif_object blit; | ||
46 | struct nvif_object twod; | ||
40 | }; | 47 | }; |
41 | 48 | ||
42 | void nouveau_fbcon_restore(void); | 49 | void nouveau_fbcon_restore(void); |
@@ -61,8 +68,8 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info); | |||
61 | int nouveau_fbcon_init(struct drm_device *dev); | 68 | int nouveau_fbcon_init(struct drm_device *dev); |
62 | void nouveau_fbcon_fini(struct drm_device *dev); | 69 | void nouveau_fbcon_fini(struct drm_device *dev); |
63 | void nouveau_fbcon_set_suspend(struct drm_device *dev, int state); | 70 | void nouveau_fbcon_set_suspend(struct drm_device *dev, int state); |
64 | void nouveau_fbcon_save_disable_accel(struct drm_device *dev); | 71 | void nouveau_fbcon_accel_save_disable(struct drm_device *dev); |
65 | void nouveau_fbcon_restore_accel(struct drm_device *dev); | 72 | void nouveau_fbcon_accel_restore(struct drm_device *dev); |
66 | 73 | ||
67 | void nouveau_fbcon_output_poll_changed(struct drm_device *dev); | 74 | void nouveau_fbcon_output_poll_changed(struct drm_device *dev); |
68 | #endif /* __NV50_FBCON_H__ */ | 75 | #endif /* __NV50_FBCON_H__ */ |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index ab5ea3b0d666..0a93114158cd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c | |||
@@ -29,12 +29,13 @@ | |||
29 | #include <linux/ktime.h> | 29 | #include <linux/ktime.h> |
30 | #include <linux/hrtimer.h> | 30 | #include <linux/hrtimer.h> |
31 | 31 | ||
32 | #include <nvif/notify.h> | ||
33 | #include <nvif/event.h> | ||
34 | |||
32 | #include "nouveau_drm.h" | 35 | #include "nouveau_drm.h" |
33 | #include "nouveau_dma.h" | 36 | #include "nouveau_dma.h" |
34 | #include "nouveau_fence.h" | 37 | #include "nouveau_fence.h" |
35 | 38 | ||
36 | #include <engine/fifo.h> | ||
37 | |||
38 | struct fence_work { | 39 | struct fence_work { |
39 | struct work_struct base; | 40 | struct work_struct base; |
40 | struct list_head head; | 41 | struct list_head head; |
@@ -165,12 +166,18 @@ nouveau_fence_done(struct nouveau_fence *fence) | |||
165 | return !fence->channel; | 166 | return !fence->channel; |
166 | } | 167 | } |
167 | 168 | ||
169 | struct nouveau_fence_wait { | ||
170 | struct nouveau_fence_priv *priv; | ||
171 | struct nvif_notify notify; | ||
172 | }; | ||
173 | |||
168 | static int | 174 | static int |
169 | nouveau_fence_wait_uevent_handler(void *data, u32 type, int index) | 175 | nouveau_fence_wait_uevent_handler(struct nvif_notify *notify) |
170 | { | 176 | { |
171 | struct nouveau_fence_priv *priv = data; | 177 | struct nouveau_fence_wait *wait = |
172 | wake_up_all(&priv->waiting); | 178 | container_of(notify, typeof(*wait), notify); |
173 | return NVKM_EVENT_KEEP; | 179 | wake_up_all(&wait->priv->waiting); |
180 | return NVIF_NOTIFY_KEEP; | ||
174 | } | 181 | } |
175 | 182 | ||
176 | static int | 183 | static int |
@@ -178,18 +185,22 @@ nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr) | |||
178 | 185 | ||
179 | { | 186 | { |
180 | struct nouveau_channel *chan = fence->channel; | 187 | struct nouveau_channel *chan = fence->channel; |
181 | struct nouveau_fifo *pfifo = nouveau_fifo(chan->drm->device); | ||
182 | struct nouveau_fence_priv *priv = chan->drm->fence; | 188 | struct nouveau_fence_priv *priv = chan->drm->fence; |
183 | struct nouveau_eventh *handler; | 189 | struct nouveau_fence_wait wait = { .priv = priv }; |
184 | int ret = 0; | 190 | int ret = 0; |
185 | 191 | ||
186 | ret = nouveau_event_new(pfifo->uevent, 1, 0, | 192 | ret = nvif_notify_init(chan->object, NULL, |
187 | nouveau_fence_wait_uevent_handler, | 193 | nouveau_fence_wait_uevent_handler, false, |
188 | priv, &handler); | 194 | G82_CHANNEL_DMA_V0_NTFY_UEVENT, |
195 | &(struct nvif_notify_uevent_req) { | ||
196 | }, | ||
197 | sizeof(struct nvif_notify_uevent_req), | ||
198 | sizeof(struct nvif_notify_uevent_rep), | ||
199 | &wait.notify); | ||
189 | if (ret) | 200 | if (ret) |
190 | return ret; | 201 | return ret; |
191 | 202 | ||
192 | nouveau_event_get(handler); | 203 | nvif_notify_get(&wait.notify); |
193 | 204 | ||
194 | if (fence->timeout) { | 205 | if (fence->timeout) { |
195 | unsigned long timeout = fence->timeout - jiffies; | 206 | unsigned long timeout = fence->timeout - jiffies; |
@@ -221,7 +232,7 @@ nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr) | |||
221 | } | 232 | } |
222 | } | 233 | } |
223 | 234 | ||
224 | nouveau_event_ref(NULL, &handler); | 235 | nvif_notify_fini(&wait.notify); |
225 | if (unlikely(ret < 0)) | 236 | if (unlikely(ret < 0)) |
226 | return ret; | 237 | return ret; |
227 | 238 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index df9d451afdcd..292a677bfed4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
@@ -24,8 +24,6 @@ | |||
24 | * | 24 | * |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <subdev/fb.h> | ||
28 | |||
29 | #include "nouveau_drm.h" | 27 | #include "nouveau_drm.h" |
30 | #include "nouveau_dma.h" | 28 | #include "nouveau_dma.h" |
31 | #include "nouveau_fence.h" | 29 | #include "nouveau_fence.h" |
@@ -58,14 +56,14 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) | |||
58 | struct nouveau_vma *vma; | 56 | struct nouveau_vma *vma; |
59 | int ret; | 57 | int ret; |
60 | 58 | ||
61 | if (!cli->base.vm) | 59 | if (!cli->vm) |
62 | return 0; | 60 | return 0; |
63 | 61 | ||
64 | ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); | 62 | ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); |
65 | if (ret) | 63 | if (ret) |
66 | return ret; | 64 | return ret; |
67 | 65 | ||
68 | vma = nouveau_bo_vma_find(nvbo, cli->base.vm); | 66 | vma = nouveau_bo_vma_find(nvbo, cli->vm); |
69 | if (!vma) { | 67 | if (!vma) { |
70 | vma = kzalloc(sizeof(*vma), GFP_KERNEL); | 68 | vma = kzalloc(sizeof(*vma), GFP_KERNEL); |
71 | if (!vma) { | 69 | if (!vma) { |
@@ -73,7 +71,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) | |||
73 | goto out; | 71 | goto out; |
74 | } | 72 | } |
75 | 73 | ||
76 | ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma); | 74 | ret = nouveau_bo_vma_add(nvbo, cli->vm, vma); |
77 | if (ret) { | 75 | if (ret) { |
78 | kfree(vma); | 76 | kfree(vma); |
79 | goto out; | 77 | goto out; |
@@ -129,14 +127,14 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv) | |||
129 | struct nouveau_vma *vma; | 127 | struct nouveau_vma *vma; |
130 | int ret; | 128 | int ret; |
131 | 129 | ||
132 | if (!cli->base.vm) | 130 | if (!cli->vm) |
133 | return; | 131 | return; |
134 | 132 | ||
135 | ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); | 133 | ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); |
136 | if (ret) | 134 | if (ret) |
137 | return; | 135 | return; |
138 | 136 | ||
139 | vma = nouveau_bo_vma_find(nvbo, cli->base.vm); | 137 | vma = nouveau_bo_vma_find(nvbo, cli->vm); |
140 | if (vma) { | 138 | if (vma) { |
141 | if (--vma->refcount == 0) | 139 | if (--vma->refcount == 0) |
142 | nouveau_gem_object_unmap(nvbo, vma); | 140 | nouveau_gem_object_unmap(nvbo, vma); |
@@ -173,7 +171,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain, | |||
173 | */ | 171 | */ |
174 | nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | | 172 | nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | |
175 | NOUVEAU_GEM_DOMAIN_GART; | 173 | NOUVEAU_GEM_DOMAIN_GART; |
176 | if (nv_device(drm->device)->card_type >= NV_50) | 174 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) |
177 | nvbo->valid_domains &= domain; | 175 | nvbo->valid_domains &= domain; |
178 | 176 | ||
179 | /* Initialize the embedded gem-object. We return a single gem-reference | 177 | /* Initialize the embedded gem-object. We return a single gem-reference |
@@ -202,8 +200,8 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem, | |||
202 | rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; | 200 | rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; |
203 | 201 | ||
204 | rep->offset = nvbo->bo.offset; | 202 | rep->offset = nvbo->bo.offset; |
205 | if (cli->base.vm) { | 203 | if (cli->vm) { |
206 | vma = nouveau_bo_vma_find(nvbo, cli->base.vm); | 204 | vma = nouveau_bo_vma_find(nvbo, cli->vm); |
207 | if (!vma) | 205 | if (!vma) |
208 | return -EINVAL; | 206 | return -EINVAL; |
209 | 207 | ||
@@ -223,13 +221,13 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, | |||
223 | { | 221 | { |
224 | struct nouveau_drm *drm = nouveau_drm(dev); | 222 | struct nouveau_drm *drm = nouveau_drm(dev); |
225 | struct nouveau_cli *cli = nouveau_cli(file_priv); | 223 | struct nouveau_cli *cli = nouveau_cli(file_priv); |
226 | struct nouveau_fb *pfb = nouveau_fb(drm->device); | 224 | struct nouveau_fb *pfb = nvkm_fb(&drm->device); |
227 | struct drm_nouveau_gem_new *req = data; | 225 | struct drm_nouveau_gem_new *req = data; |
228 | struct nouveau_bo *nvbo = NULL; | 226 | struct nouveau_bo *nvbo = NULL; |
229 | int ret = 0; | 227 | int ret = 0; |
230 | 228 | ||
231 | if (!pfb->memtype_valid(pfb, req->info.tile_flags)) { | 229 | if (!pfb->memtype_valid(pfb, req->info.tile_flags)) { |
232 | NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags); | 230 | NV_PRINTK(error, cli, "bad page flags: 0x%08x\n", req->info.tile_flags); |
233 | return -EINVAL; | 231 | return -EINVAL; |
234 | } | 232 | } |
235 | 233 | ||
@@ -350,7 +348,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv, | |||
350 | ww_acquire_init(&op->ticket, &reservation_ww_class); | 348 | ww_acquire_init(&op->ticket, &reservation_ww_class); |
351 | retry: | 349 | retry: |
352 | if (++trycnt > 100000) { | 350 | if (++trycnt > 100000) { |
353 | NV_ERROR(cli, "%s failed and gave up.\n", __func__); | 351 | NV_PRINTK(error, cli, "%s failed and gave up.\n", __func__); |
354 | return -EINVAL; | 352 | return -EINVAL; |
355 | } | 353 | } |
356 | 354 | ||
@@ -361,7 +359,7 @@ retry: | |||
361 | 359 | ||
362 | gem = drm_gem_object_lookup(dev, file_priv, b->handle); | 360 | gem = drm_gem_object_lookup(dev, file_priv, b->handle); |
363 | if (!gem) { | 361 | if (!gem) { |
364 | NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle); | 362 | NV_PRINTK(error, cli, "Unknown handle 0x%08x\n", b->handle); |
365 | ww_acquire_done(&op->ticket); | 363 | ww_acquire_done(&op->ticket); |
366 | validate_fini(op, NULL); | 364 | validate_fini(op, NULL); |
367 | return -ENOENT; | 365 | return -ENOENT; |
@@ -374,7 +372,7 @@ retry: | |||
374 | } | 372 | } |
375 | 373 | ||
376 | if (nvbo->reserved_by && nvbo->reserved_by == file_priv) { | 374 | if (nvbo->reserved_by && nvbo->reserved_by == file_priv) { |
377 | NV_ERROR(cli, "multiple instances of buffer %d on " | 375 | NV_PRINTK(error, cli, "multiple instances of buffer %d on " |
378 | "validation list\n", b->handle); | 376 | "validation list\n", b->handle); |
379 | drm_gem_object_unreference_unlocked(gem); | 377 | drm_gem_object_unreference_unlocked(gem); |
380 | ww_acquire_done(&op->ticket); | 378 | ww_acquire_done(&op->ticket); |
@@ -396,7 +394,7 @@ retry: | |||
396 | ww_acquire_fini(&op->ticket); | 394 | ww_acquire_fini(&op->ticket); |
397 | drm_gem_object_unreference_unlocked(gem); | 395 | drm_gem_object_unreference_unlocked(gem); |
398 | if (ret != -ERESTARTSYS) | 396 | if (ret != -ERESTARTSYS) |
399 | NV_ERROR(cli, "fail reserve\n"); | 397 | NV_PRINTK(error, cli, "fail reserve\n"); |
400 | return ret; | 398 | return ret; |
401 | } | 399 | } |
402 | } | 400 | } |
@@ -414,7 +412,7 @@ retry: | |||
414 | if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART) | 412 | if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART) |
415 | list_add_tail(&nvbo->entry, &op->gart_list); | 413 | list_add_tail(&nvbo->entry, &op->gart_list); |
416 | else { | 414 | else { |
417 | NV_ERROR(cli, "invalid valid domains: 0x%08x\n", | 415 | NV_PRINTK(error, cli, "invalid valid domains: 0x%08x\n", |
418 | b->valid_domains); | 416 | b->valid_domains); |
419 | list_add_tail(&nvbo->entry, &op->both_list); | 417 | list_add_tail(&nvbo->entry, &op->both_list); |
420 | ww_acquire_done(&op->ticket); | 418 | ww_acquire_done(&op->ticket); |
@@ -465,24 +463,24 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli, | |||
465 | b->write_domains, | 463 | b->write_domains, |
466 | b->valid_domains); | 464 | b->valid_domains); |
467 | if (unlikely(ret)) { | 465 | if (unlikely(ret)) { |
468 | NV_ERROR(cli, "fail set_domain\n"); | 466 | NV_PRINTK(error, cli, "fail set_domain\n"); |
469 | return ret; | 467 | return ret; |
470 | } | 468 | } |
471 | 469 | ||
472 | ret = nouveau_bo_validate(nvbo, true, false); | 470 | ret = nouveau_bo_validate(nvbo, true, false); |
473 | if (unlikely(ret)) { | 471 | if (unlikely(ret)) { |
474 | if (ret != -ERESTARTSYS) | 472 | if (ret != -ERESTARTSYS) |
475 | NV_ERROR(cli, "fail ttm_validate\n"); | 473 | NV_PRINTK(error, cli, "fail ttm_validate\n"); |
476 | return ret; | 474 | return ret; |
477 | } | 475 | } |
478 | 476 | ||
479 | ret = validate_sync(chan, nvbo); | 477 | ret = validate_sync(chan, nvbo); |
480 | if (unlikely(ret)) { | 478 | if (unlikely(ret)) { |
481 | NV_ERROR(cli, "fail post-validate sync\n"); | 479 | NV_PRINTK(error, cli, "fail post-validate sync\n"); |
482 | return ret; | 480 | return ret; |
483 | } | 481 | } |
484 | 482 | ||
485 | if (nv_device(drm->device)->card_type < NV_50) { | 483 | if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { |
486 | if (nvbo->bo.offset == b->presumed.offset && | 484 | if (nvbo->bo.offset == b->presumed.offset && |
487 | ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && | 485 | ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && |
488 | b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || | 486 | b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || |
@@ -527,14 +525,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, | |||
527 | ret = validate_init(chan, file_priv, pbbo, nr_buffers, op); | 525 | ret = validate_init(chan, file_priv, pbbo, nr_buffers, op); |
528 | if (unlikely(ret)) { | 526 | if (unlikely(ret)) { |
529 | if (ret != -ERESTARTSYS) | 527 | if (ret != -ERESTARTSYS) |
530 | NV_ERROR(cli, "validate_init\n"); | 528 | NV_PRINTK(error, cli, "validate_init\n"); |
531 | return ret; | 529 | return ret; |
532 | } | 530 | } |
533 | 531 | ||
534 | ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers); | 532 | ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers); |
535 | if (unlikely(ret < 0)) { | 533 | if (unlikely(ret < 0)) { |
536 | if (ret != -ERESTARTSYS) | 534 | if (ret != -ERESTARTSYS) |
537 | NV_ERROR(cli, "validate vram_list\n"); | 535 | NV_PRINTK(error, cli, "validate vram_list\n"); |
538 | validate_fini(op, NULL); | 536 | validate_fini(op, NULL); |
539 | return ret; | 537 | return ret; |
540 | } | 538 | } |
@@ -543,7 +541,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, | |||
543 | ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers); | 541 | ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers); |
544 | if (unlikely(ret < 0)) { | 542 | if (unlikely(ret < 0)) { |
545 | if (ret != -ERESTARTSYS) | 543 | if (ret != -ERESTARTSYS) |
546 | NV_ERROR(cli, "validate gart_list\n"); | 544 | NV_PRINTK(error, cli, "validate gart_list\n"); |
547 | validate_fini(op, NULL); | 545 | validate_fini(op, NULL); |
548 | return ret; | 546 | return ret; |
549 | } | 547 | } |
@@ -552,7 +550,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, | |||
552 | ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers); | 550 | ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers); |
553 | if (unlikely(ret < 0)) { | 551 | if (unlikely(ret < 0)) { |
554 | if (ret != -ERESTARTSYS) | 552 | if (ret != -ERESTARTSYS) |
555 | NV_ERROR(cli, "validate both_list\n"); | 553 | NV_PRINTK(error, cli, "validate both_list\n"); |
556 | validate_fini(op, NULL); | 554 | validate_fini(op, NULL); |
557 | return ret; | 555 | return ret; |
558 | } | 556 | } |
@@ -613,7 +611,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, | |||
613 | uint32_t data; | 611 | uint32_t data; |
614 | 612 | ||
615 | if (unlikely(r->bo_index > req->nr_buffers)) { | 613 | if (unlikely(r->bo_index > req->nr_buffers)) { |
616 | NV_ERROR(cli, "reloc bo index invalid\n"); | 614 | NV_PRINTK(error, cli, "reloc bo index invalid\n"); |
617 | ret = -EINVAL; | 615 | ret = -EINVAL; |
618 | break; | 616 | break; |
619 | } | 617 | } |
@@ -623,7 +621,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, | |||
623 | continue; | 621 | continue; |
624 | 622 | ||
625 | if (unlikely(r->reloc_bo_index > req->nr_buffers)) { | 623 | if (unlikely(r->reloc_bo_index > req->nr_buffers)) { |
626 | NV_ERROR(cli, "reloc container bo index invalid\n"); | 624 | NV_PRINTK(error, cli, "reloc container bo index invalid\n"); |
627 | ret = -EINVAL; | 625 | ret = -EINVAL; |
628 | break; | 626 | break; |
629 | } | 627 | } |
@@ -631,7 +629,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, | |||
631 | 629 | ||
632 | if (unlikely(r->reloc_bo_offset + 4 > | 630 | if (unlikely(r->reloc_bo_offset + 4 > |
633 | nvbo->bo.mem.num_pages << PAGE_SHIFT)) { | 631 | nvbo->bo.mem.num_pages << PAGE_SHIFT)) { |
634 | NV_ERROR(cli, "reloc outside of bo\n"); | 632 | NV_PRINTK(error, cli, "reloc outside of bo\n"); |
635 | ret = -EINVAL; | 633 | ret = -EINVAL; |
636 | break; | 634 | break; |
637 | } | 635 | } |
@@ -640,7 +638,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, | |||
640 | ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, | 638 | ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, |
641 | &nvbo->kmap); | 639 | &nvbo->kmap); |
642 | if (ret) { | 640 | if (ret) { |
643 | NV_ERROR(cli, "failed kmap for reloc\n"); | 641 | NV_PRINTK(error, cli, "failed kmap for reloc\n"); |
644 | break; | 642 | break; |
645 | } | 643 | } |
646 | nvbo->validate_mapped = true; | 644 | nvbo->validate_mapped = true; |
@@ -665,7 +663,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, | |||
665 | ret = ttm_bo_wait(&nvbo->bo, false, false, false); | 663 | ret = ttm_bo_wait(&nvbo->bo, false, false, false); |
666 | spin_unlock(&nvbo->bo.bdev->fence_lock); | 664 | spin_unlock(&nvbo->bo.bdev->fence_lock); |
667 | if (ret) { | 665 | if (ret) { |
668 | NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret); | 666 | NV_PRINTK(error, cli, "reloc wait_idle failed: %d\n", ret); |
669 | break; | 667 | break; |
670 | } | 668 | } |
671 | 669 | ||
@@ -696,7 +694,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
696 | return -ENOMEM; | 694 | return -ENOMEM; |
697 | 695 | ||
698 | list_for_each_entry(temp, &abi16->channels, head) { | 696 | list_for_each_entry(temp, &abi16->channels, head) { |
699 | if (temp->chan->handle == (NVDRM_CHAN | req->channel)) { | 697 | if (temp->chan->object->handle == (NVDRM_CHAN | req->channel)) { |
700 | chan = temp->chan; | 698 | chan = temp->chan; |
701 | break; | 699 | break; |
702 | } | 700 | } |
@@ -711,19 +709,19 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
711 | goto out_next; | 709 | goto out_next; |
712 | 710 | ||
713 | if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) { | 711 | if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) { |
714 | NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n", | 712 | NV_PRINTK(error, cli, "pushbuf push count exceeds limit: %d max %d\n", |
715 | req->nr_push, NOUVEAU_GEM_MAX_PUSH); | 713 | req->nr_push, NOUVEAU_GEM_MAX_PUSH); |
716 | return nouveau_abi16_put(abi16, -EINVAL); | 714 | return nouveau_abi16_put(abi16, -EINVAL); |
717 | } | 715 | } |
718 | 716 | ||
719 | if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) { | 717 | if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) { |
720 | NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n", | 718 | NV_PRINTK(error, cli, "pushbuf bo count exceeds limit: %d max %d\n", |
721 | req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS); | 719 | req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS); |
722 | return nouveau_abi16_put(abi16, -EINVAL); | 720 | return nouveau_abi16_put(abi16, -EINVAL); |
723 | } | 721 | } |
724 | 722 | ||
725 | if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) { | 723 | if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) { |
726 | NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n", | 724 | NV_PRINTK(error, cli, "pushbuf reloc count exceeds limit: %d max %d\n", |
727 | req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS); | 725 | req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS); |
728 | return nouveau_abi16_put(abi16, -EINVAL); | 726 | return nouveau_abi16_put(abi16, -EINVAL); |
729 | } | 727 | } |
@@ -741,7 +739,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
741 | /* Ensure all push buffers are on validate list */ | 739 | /* Ensure all push buffers are on validate list */ |
742 | for (i = 0; i < req->nr_push; i++) { | 740 | for (i = 0; i < req->nr_push; i++) { |
743 | if (push[i].bo_index >= req->nr_buffers) { | 741 | if (push[i].bo_index >= req->nr_buffers) { |
744 | NV_ERROR(cli, "push %d buffer not in list\n", i); | 742 | NV_PRINTK(error, cli, "push %d buffer not in list\n", i); |
745 | ret = -EINVAL; | 743 | ret = -EINVAL; |
746 | goto out_prevalid; | 744 | goto out_prevalid; |
747 | } | 745 | } |
@@ -752,7 +750,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
752 | req->nr_buffers, &op, &do_reloc); | 750 | req->nr_buffers, &op, &do_reloc); |
753 | if (ret) { | 751 | if (ret) { |
754 | if (ret != -ERESTARTSYS) | 752 | if (ret != -ERESTARTSYS) |
755 | NV_ERROR(cli, "validate: %d\n", ret); | 753 | NV_PRINTK(error, cli, "validate: %d\n", ret); |
756 | goto out_prevalid; | 754 | goto out_prevalid; |
757 | } | 755 | } |
758 | 756 | ||
@@ -760,7 +758,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
760 | if (do_reloc) { | 758 | if (do_reloc) { |
761 | ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo); | 759 | ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo); |
762 | if (ret) { | 760 | if (ret) { |
763 | NV_ERROR(cli, "reloc apply: %d\n", ret); | 761 | NV_PRINTK(error, cli, "reloc apply: %d\n", ret); |
764 | goto out; | 762 | goto out; |
765 | } | 763 | } |
766 | } | 764 | } |
@@ -768,7 +766,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
768 | if (chan->dma.ib_max) { | 766 | if (chan->dma.ib_max) { |
769 | ret = nouveau_dma_wait(chan, req->nr_push + 1, 16); | 767 | ret = nouveau_dma_wait(chan, req->nr_push + 1, 16); |
770 | if (ret) { | 768 | if (ret) { |
771 | NV_ERROR(cli, "nv50cal_space: %d\n", ret); | 769 | NV_PRINTK(error, cli, "nv50cal_space: %d\n", ret); |
772 | goto out; | 770 | goto out; |
773 | } | 771 | } |
774 | 772 | ||
@@ -780,10 +778,10 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
780 | push[i].length); | 778 | push[i].length); |
781 | } | 779 | } |
782 | } else | 780 | } else |
783 | if (nv_device(drm->device)->chipset >= 0x25) { | 781 | if (drm->device.info.chipset >= 0x25) { |
784 | ret = RING_SPACE(chan, req->nr_push * 2); | 782 | ret = RING_SPACE(chan, req->nr_push * 2); |
785 | if (ret) { | 783 | if (ret) { |
786 | NV_ERROR(cli, "cal_space: %d\n", ret); | 784 | NV_PRINTK(error, cli, "cal_space: %d\n", ret); |
787 | goto out; | 785 | goto out; |
788 | } | 786 | } |
789 | 787 | ||
@@ -797,7 +795,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
797 | } else { | 795 | } else { |
798 | ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS)); | 796 | ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS)); |
799 | if (ret) { | 797 | if (ret) { |
800 | NV_ERROR(cli, "jmp_space: %d\n", ret); | 798 | NV_PRINTK(error, cli, "jmp_space: %d\n", ret); |
801 | goto out; | 799 | goto out; |
802 | } | 800 | } |
803 | 801 | ||
@@ -835,7 +833,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
835 | 833 | ||
836 | ret = nouveau_fence_new(chan, false, &fence); | 834 | ret = nouveau_fence_new(chan, false, &fence); |
837 | if (ret) { | 835 | if (ret) { |
838 | NV_ERROR(cli, "error fencing pushbuf: %d\n", ret); | 836 | NV_PRINTK(error, cli, "error fencing pushbuf: %d\n", ret); |
839 | WIND_RING(chan); | 837 | WIND_RING(chan); |
840 | goto out; | 838 | goto out; |
841 | } | 839 | } |
@@ -853,7 +851,7 @@ out_next: | |||
853 | req->suffix0 = 0x00000000; | 851 | req->suffix0 = 0x00000000; |
854 | req->suffix1 = 0x00000000; | 852 | req->suffix1 = 0x00000000; |
855 | } else | 853 | } else |
856 | if (nv_device(drm->device)->chipset >= 0x25) { | 854 | if (drm->device.info.chipset >= 0x25) { |
857 | req->suffix0 = 0x00020000; | 855 | req->suffix0 = 0x00020000; |
858 | req->suffix1 = 0x00000000; | 856 | req->suffix1 = 0x00000000; |
859 | } else { | 857 | } else { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c index 19fd767bab10..afb36d66e78d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c +++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c | |||
@@ -34,17 +34,13 @@ | |||
34 | #include "nouveau_drm.h" | 34 | #include "nouveau_drm.h" |
35 | #include "nouveau_hwmon.h" | 35 | #include "nouveau_hwmon.h" |
36 | 36 | ||
37 | #include <subdev/gpio.h> | ||
38 | #include <subdev/timer.h> | ||
39 | #include <subdev/therm.h> | ||
40 | |||
41 | #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) | 37 | #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) |
42 | static ssize_t | 38 | static ssize_t |
43 | nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) | 39 | nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) |
44 | { | 40 | { |
45 | struct drm_device *dev = dev_get_drvdata(d); | 41 | struct drm_device *dev = dev_get_drvdata(d); |
46 | struct nouveau_drm *drm = nouveau_drm(dev); | 42 | struct nouveau_drm *drm = nouveau_drm(dev); |
47 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 43 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
48 | int temp = therm->temp_get(therm); | 44 | int temp = therm->temp_get(therm); |
49 | 45 | ||
50 | if (temp < 0) | 46 | if (temp < 0) |
@@ -70,7 +66,7 @@ nouveau_hwmon_temp1_auto_point1_temp(struct device *d, | |||
70 | { | 66 | { |
71 | struct drm_device *dev = dev_get_drvdata(d); | 67 | struct drm_device *dev = dev_get_drvdata(d); |
72 | struct nouveau_drm *drm = nouveau_drm(dev); | 68 | struct nouveau_drm *drm = nouveau_drm(dev); |
73 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 69 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
74 | 70 | ||
75 | return snprintf(buf, PAGE_SIZE, "%d\n", | 71 | return snprintf(buf, PAGE_SIZE, "%d\n", |
76 | therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST) * 1000); | 72 | therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST) * 1000); |
@@ -82,7 +78,7 @@ nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d, | |||
82 | { | 78 | { |
83 | struct drm_device *dev = dev_get_drvdata(d); | 79 | struct drm_device *dev = dev_get_drvdata(d); |
84 | struct nouveau_drm *drm = nouveau_drm(dev); | 80 | struct nouveau_drm *drm = nouveau_drm(dev); |
85 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 81 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
86 | long value; | 82 | long value; |
87 | 83 | ||
88 | if (kstrtol(buf, 10, &value) == -EINVAL) | 84 | if (kstrtol(buf, 10, &value) == -EINVAL) |
@@ -103,7 +99,7 @@ nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d, | |||
103 | { | 99 | { |
104 | struct drm_device *dev = dev_get_drvdata(d); | 100 | struct drm_device *dev = dev_get_drvdata(d); |
105 | struct nouveau_drm *drm = nouveau_drm(dev); | 101 | struct nouveau_drm *drm = nouveau_drm(dev); |
106 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 102 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
107 | 103 | ||
108 | return snprintf(buf, PAGE_SIZE, "%d\n", | 104 | return snprintf(buf, PAGE_SIZE, "%d\n", |
109 | therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000); | 105 | therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000); |
@@ -115,7 +111,7 @@ nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d, | |||
115 | { | 111 | { |
116 | struct drm_device *dev = dev_get_drvdata(d); | 112 | struct drm_device *dev = dev_get_drvdata(d); |
117 | struct nouveau_drm *drm = nouveau_drm(dev); | 113 | struct nouveau_drm *drm = nouveau_drm(dev); |
118 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 114 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
119 | long value; | 115 | long value; |
120 | 116 | ||
121 | if (kstrtol(buf, 10, &value) == -EINVAL) | 117 | if (kstrtol(buf, 10, &value) == -EINVAL) |
@@ -135,7 +131,7 @@ nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf) | |||
135 | { | 131 | { |
136 | struct drm_device *dev = dev_get_drvdata(d); | 132 | struct drm_device *dev = dev_get_drvdata(d); |
137 | struct nouveau_drm *drm = nouveau_drm(dev); | 133 | struct nouveau_drm *drm = nouveau_drm(dev); |
138 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 134 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
139 | 135 | ||
140 | return snprintf(buf, PAGE_SIZE, "%d\n", | 136 | return snprintf(buf, PAGE_SIZE, "%d\n", |
141 | therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK) * 1000); | 137 | therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK) * 1000); |
@@ -146,7 +142,7 @@ nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a, | |||
146 | { | 142 | { |
147 | struct drm_device *dev = dev_get_drvdata(d); | 143 | struct drm_device *dev = dev_get_drvdata(d); |
148 | struct nouveau_drm *drm = nouveau_drm(dev); | 144 | struct nouveau_drm *drm = nouveau_drm(dev); |
149 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 145 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
150 | long value; | 146 | long value; |
151 | 147 | ||
152 | if (kstrtol(buf, 10, &value) == -EINVAL) | 148 | if (kstrtol(buf, 10, &value) == -EINVAL) |
@@ -166,7 +162,7 @@ nouveau_hwmon_max_temp_hyst(struct device *d, struct device_attribute *a, | |||
166 | { | 162 | { |
167 | struct drm_device *dev = dev_get_drvdata(d); | 163 | struct drm_device *dev = dev_get_drvdata(d); |
168 | struct nouveau_drm *drm = nouveau_drm(dev); | 164 | struct nouveau_drm *drm = nouveau_drm(dev); |
169 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 165 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
170 | 166 | ||
171 | return snprintf(buf, PAGE_SIZE, "%d\n", | 167 | return snprintf(buf, PAGE_SIZE, "%d\n", |
172 | therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000); | 168 | therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000); |
@@ -177,7 +173,7 @@ nouveau_hwmon_set_max_temp_hyst(struct device *d, struct device_attribute *a, | |||
177 | { | 173 | { |
178 | struct drm_device *dev = dev_get_drvdata(d); | 174 | struct drm_device *dev = dev_get_drvdata(d); |
179 | struct nouveau_drm *drm = nouveau_drm(dev); | 175 | struct nouveau_drm *drm = nouveau_drm(dev); |
180 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 176 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
181 | long value; | 177 | long value; |
182 | 178 | ||
183 | if (kstrtol(buf, 10, &value) == -EINVAL) | 179 | if (kstrtol(buf, 10, &value) == -EINVAL) |
@@ -198,7 +194,7 @@ nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a, | |||
198 | { | 194 | { |
199 | struct drm_device *dev = dev_get_drvdata(d); | 195 | struct drm_device *dev = dev_get_drvdata(d); |
200 | struct nouveau_drm *drm = nouveau_drm(dev); | 196 | struct nouveau_drm *drm = nouveau_drm(dev); |
201 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 197 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
202 | 198 | ||
203 | return snprintf(buf, PAGE_SIZE, "%d\n", | 199 | return snprintf(buf, PAGE_SIZE, "%d\n", |
204 | therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL) * 1000); | 200 | therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL) * 1000); |
@@ -210,7 +206,7 @@ nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a, | |||
210 | { | 206 | { |
211 | struct drm_device *dev = dev_get_drvdata(d); | 207 | struct drm_device *dev = dev_get_drvdata(d); |
212 | struct nouveau_drm *drm = nouveau_drm(dev); | 208 | struct nouveau_drm *drm = nouveau_drm(dev); |
213 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 209 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
214 | long value; | 210 | long value; |
215 | 211 | ||
216 | if (kstrtol(buf, 10, &value) == -EINVAL) | 212 | if (kstrtol(buf, 10, &value) == -EINVAL) |
@@ -231,7 +227,7 @@ nouveau_hwmon_critical_temp_hyst(struct device *d, struct device_attribute *a, | |||
231 | { | 227 | { |
232 | struct drm_device *dev = dev_get_drvdata(d); | 228 | struct drm_device *dev = dev_get_drvdata(d); |
233 | struct nouveau_drm *drm = nouveau_drm(dev); | 229 | struct nouveau_drm *drm = nouveau_drm(dev); |
234 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 230 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
235 | 231 | ||
236 | return snprintf(buf, PAGE_SIZE, "%d\n", | 232 | return snprintf(buf, PAGE_SIZE, "%d\n", |
237 | therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST) * 1000); | 233 | therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST) * 1000); |
@@ -244,7 +240,7 @@ nouveau_hwmon_set_critical_temp_hyst(struct device *d, | |||
244 | { | 240 | { |
245 | struct drm_device *dev = dev_get_drvdata(d); | 241 | struct drm_device *dev = dev_get_drvdata(d); |
246 | struct nouveau_drm *drm = nouveau_drm(dev); | 242 | struct nouveau_drm *drm = nouveau_drm(dev); |
247 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 243 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
248 | long value; | 244 | long value; |
249 | 245 | ||
250 | if (kstrtol(buf, 10, &value) == -EINVAL) | 246 | if (kstrtol(buf, 10, &value) == -EINVAL) |
@@ -264,7 +260,7 @@ nouveau_hwmon_emergency_temp(struct device *d, struct device_attribute *a, | |||
264 | { | 260 | { |
265 | struct drm_device *dev = dev_get_drvdata(d); | 261 | struct drm_device *dev = dev_get_drvdata(d); |
266 | struct nouveau_drm *drm = nouveau_drm(dev); | 262 | struct nouveau_drm *drm = nouveau_drm(dev); |
267 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 263 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
268 | 264 | ||
269 | return snprintf(buf, PAGE_SIZE, "%d\n", | 265 | return snprintf(buf, PAGE_SIZE, "%d\n", |
270 | therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN) * 1000); | 266 | therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN) * 1000); |
@@ -276,7 +272,7 @@ nouveau_hwmon_set_emergency_temp(struct device *d, struct device_attribute *a, | |||
276 | { | 272 | { |
277 | struct drm_device *dev = dev_get_drvdata(d); | 273 | struct drm_device *dev = dev_get_drvdata(d); |
278 | struct nouveau_drm *drm = nouveau_drm(dev); | 274 | struct nouveau_drm *drm = nouveau_drm(dev); |
279 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 275 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
280 | long value; | 276 | long value; |
281 | 277 | ||
282 | if (kstrtol(buf, 10, &value) == -EINVAL) | 278 | if (kstrtol(buf, 10, &value) == -EINVAL) |
@@ -297,7 +293,7 @@ nouveau_hwmon_emergency_temp_hyst(struct device *d, struct device_attribute *a, | |||
297 | { | 293 | { |
298 | struct drm_device *dev = dev_get_drvdata(d); | 294 | struct drm_device *dev = dev_get_drvdata(d); |
299 | struct nouveau_drm *drm = nouveau_drm(dev); | 295 | struct nouveau_drm *drm = nouveau_drm(dev); |
300 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 296 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
301 | 297 | ||
302 | return snprintf(buf, PAGE_SIZE, "%d\n", | 298 | return snprintf(buf, PAGE_SIZE, "%d\n", |
303 | therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000); | 299 | therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000); |
@@ -310,7 +306,7 @@ nouveau_hwmon_set_emergency_temp_hyst(struct device *d, | |||
310 | { | 306 | { |
311 | struct drm_device *dev = dev_get_drvdata(d); | 307 | struct drm_device *dev = dev_get_drvdata(d); |
312 | struct nouveau_drm *drm = nouveau_drm(dev); | 308 | struct nouveau_drm *drm = nouveau_drm(dev); |
313 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 309 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
314 | long value; | 310 | long value; |
315 | 311 | ||
316 | if (kstrtol(buf, 10, &value) == -EINVAL) | 312 | if (kstrtol(buf, 10, &value) == -EINVAL) |
@@ -350,7 +346,7 @@ nouveau_hwmon_show_fan1_input(struct device *d, struct device_attribute *attr, | |||
350 | { | 346 | { |
351 | struct drm_device *dev = dev_get_drvdata(d); | 347 | struct drm_device *dev = dev_get_drvdata(d); |
352 | struct nouveau_drm *drm = nouveau_drm(dev); | 348 | struct nouveau_drm *drm = nouveau_drm(dev); |
353 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 349 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
354 | 350 | ||
355 | return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm)); | 351 | return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm)); |
356 | } | 352 | } |
@@ -363,7 +359,7 @@ nouveau_hwmon_get_pwm1_enable(struct device *d, | |||
363 | { | 359 | { |
364 | struct drm_device *dev = dev_get_drvdata(d); | 360 | struct drm_device *dev = dev_get_drvdata(d); |
365 | struct nouveau_drm *drm = nouveau_drm(dev); | 361 | struct nouveau_drm *drm = nouveau_drm(dev); |
366 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 362 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
367 | int ret; | 363 | int ret; |
368 | 364 | ||
369 | ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MODE); | 365 | ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MODE); |
@@ -379,7 +375,7 @@ nouveau_hwmon_set_pwm1_enable(struct device *d, struct device_attribute *a, | |||
379 | { | 375 | { |
380 | struct drm_device *dev = dev_get_drvdata(d); | 376 | struct drm_device *dev = dev_get_drvdata(d); |
381 | struct nouveau_drm *drm = nouveau_drm(dev); | 377 | struct nouveau_drm *drm = nouveau_drm(dev); |
382 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 378 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
383 | long value; | 379 | long value; |
384 | int ret; | 380 | int ret; |
385 | 381 | ||
@@ -402,7 +398,7 @@ nouveau_hwmon_get_pwm1(struct device *d, struct device_attribute *a, char *buf) | |||
402 | { | 398 | { |
403 | struct drm_device *dev = dev_get_drvdata(d); | 399 | struct drm_device *dev = dev_get_drvdata(d); |
404 | struct nouveau_drm *drm = nouveau_drm(dev); | 400 | struct nouveau_drm *drm = nouveau_drm(dev); |
405 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 401 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
406 | int ret; | 402 | int ret; |
407 | 403 | ||
408 | ret = therm->fan_get(therm); | 404 | ret = therm->fan_get(therm); |
@@ -418,7 +414,7 @@ nouveau_hwmon_set_pwm1(struct device *d, struct device_attribute *a, | |||
418 | { | 414 | { |
419 | struct drm_device *dev = dev_get_drvdata(d); | 415 | struct drm_device *dev = dev_get_drvdata(d); |
420 | struct nouveau_drm *drm = nouveau_drm(dev); | 416 | struct nouveau_drm *drm = nouveau_drm(dev); |
421 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 417 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
422 | int ret = -ENODEV; | 418 | int ret = -ENODEV; |
423 | long value; | 419 | long value; |
424 | 420 | ||
@@ -442,7 +438,7 @@ nouveau_hwmon_get_pwm1_min(struct device *d, | |||
442 | { | 438 | { |
443 | struct drm_device *dev = dev_get_drvdata(d); | 439 | struct drm_device *dev = dev_get_drvdata(d); |
444 | struct nouveau_drm *drm = nouveau_drm(dev); | 440 | struct nouveau_drm *drm = nouveau_drm(dev); |
445 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 441 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
446 | int ret; | 442 | int ret; |
447 | 443 | ||
448 | ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY); | 444 | ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY); |
@@ -458,7 +454,7 @@ nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a, | |||
458 | { | 454 | { |
459 | struct drm_device *dev = dev_get_drvdata(d); | 455 | struct drm_device *dev = dev_get_drvdata(d); |
460 | struct nouveau_drm *drm = nouveau_drm(dev); | 456 | struct nouveau_drm *drm = nouveau_drm(dev); |
461 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 457 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
462 | long value; | 458 | long value; |
463 | int ret; | 459 | int ret; |
464 | 460 | ||
@@ -482,7 +478,7 @@ nouveau_hwmon_get_pwm1_max(struct device *d, | |||
482 | { | 478 | { |
483 | struct drm_device *dev = dev_get_drvdata(d); | 479 | struct drm_device *dev = dev_get_drvdata(d); |
484 | struct nouveau_drm *drm = nouveau_drm(dev); | 480 | struct nouveau_drm *drm = nouveau_drm(dev); |
485 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 481 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
486 | int ret; | 482 | int ret; |
487 | 483 | ||
488 | ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY); | 484 | ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY); |
@@ -498,7 +494,7 @@ nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a, | |||
498 | { | 494 | { |
499 | struct drm_device *dev = dev_get_drvdata(d); | 495 | struct drm_device *dev = dev_get_drvdata(d); |
500 | struct nouveau_drm *drm = nouveau_drm(dev); | 496 | struct nouveau_drm *drm = nouveau_drm(dev); |
501 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 497 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
502 | long value; | 498 | long value; |
503 | int ret; | 499 | int ret; |
504 | 500 | ||
@@ -565,7 +561,7 @@ nouveau_hwmon_init(struct drm_device *dev) | |||
565 | { | 561 | { |
566 | #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) | 562 | #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) |
567 | struct nouveau_drm *drm = nouveau_drm(dev); | 563 | struct nouveau_drm *drm = nouveau_drm(dev); |
568 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 564 | struct nouveau_therm *therm = nvkm_therm(&drm->device); |
569 | struct nouveau_hwmon *hwmon; | 565 | struct nouveau_hwmon *hwmon; |
570 | struct device *hwmon_dev; | 566 | struct device *hwmon_dev; |
571 | int ret = 0; | 567 | int ret = 0; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c new file mode 100644 index 000000000000..47ca88623753 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c | |||
@@ -0,0 +1,136 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | ||
23 | */ | ||
24 | |||
25 | /******************************************************************************* | ||
26 | * NVIF client driver - NVKM directly linked | ||
27 | ******************************************************************************/ | ||
28 | |||
29 | #include <core/client.h> | ||
30 | #include <core/notify.h> | ||
31 | #include <core/ioctl.h> | ||
32 | |||
33 | #include <nvif/client.h> | ||
34 | #include <nvif/driver.h> | ||
35 | #include <nvif/notify.h> | ||
36 | #include <nvif/event.h> | ||
37 | #include <nvif/ioctl.h> | ||
38 | |||
39 | #include "nouveau_drm.h" | ||
40 | #include "nouveau_usif.h" | ||
41 | |||
42 | static void | ||
43 | nvkm_client_unmap(void *priv, void *ptr, u32 size) | ||
44 | { | ||
45 | iounmap(ptr); | ||
46 | } | ||
47 | |||
48 | static void * | ||
49 | nvkm_client_map(void *priv, u64 handle, u32 size) | ||
50 | { | ||
51 | return ioremap(handle, size); | ||
52 | } | ||
53 | |||
54 | static int | ||
55 | nvkm_client_ioctl(void *priv, bool super, void *data, u32 size, void **hack) | ||
56 | { | ||
57 | return nvkm_ioctl(priv, super, data, size, hack); | ||
58 | } | ||
59 | |||
60 | static int | ||
61 | nvkm_client_resume(void *priv) | ||
62 | { | ||
63 | return nouveau_client_init(priv); | ||
64 | } | ||
65 | |||
66 | static int | ||
67 | nvkm_client_suspend(void *priv) | ||
68 | { | ||
69 | return nouveau_client_fini(priv, true); | ||
70 | } | ||
71 | |||
72 | static void | ||
73 | nvkm_client_fini(void *priv) | ||
74 | { | ||
75 | struct nouveau_object *client = priv; | ||
76 | nouveau_client_fini(nv_client(client), false); | ||
77 | atomic_set(&client->refcount, 1); | ||
78 | nouveau_object_ref(NULL, &client); | ||
79 | } | ||
80 | |||
81 | static int | ||
82 | nvkm_client_ntfy(const void *header, u32 length, const void *data, u32 size) | ||
83 | { | ||
84 | const union { | ||
85 | struct nvif_notify_req_v0 v0; | ||
86 | } *args = header; | ||
87 | u8 route; | ||
88 | |||
89 | if (length == sizeof(args->v0) && args->v0.version == 0) { | ||
90 | route = args->v0.route; | ||
91 | } else { | ||
92 | WARN_ON(1); | ||
93 | return NVKM_NOTIFY_DROP; | ||
94 | } | ||
95 | |||
96 | switch (route) { | ||
97 | case NVDRM_NOTIFY_NVIF: | ||
98 | return nvif_notify(header, length, data, size); | ||
99 | case NVDRM_NOTIFY_USIF: | ||
100 | return usif_notify(header, length, data, size); | ||
101 | default: | ||
102 | WARN_ON(1); | ||
103 | break; | ||
104 | } | ||
105 | |||
106 | return NVKM_NOTIFY_DROP; | ||
107 | } | ||
108 | |||
109 | static int | ||
110 | nvkm_client_init(const char *name, u64 device, const char *cfg, | ||
111 | const char *dbg, void **ppriv) | ||
112 | { | ||
113 | struct nouveau_client *client; | ||
114 | int ret; | ||
115 | |||
116 | ret = nouveau_client_create(name, device, cfg, dbg, &client); | ||
117 | *ppriv = client; | ||
118 | if (ret) | ||
119 | return ret; | ||
120 | |||
121 | client->ntfy = nvkm_client_ntfy; | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | const struct nvif_driver | ||
126 | nvif_driver_nvkm = { | ||
127 | .name = "nvkm", | ||
128 | .init = nvkm_client_init, | ||
129 | .fini = nvkm_client_fini, | ||
130 | .suspend = nvkm_client_suspend, | ||
131 | .resume = nvkm_client_resume, | ||
132 | .ioctl = nvkm_client_ioctl, | ||
133 | .map = nvkm_client_map, | ||
134 | .unmap = nvkm_client_unmap, | ||
135 | .keep = false, | ||
136 | }; | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c new file mode 100644 index 000000000000..0ffeb50d0088 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_platform.c | |||
@@ -0,0 +1,182 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
20 | * DEALINGS IN THE SOFTWARE. | ||
21 | */ | ||
22 | |||
23 | #include <linux/clk.h> | ||
24 | #include <linux/io.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/platform_device.h> | ||
27 | #include <linux/of.h> | ||
28 | #include <linux/reset.h> | ||
29 | #include <linux/regulator/consumer.h> | ||
30 | #include <soc/tegra/pmc.h> | ||
31 | |||
32 | #include "nouveau_drm.h" | ||
33 | #include "nouveau_platform.h" | ||
34 | |||
35 | static int nouveau_platform_power_up(struct nouveau_platform_gpu *gpu) | ||
36 | { | ||
37 | int err; | ||
38 | |||
39 | err = regulator_enable(gpu->vdd); | ||
40 | if (err) | ||
41 | goto err_power; | ||
42 | |||
43 | err = clk_prepare_enable(gpu->clk); | ||
44 | if (err) | ||
45 | goto err_clk; | ||
46 | err = clk_prepare_enable(gpu->clk_pwr); | ||
47 | if (err) | ||
48 | goto err_clk_pwr; | ||
49 | clk_set_rate(gpu->clk_pwr, 204000000); | ||
50 | udelay(10); | ||
51 | |||
52 | reset_control_assert(gpu->rst); | ||
53 | udelay(10); | ||
54 | |||
55 | err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D); | ||
56 | if (err) | ||
57 | goto err_clamp; | ||
58 | udelay(10); | ||
59 | |||
60 | reset_control_deassert(gpu->rst); | ||
61 | udelay(10); | ||
62 | |||
63 | return 0; | ||
64 | |||
65 | err_clamp: | ||
66 | clk_disable_unprepare(gpu->clk_pwr); | ||
67 | err_clk_pwr: | ||
68 | clk_disable_unprepare(gpu->clk); | ||
69 | err_clk: | ||
70 | regulator_disable(gpu->vdd); | ||
71 | err_power: | ||
72 | return err; | ||
73 | } | ||
74 | |||
75 | static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu) | ||
76 | { | ||
77 | int err; | ||
78 | |||
79 | reset_control_assert(gpu->rst); | ||
80 | udelay(10); | ||
81 | |||
82 | clk_disable_unprepare(gpu->clk_pwr); | ||
83 | clk_disable_unprepare(gpu->clk); | ||
84 | udelay(10); | ||
85 | |||
86 | err = regulator_disable(gpu->vdd); | ||
87 | if (err) | ||
88 | return err; | ||
89 | |||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | static int nouveau_platform_probe(struct platform_device *pdev) | ||
94 | { | ||
95 | struct nouveau_platform_gpu *gpu; | ||
96 | struct nouveau_platform_device *device; | ||
97 | struct drm_device *drm; | ||
98 | int err; | ||
99 | |||
100 | gpu = devm_kzalloc(&pdev->dev, sizeof(*gpu), GFP_KERNEL); | ||
101 | if (!gpu) | ||
102 | return -ENOMEM; | ||
103 | |||
104 | gpu->vdd = devm_regulator_get(&pdev->dev, "vdd"); | ||
105 | if (IS_ERR(gpu->vdd)) | ||
106 | return PTR_ERR(gpu->vdd); | ||
107 | |||
108 | gpu->rst = devm_reset_control_get(&pdev->dev, "gpu"); | ||
109 | if (IS_ERR(gpu->rst)) | ||
110 | return PTR_ERR(gpu->rst); | ||
111 | |||
112 | gpu->clk = devm_clk_get(&pdev->dev, "gpu"); | ||
113 | if (IS_ERR(gpu->clk)) | ||
114 | return PTR_ERR(gpu->clk); | ||
115 | |||
116 | gpu->clk_pwr = devm_clk_get(&pdev->dev, "pwr"); | ||
117 | if (IS_ERR(gpu->clk_pwr)) | ||
118 | return PTR_ERR(gpu->clk_pwr); | ||
119 | |||
120 | err = nouveau_platform_power_up(gpu); | ||
121 | if (err) | ||
122 | return err; | ||
123 | |||
124 | drm = nouveau_platform_device_create(pdev, &device); | ||
125 | if (IS_ERR(drm)) { | ||
126 | err = PTR_ERR(drm); | ||
127 | goto power_down; | ||
128 | } | ||
129 | |||
130 | device->gpu = gpu; | ||
131 | |||
132 | err = drm_dev_register(drm, 0); | ||
133 | if (err < 0) | ||
134 | goto err_unref; | ||
135 | |||
136 | return 0; | ||
137 | |||
138 | err_unref: | ||
139 | drm_dev_unref(drm); | ||
140 | |||
141 | return 0; | ||
142 | |||
143 | power_down: | ||
144 | nouveau_platform_power_down(gpu); | ||
145 | |||
146 | return err; | ||
147 | } | ||
148 | |||
149 | static int nouveau_platform_remove(struct platform_device *pdev) | ||
150 | { | ||
151 | struct drm_device *drm_dev = platform_get_drvdata(pdev); | ||
152 | struct nouveau_device *device = nouveau_dev(drm_dev); | ||
153 | struct nouveau_platform_gpu *gpu = nv_device_to_platform(device)->gpu; | ||
154 | |||
155 | nouveau_drm_device_remove(drm_dev); | ||
156 | |||
157 | return nouveau_platform_power_down(gpu); | ||
158 | } | ||
159 | |||
160 | #if IS_ENABLED(CONFIG_OF) | ||
161 | static const struct of_device_id nouveau_platform_match[] = { | ||
162 | { .compatible = "nvidia,gk20a" }, | ||
163 | { } | ||
164 | }; | ||
165 | |||
166 | MODULE_DEVICE_TABLE(of, nouveau_platform_match); | ||
167 | #endif | ||
168 | |||
169 | struct platform_driver nouveau_platform_driver = { | ||
170 | .driver = { | ||
171 | .name = "nouveau", | ||
172 | .of_match_table = of_match_ptr(nouveau_platform_match), | ||
173 | }, | ||
174 | .probe = nouveau_platform_probe, | ||
175 | .remove = nouveau_platform_remove, | ||
176 | }; | ||
177 | |||
178 | module_platform_driver(nouveau_platform_driver); | ||
179 | |||
180 | MODULE_AUTHOR(DRIVER_AUTHOR); | ||
181 | MODULE_DESCRIPTION(DRIVER_DESC); | ||
182 | MODULE_LICENSE("GPL and additional rights"); | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.h b/drivers/gpu/drm/nouveau/nouveau_platform.h new file mode 100644 index 000000000000..91f66504900e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_platform.h | |||
@@ -0,0 +1,49 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
20 | * DEALINGS IN THE SOFTWARE. | ||
21 | */ | ||
22 | |||
23 | #ifndef __NOUVEAU_PLATFORM_H__ | ||
24 | #define __NOUVEAU_PLATFORM_H__ | ||
25 | |||
26 | #include "core/device.h" | ||
27 | |||
28 | struct reset_control; | ||
29 | struct clk; | ||
30 | struct regulator; | ||
31 | |||
32 | struct nouveau_platform_gpu { | ||
33 | struct reset_control *rst; | ||
34 | struct clk *clk; | ||
35 | struct clk *clk_pwr; | ||
36 | |||
37 | struct regulator *vdd; | ||
38 | }; | ||
39 | |||
40 | struct nouveau_platform_device { | ||
41 | struct nouveau_device device; | ||
42 | |||
43 | struct nouveau_platform_gpu *gpu; | ||
44 | }; | ||
45 | |||
46 | #define nv_device_to_platform(d) \ | ||
47 | container_of(d, struct nouveau_platform_device, device) | ||
48 | |||
49 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index a4d22e5eb176..01707e7deaf5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c | |||
@@ -1,8 +1,6 @@ | |||
1 | #include <linux/pagemap.h> | 1 | #include <linux/pagemap.h> |
2 | #include <linux/slab.h> | 2 | #include <linux/slab.h> |
3 | 3 | ||
4 | #include <subdev/fb.h> | ||
5 | |||
6 | #include "nouveau_drm.h" | 4 | #include "nouveau_drm.h" |
7 | #include "nouveau_ttm.h" | 5 | #include "nouveau_ttm.h" |
8 | 6 | ||
@@ -104,7 +102,7 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev, | |||
104 | return NULL; | 102 | return NULL; |
105 | 103 | ||
106 | nvbe->dev = drm->dev; | 104 | nvbe->dev = drm->dev; |
107 | if (nv_device(drm->device)->card_type < NV_50) | 105 | if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) |
108 | nvbe->ttm.ttm.func = &nv04_sgdma_backend; | 106 | nvbe->ttm.ttm.func = &nv04_sgdma_backend; |
109 | else | 107 | else |
110 | nvbe->ttm.ttm.func = &nv50_sgdma_backend; | 108 | nvbe->ttm.ttm.func = &nv50_sgdma_backend; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.c b/drivers/gpu/drm/nouveau/nouveau_sysfs.c index 75dda2b07176..3c6962d15b26 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sysfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.c | |||
@@ -22,10 +22,15 @@ | |||
22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | 22 | * Authors: Ben Skeggs <bskeggs@redhat.com> |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <nvif/os.h> | ||
26 | #include <nvif/class.h> | ||
27 | #include <nvif/ioctl.h> | ||
28 | |||
25 | #include "nouveau_sysfs.h" | 29 | #include "nouveau_sysfs.h" |
26 | 30 | ||
27 | #include <core/object.h> | 31 | MODULE_PARM_DESC(pstate, "enable sysfs pstate file, which will be moved in the future"); |
28 | #include <core/class.h> | 32 | static int nouveau_pstate; |
33 | module_param_named(pstate, nouveau_pstate, int, 0400); | ||
29 | 34 | ||
30 | static inline struct drm_device * | 35 | static inline struct drm_device * |
31 | drm_device(struct device *d) | 36 | drm_device(struct device *d) |
@@ -43,38 +48,42 @@ static ssize_t | |||
43 | nouveau_sysfs_pstate_get(struct device *d, struct device_attribute *a, char *b) | 48 | nouveau_sysfs_pstate_get(struct device *d, struct device_attribute *a, char *b) |
44 | { | 49 | { |
45 | struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d)); | 50 | struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d)); |
46 | struct nv_control_pstate_info info; | 51 | struct nvif_control_pstate_info_v0 info = {}; |
47 | size_t cnt = PAGE_SIZE; | 52 | size_t cnt = PAGE_SIZE; |
48 | char *buf = b; | 53 | char *buf = b; |
49 | int ret, i; | 54 | int ret, i; |
50 | 55 | ||
51 | ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_INFO, &info, sizeof(info)); | 56 | ret = nvif_mthd(&sysfs->ctrl, NVIF_CONTROL_PSTATE_INFO, |
57 | &info, sizeof(info)); | ||
52 | if (ret) | 58 | if (ret) |
53 | return ret; | 59 | return ret; |
54 | 60 | ||
55 | for (i = 0; i < info.count + 1; i++) { | 61 | for (i = 0; i < info.count + 1; i++) { |
56 | const s32 state = i < info.count ? i : | 62 | const s32 state = i < info.count ? i : |
57 | NV_CONTROL_PSTATE_ATTR_STATE_CURRENT; | 63 | NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT; |
58 | struct nv_control_pstate_attr attr = { | 64 | struct nvif_control_pstate_attr_v0 attr = { |
59 | .state = state, | 65 | .state = state, |
60 | .index = 0, | 66 | .index = 0, |
61 | }; | 67 | }; |
62 | 68 | ||
63 | ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_ATTR, | 69 | ret = nvif_mthd(&sysfs->ctrl, NVIF_CONTROL_PSTATE_ATTR, |
64 | &attr, sizeof(attr)); | 70 | &attr, sizeof(attr)); |
65 | if (ret) | 71 | if (ret) |
66 | return ret; | 72 | return ret; |
67 | 73 | ||
68 | if (i < info.count) | 74 | if (i < info.count) |
69 | snappendf(buf, cnt, "%02x:", attr.state); | 75 | snappendf(buf, cnt, "%02x:", attr.state); |
70 | else | 76 | else |
71 | snappendf(buf, cnt, "--:"); | 77 | snappendf(buf, cnt, "%s:", info.pwrsrc == 0 ? "DC" : |
78 | info.pwrsrc == 1 ? "AC" : | ||
79 | "--"); | ||
72 | 80 | ||
73 | attr.index = 0; | 81 | attr.index = 0; |
74 | do { | 82 | do { |
75 | attr.state = state; | 83 | attr.state = state; |
76 | ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_ATTR, | 84 | ret = nvif_mthd(&sysfs->ctrl, |
77 | &attr, sizeof(attr)); | 85 | NVIF_CONTROL_PSTATE_ATTR, |
86 | &attr, sizeof(attr)); | ||
78 | if (ret) | 87 | if (ret) |
79 | return ret; | 88 | return ret; |
80 | 89 | ||
@@ -84,9 +93,20 @@ nouveau_sysfs_pstate_get(struct device *d, struct device_attribute *a, char *b) | |||
84 | snappendf(buf, cnt, " %s", attr.unit); | 93 | snappendf(buf, cnt, " %s", attr.unit); |
85 | } while (attr.index); | 94 | } while (attr.index); |
86 | 95 | ||
87 | if ((state >= 0 && info.pstate == state) || | 96 | if (state >= 0) { |
88 | (state < 0 && info.ustate < 0)) | 97 | if (info.ustate_ac == state) |
89 | snappendf(buf, cnt, " *"); | 98 | snappendf(buf, cnt, " AC"); |
99 | if (info.ustate_dc == state) | ||
100 | snappendf(buf, cnt, " DC"); | ||
101 | if (info.pstate == state) | ||
102 | snappendf(buf, cnt, " *"); | ||
103 | } else { | ||
104 | if (info.ustate_ac < -1) | ||
105 | snappendf(buf, cnt, " AC"); | ||
106 | if (info.ustate_dc < -1) | ||
107 | snappendf(buf, cnt, " DC"); | ||
108 | } | ||
109 | |||
90 | snappendf(buf, cnt, "\n"); | 110 | snappendf(buf, cnt, "\n"); |
91 | } | 111 | } |
92 | 112 | ||
@@ -98,26 +118,36 @@ nouveau_sysfs_pstate_set(struct device *d, struct device_attribute *a, | |||
98 | const char *buf, size_t count) | 118 | const char *buf, size_t count) |
99 | { | 119 | { |
100 | struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d)); | 120 | struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d)); |
101 | struct nv_control_pstate_user args; | 121 | struct nvif_control_pstate_user_v0 args = { .pwrsrc = -EINVAL }; |
102 | long value, ret; | 122 | long value, ret; |
103 | char *tmp; | 123 | char *tmp; |
104 | 124 | ||
105 | if ((tmp = strchr(buf, '\n'))) | 125 | if ((tmp = strchr(buf, '\n'))) |
106 | *tmp = '\0'; | 126 | *tmp = '\0'; |
107 | 127 | ||
128 | if (!strncasecmp(buf, "dc:", 3)) { | ||
129 | args.pwrsrc = 0; | ||
130 | buf += 3; | ||
131 | } else | ||
132 | if (!strncasecmp(buf, "ac:", 3)) { | ||
133 | args.pwrsrc = 1; | ||
134 | buf += 3; | ||
135 | } | ||
136 | |||
108 | if (!strcasecmp(buf, "none")) | 137 | if (!strcasecmp(buf, "none")) |
109 | args.state = NV_CONTROL_PSTATE_USER_STATE_UNKNOWN; | 138 | args.ustate = NVIF_CONTROL_PSTATE_USER_V0_STATE_UNKNOWN; |
110 | else | 139 | else |
111 | if (!strcasecmp(buf, "auto")) | 140 | if (!strcasecmp(buf, "auto")) |
112 | args.state = NV_CONTROL_PSTATE_USER_STATE_PERFMON; | 141 | args.ustate = NVIF_CONTROL_PSTATE_USER_V0_STATE_PERFMON; |
113 | else { | 142 | else { |
114 | ret = kstrtol(buf, 16, &value); | 143 | ret = kstrtol(buf, 16, &value); |
115 | if (ret) | 144 | if (ret) |
116 | return ret; | 145 | return ret; |
117 | args.state = value; | 146 | args.ustate = value; |
118 | } | 147 | } |
119 | 148 | ||
120 | ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_USER, &args, sizeof(args)); | 149 | ret = nvif_mthd(&sysfs->ctrl, NVIF_CONTROL_PSTATE_USER, |
150 | &args, sizeof(args)); | ||
121 | if (ret < 0) | 151 | if (ret < 0) |
122 | return ret; | 152 | return ret; |
123 | 153 | ||
@@ -132,11 +162,11 @@ nouveau_sysfs_fini(struct drm_device *dev) | |||
132 | { | 162 | { |
133 | struct nouveau_sysfs *sysfs = nouveau_sysfs(dev); | 163 | struct nouveau_sysfs *sysfs = nouveau_sysfs(dev); |
134 | struct nouveau_drm *drm = nouveau_drm(dev); | 164 | struct nouveau_drm *drm = nouveau_drm(dev); |
135 | struct nouveau_device *device = nv_device(drm->device); | 165 | struct nvif_device *device = &drm->device; |
136 | 166 | ||
137 | if (sysfs->ctrl) { | 167 | if (sysfs && sysfs->ctrl.priv) { |
138 | device_remove_file(nv_device_base(device), &dev_attr_pstate); | 168 | device_remove_file(nv_device_base(nvkm_device(device)), &dev_attr_pstate); |
139 | nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL); | 169 | nvif_object_fini(&sysfs->ctrl); |
140 | } | 170 | } |
141 | 171 | ||
142 | drm->sysfs = NULL; | 172 | drm->sysfs = NULL; |
@@ -147,18 +177,22 @@ int | |||
147 | nouveau_sysfs_init(struct drm_device *dev) | 177 | nouveau_sysfs_init(struct drm_device *dev) |
148 | { | 178 | { |
149 | struct nouveau_drm *drm = nouveau_drm(dev); | 179 | struct nouveau_drm *drm = nouveau_drm(dev); |
150 | struct nouveau_device *device = nv_device(drm->device); | 180 | struct nvif_device *device = &drm->device; |
151 | struct nouveau_sysfs *sysfs; | 181 | struct nouveau_sysfs *sysfs; |
152 | int ret; | 182 | int ret; |
153 | 183 | ||
184 | if (!nouveau_pstate) | ||
185 | return 0; | ||
186 | |||
154 | sysfs = drm->sysfs = kzalloc(sizeof(*sysfs), GFP_KERNEL); | 187 | sysfs = drm->sysfs = kzalloc(sizeof(*sysfs), GFP_KERNEL); |
155 | if (!sysfs) | 188 | if (!sysfs) |
156 | return -ENOMEM; | 189 | return -ENOMEM; |
157 | 190 | ||
158 | ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL, | 191 | ret = nvif_object_init(nvif_object(device), NULL, NVDRM_CONTROL, |
159 | NV_CONTROL_CLASS, NULL, 0, &sysfs->ctrl); | 192 | NVIF_IOCTL_NEW_V0_CONTROL, NULL, 0, |
193 | &sysfs->ctrl); | ||
160 | if (ret == 0) | 194 | if (ret == 0) |
161 | device_create_file(nv_device_base(device), &dev_attr_pstate); | 195 | device_create_file(nv_device_base(nvkm_device(device)), &dev_attr_pstate); |
162 | 196 | ||
163 | return 0; | 197 | return 0; |
164 | } | 198 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.h b/drivers/gpu/drm/nouveau/nouveau_sysfs.h index 74b47f1e01ed..f973378160f8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sysfs.h +++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.h | |||
@@ -4,7 +4,7 @@ | |||
4 | #include "nouveau_drm.h" | 4 | #include "nouveau_drm.h" |
5 | 5 | ||
6 | struct nouveau_sysfs { | 6 | struct nouveau_sysfs { |
7 | struct nouveau_object *ctrl; | 7 | struct nvif_object ctrl; |
8 | }; | 8 | }; |
9 | 9 | ||
10 | static inline struct nouveau_sysfs * | 10 | static inline struct nouveau_sysfs * |
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 7e185c122750..53874b76b031 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c | |||
@@ -24,10 +24,6 @@ | |||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <subdev/fb.h> | ||
28 | #include <subdev/vm.h> | ||
29 | #include <subdev/instmem.h> | ||
30 | |||
31 | #include "nouveau_drm.h" | 27 | #include "nouveau_drm.h" |
32 | #include "nouveau_ttm.h" | 28 | #include "nouveau_ttm.h" |
33 | #include "nouveau_gem.h" | 29 | #include "nouveau_gem.h" |
@@ -36,7 +32,7 @@ static int | |||
36 | nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) | 32 | nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) |
37 | { | 33 | { |
38 | struct nouveau_drm *drm = nouveau_bdev(man->bdev); | 34 | struct nouveau_drm *drm = nouveau_bdev(man->bdev); |
39 | struct nouveau_fb *pfb = nouveau_fb(drm->device); | 35 | struct nouveau_fb *pfb = nvkm_fb(&drm->device); |
40 | man->priv = pfb; | 36 | man->priv = pfb; |
41 | return 0; | 37 | return 0; |
42 | } | 38 | } |
@@ -67,7 +63,7 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man, | |||
67 | struct ttm_mem_reg *mem) | 63 | struct ttm_mem_reg *mem) |
68 | { | 64 | { |
69 | struct nouveau_drm *drm = nouveau_bdev(man->bdev); | 65 | struct nouveau_drm *drm = nouveau_bdev(man->bdev); |
70 | struct nouveau_fb *pfb = nouveau_fb(drm->device); | 66 | struct nouveau_fb *pfb = nvkm_fb(&drm->device); |
71 | nouveau_mem_node_cleanup(mem->mm_node); | 67 | nouveau_mem_node_cleanup(mem->mm_node); |
72 | pfb->ram->put(pfb, (struct nouveau_mem **)&mem->mm_node); | 68 | pfb->ram->put(pfb, (struct nouveau_mem **)&mem->mm_node); |
73 | } | 69 | } |
@@ -80,7 +76,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, | |||
80 | struct ttm_mem_reg *mem) | 76 | struct ttm_mem_reg *mem) |
81 | { | 77 | { |
82 | struct nouveau_drm *drm = nouveau_bdev(man->bdev); | 78 | struct nouveau_drm *drm = nouveau_bdev(man->bdev); |
83 | struct nouveau_fb *pfb = nouveau_fb(drm->device); | 79 | struct nouveau_fb *pfb = nvkm_fb(&drm->device); |
84 | struct nouveau_bo *nvbo = nouveau_bo(bo); | 80 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
85 | struct nouveau_mem *node; | 81 | struct nouveau_mem *node; |
86 | u32 size_nc = 0; | 82 | u32 size_nc = 0; |
@@ -176,14 +172,13 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man, | |||
176 | 172 | ||
177 | node->page_shift = 12; | 173 | node->page_shift = 12; |
178 | 174 | ||
179 | switch (nv_device(drm->device)->card_type) { | 175 | switch (drm->device.info.family) { |
180 | case NV_50: | 176 | case NV_DEVICE_INFO_V0_TESLA: |
181 | if (nv_device(drm->device)->chipset != 0x50) | 177 | if (drm->device.info.chipset != 0x50) |
182 | node->memtype = (nvbo->tile_flags & 0x7f00) >> 8; | 178 | node->memtype = (nvbo->tile_flags & 0x7f00) >> 8; |
183 | break; | 179 | break; |
184 | case NV_C0: | 180 | case NV_DEVICE_INFO_V0_FERMI: |
185 | case NV_D0: | 181 | case NV_DEVICE_INFO_V0_KEPLER: |
186 | case NV_E0: | ||
187 | node->memtype = (nvbo->tile_flags & 0xff00) >> 8; | 182 | node->memtype = (nvbo->tile_flags & 0xff00) >> 8; |
188 | break; | 183 | break; |
189 | default: | 184 | default: |
@@ -208,12 +203,13 @@ const struct ttm_mem_type_manager_func nouveau_gart_manager = { | |||
208 | nouveau_gart_manager_debug | 203 | nouveau_gart_manager_debug |
209 | }; | 204 | }; |
210 | 205 | ||
206 | /*XXX*/ | ||
211 | #include <core/subdev/vm/nv04.h> | 207 | #include <core/subdev/vm/nv04.h> |
212 | static int | 208 | static int |
213 | nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) | 209 | nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) |
214 | { | 210 | { |
215 | struct nouveau_drm *drm = nouveau_bdev(man->bdev); | 211 | struct nouveau_drm *drm = nouveau_bdev(man->bdev); |
216 | struct nouveau_vmmgr *vmm = nouveau_vmmgr(drm->device); | 212 | struct nouveau_vmmgr *vmm = nvkm_vmmgr(&drm->device); |
217 | struct nv04_vmmgr_priv *priv = (void *)vmm; | 213 | struct nv04_vmmgr_priv *priv = (void *)vmm; |
218 | struct nouveau_vm *vm = NULL; | 214 | struct nouveau_vm *vm = NULL; |
219 | nouveau_vm_ref(priv->vm, &vm, NULL); | 215 | nouveau_vm_ref(priv->vm, &vm, NULL); |
@@ -357,12 +353,11 @@ int | |||
357 | nouveau_ttm_init(struct nouveau_drm *drm) | 353 | nouveau_ttm_init(struct nouveau_drm *drm) |
358 | { | 354 | { |
359 | struct drm_device *dev = drm->dev; | 355 | struct drm_device *dev = drm->dev; |
360 | struct nouveau_device *device = nv_device(drm->device); | ||
361 | u32 bits; | 356 | u32 bits; |
362 | int ret; | 357 | int ret; |
363 | 358 | ||
364 | bits = nouveau_vmmgr(drm->device)->dma_bits; | 359 | bits = nvkm_vmmgr(&drm->device)->dma_bits; |
365 | if (nv_device_is_pci(device)) { | 360 | if (nv_device_is_pci(nvkm_device(&drm->device))) { |
366 | if (drm->agp.stat == ENABLED || | 361 | if (drm->agp.stat == ENABLED || |
367 | !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits))) | 362 | !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits))) |
368 | bits = 32; | 363 | bits = 32; |
@@ -394,8 +389,7 @@ nouveau_ttm_init(struct nouveau_drm *drm) | |||
394 | } | 389 | } |
395 | 390 | ||
396 | /* VRAM init */ | 391 | /* VRAM init */ |
397 | drm->gem.vram_available = nouveau_fb(drm->device)->ram->size; | 392 | drm->gem.vram_available = drm->device.info.ram_user; |
398 | drm->gem.vram_available -= nouveau_instmem(drm->device)->reserved; | ||
399 | 393 | ||
400 | ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM, | 394 | ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM, |
401 | drm->gem.vram_available >> PAGE_SHIFT); | 395 | drm->gem.vram_available >> PAGE_SHIFT); |
@@ -404,12 +398,12 @@ nouveau_ttm_init(struct nouveau_drm *drm) | |||
404 | return ret; | 398 | return ret; |
405 | } | 399 | } |
406 | 400 | ||
407 | drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(device, 1), | 401 | drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvkm_device(&drm->device), 1), |
408 | nv_device_resource_len(device, 1)); | 402 | nv_device_resource_len(nvkm_device(&drm->device), 1)); |
409 | 403 | ||
410 | /* GART init */ | 404 | /* GART init */ |
411 | if (drm->agp.stat != ENABLED) { | 405 | if (drm->agp.stat != ENABLED) { |
412 | drm->gem.gart_available = nouveau_vmmgr(drm->device)->limit; | 406 | drm->gem.gart_available = nvkm_vmmgr(&drm->device)->limit; |
413 | } else { | 407 | } else { |
414 | drm->gem.gart_available = drm->agp.size; | 408 | drm->gem.gart_available = drm->agp.size; |
415 | } | 409 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c new file mode 100644 index 000000000000..cb1182d7e80e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_usif.c | |||
@@ -0,0 +1,384 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | ||
23 | */ | ||
24 | |||
25 | #include "nouveau_drm.h" | ||
26 | #include "nouveau_usif.h" | ||
27 | |||
28 | #include <nvif/notify.h> | ||
29 | #include <nvif/unpack.h> | ||
30 | #include <nvif/client.h> | ||
31 | #include <nvif/event.h> | ||
32 | #include <nvif/ioctl.h> | ||
33 | |||
34 | struct usif_notify_p { | ||
35 | struct drm_pending_event base; | ||
36 | struct { | ||
37 | struct drm_event base; | ||
38 | u8 data[]; | ||
39 | } e; | ||
40 | }; | ||
41 | |||
42 | struct usif_notify { | ||
43 | struct list_head head; | ||
44 | atomic_t enabled; | ||
45 | u32 handle; | ||
46 | u16 reply; | ||
47 | u8 route; | ||
48 | u64 token; | ||
49 | struct usif_notify_p *p; | ||
50 | }; | ||
51 | |||
52 | static inline struct usif_notify * | ||
53 | usif_notify_find(struct drm_file *filp, u32 handle) | ||
54 | { | ||
55 | struct nouveau_cli *cli = nouveau_cli(filp); | ||
56 | struct usif_notify *ntfy; | ||
57 | list_for_each_entry(ntfy, &cli->notifys, head) { | ||
58 | if (ntfy->handle == handle) | ||
59 | return ntfy; | ||
60 | } | ||
61 | return NULL; | ||
62 | } | ||
63 | |||
64 | static inline void | ||
65 | usif_notify_dtor(struct usif_notify *ntfy) | ||
66 | { | ||
67 | list_del(&ntfy->head); | ||
68 | kfree(ntfy); | ||
69 | } | ||
70 | |||
71 | int | ||
72 | usif_notify(const void *header, u32 length, const void *data, u32 size) | ||
73 | { | ||
74 | struct usif_notify *ntfy = NULL; | ||
75 | const union { | ||
76 | struct nvif_notify_rep_v0 v0; | ||
77 | } *rep = header; | ||
78 | struct drm_device *dev; | ||
79 | struct drm_file *filp; | ||
80 | unsigned long flags; | ||
81 | |||
82 | if (length == sizeof(rep->v0) && rep->v0.version == 0) { | ||
83 | if (WARN_ON(!(ntfy = (void *)(unsigned long)rep->v0.token))) | ||
84 | return NVIF_NOTIFY_DROP; | ||
85 | BUG_ON(rep->v0.route != NVDRM_NOTIFY_USIF); | ||
86 | } else | ||
87 | if (WARN_ON(1)) | ||
88 | return NVIF_NOTIFY_DROP; | ||
89 | |||
90 | if (WARN_ON(!ntfy->p || ntfy->reply != (length + size))) | ||
91 | return NVIF_NOTIFY_DROP; | ||
92 | filp = ntfy->p->base.file_priv; | ||
93 | dev = filp->minor->dev; | ||
94 | |||
95 | memcpy(&ntfy->p->e.data[0], header, length); | ||
96 | memcpy(&ntfy->p->e.data[length], data, size); | ||
97 | switch (rep->v0.version) { | ||
98 | case 0: { | ||
99 | struct nvif_notify_rep_v0 *rep = (void *)ntfy->p->e.data; | ||
100 | rep->route = ntfy->route; | ||
101 | rep->token = ntfy->token; | ||
102 | } | ||
103 | break; | ||
104 | default: | ||
105 | BUG_ON(1); | ||
106 | break; | ||
107 | } | ||
108 | |||
109 | spin_lock_irqsave(&dev->event_lock, flags); | ||
110 | if (!WARN_ON(filp->event_space < ntfy->p->e.base.length)) { | ||
111 | list_add_tail(&ntfy->p->base.link, &filp->event_list); | ||
112 | filp->event_space -= ntfy->p->e.base.length; | ||
113 | } | ||
114 | wake_up_interruptible(&filp->event_wait); | ||
115 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
116 | atomic_set(&ntfy->enabled, 0); | ||
117 | return NVIF_NOTIFY_DROP; | ||
118 | } | ||
119 | |||
120 | static int | ||
121 | usif_notify_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc) | ||
122 | { | ||
123 | struct nouveau_cli *cli = nouveau_cli(f); | ||
124 | struct nvif_client *client = &cli->base; | ||
125 | union { | ||
126 | struct nvif_ioctl_ntfy_new_v0 v0; | ||
127 | } *args = data; | ||
128 | union { | ||
129 | struct nvif_notify_req_v0 v0; | ||
130 | } *req; | ||
131 | struct usif_notify *ntfy; | ||
132 | int ret; | ||
133 | |||
134 | if (nvif_unpack(args->v0, 0, 0, true)) { | ||
135 | if (usif_notify_find(f, args->v0.index)) | ||
136 | return -EEXIST; | ||
137 | } else | ||
138 | return ret; | ||
139 | req = data; | ||
140 | |||
141 | if (!(ntfy = kmalloc(sizeof(*ntfy), GFP_KERNEL))) | ||
142 | return -ENOMEM; | ||
143 | atomic_set(&ntfy->enabled, 0); | ||
144 | |||
145 | if (nvif_unpack(req->v0, 0, 0, true)) { | ||
146 | ntfy->reply = sizeof(struct nvif_notify_rep_v0) + req->v0.reply; | ||
147 | ntfy->route = req->v0.route; | ||
148 | ntfy->token = req->v0.token; | ||
149 | req->v0.route = NVDRM_NOTIFY_USIF; | ||
150 | req->v0.token = (unsigned long)(void *)ntfy; | ||
151 | ret = nvif_client_ioctl(client, argv, argc); | ||
152 | req->v0.token = ntfy->token; | ||
153 | req->v0.route = ntfy->route; | ||
154 | ntfy->handle = args->v0.index; | ||
155 | } | ||
156 | |||
157 | if (ret == 0) | ||
158 | list_add(&ntfy->head, &cli->notifys); | ||
159 | if (ret) | ||
160 | kfree(ntfy); | ||
161 | return ret; | ||
162 | } | ||
163 | |||
164 | static int | ||
165 | usif_notify_del(struct drm_file *f, void *data, u32 size, void *argv, u32 argc) | ||
166 | { | ||
167 | struct nouveau_cli *cli = nouveau_cli(f); | ||
168 | struct nvif_client *client = &cli->base; | ||
169 | union { | ||
170 | struct nvif_ioctl_ntfy_del_v0 v0; | ||
171 | } *args = data; | ||
172 | struct usif_notify *ntfy; | ||
173 | int ret; | ||
174 | |||
175 | if (nvif_unpack(args->v0, 0, 0, true)) { | ||
176 | if (!(ntfy = usif_notify_find(f, args->v0.index))) | ||
177 | return -ENOENT; | ||
178 | } else | ||
179 | return ret; | ||
180 | |||
181 | ret = nvif_client_ioctl(client, argv, argc); | ||
182 | if (ret == 0) | ||
183 | usif_notify_dtor(ntfy); | ||
184 | return ret; | ||
185 | } | ||
186 | |||
187 | static int | ||
188 | usif_notify_get(struct drm_file *f, void *data, u32 size, void *argv, u32 argc) | ||
189 | { | ||
190 | struct nouveau_cli *cli = nouveau_cli(f); | ||
191 | struct nvif_client *client = &cli->base; | ||
192 | union { | ||
193 | struct nvif_ioctl_ntfy_del_v0 v0; | ||
194 | } *args = data; | ||
195 | struct usif_notify *ntfy; | ||
196 | int ret; | ||
197 | |||
198 | if (nvif_unpack(args->v0, 0, 0, true)) { | ||
199 | if (!(ntfy = usif_notify_find(f, args->v0.index))) | ||
200 | return -ENOENT; | ||
201 | } else | ||
202 | return ret; | ||
203 | |||
204 | if (atomic_xchg(&ntfy->enabled, 1)) | ||
205 | return 0; | ||
206 | |||
207 | ntfy->p = kmalloc(sizeof(*ntfy->p) + ntfy->reply, GFP_KERNEL); | ||
208 | if (ret = -ENOMEM, !ntfy->p) | ||
209 | goto done; | ||
210 | ntfy->p->base.event = &ntfy->p->e.base; | ||
211 | ntfy->p->base.file_priv = f; | ||
212 | ntfy->p->base.pid = current->pid; | ||
213 | ntfy->p->base.destroy =(void(*)(struct drm_pending_event *))kfree; | ||
214 | ntfy->p->e.base.type = DRM_NOUVEAU_EVENT_NVIF; | ||
215 | ntfy->p->e.base.length = sizeof(ntfy->p->e.base) + ntfy->reply; | ||
216 | |||
217 | ret = nvif_client_ioctl(client, argv, argc); | ||
218 | done: | ||
219 | if (ret) { | ||
220 | atomic_set(&ntfy->enabled, 0); | ||
221 | kfree(ntfy->p); | ||
222 | } | ||
223 | return ret; | ||
224 | } | ||
225 | |||
226 | static int | ||
227 | usif_notify_put(struct drm_file *f, void *data, u32 size, void *argv, u32 argc) | ||
228 | { | ||
229 | struct nouveau_cli *cli = nouveau_cli(f); | ||
230 | struct nvif_client *client = &cli->base; | ||
231 | union { | ||
232 | struct nvif_ioctl_ntfy_put_v0 v0; | ||
233 | } *args = data; | ||
234 | struct usif_notify *ntfy; | ||
235 | int ret; | ||
236 | |||
237 | if (nvif_unpack(args->v0, 0, 0, true)) { | ||
238 | if (!(ntfy = usif_notify_find(f, args->v0.index))) | ||
239 | return -ENOENT; | ||
240 | } else | ||
241 | return ret; | ||
242 | |||
243 | ret = nvif_client_ioctl(client, argv, argc); | ||
244 | if (ret == 0 && atomic_xchg(&ntfy->enabled, 0)) | ||
245 | kfree(ntfy->p); | ||
246 | return ret; | ||
247 | } | ||
248 | |||
249 | struct usif_object { | ||
250 | struct list_head head; | ||
251 | struct list_head ntfy; | ||
252 | u8 route; | ||
253 | u64 token; | ||
254 | }; | ||
255 | |||
256 | static void | ||
257 | usif_object_dtor(struct usif_object *object) | ||
258 | { | ||
259 | list_del(&object->head); | ||
260 | kfree(object); | ||
261 | } | ||
262 | |||
263 | static int | ||
264 | usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc) | ||
265 | { | ||
266 | struct nouveau_cli *cli = nouveau_cli(f); | ||
267 | struct nvif_client *client = &cli->base; | ||
268 | union { | ||
269 | struct nvif_ioctl_new_v0 v0; | ||
270 | } *args = data; | ||
271 | struct usif_object *object; | ||
272 | int ret; | ||
273 | |||
274 | if (!(object = kmalloc(sizeof(*object), GFP_KERNEL))) | ||
275 | return -ENOMEM; | ||
276 | list_add(&object->head, &cli->objects); | ||
277 | |||
278 | if (nvif_unpack(args->v0, 0, 0, true)) { | ||
279 | object->route = args->v0.route; | ||
280 | object->token = args->v0.token; | ||
281 | args->v0.route = NVDRM_OBJECT_USIF; | ||
282 | args->v0.token = (unsigned long)(void *)object; | ||
283 | ret = nvif_client_ioctl(client, argv, argc); | ||
284 | args->v0.token = object->token; | ||
285 | args->v0.route = object->route; | ||
286 | } | ||
287 | |||
288 | if (ret) | ||
289 | usif_object_dtor(object); | ||
290 | return ret; | ||
291 | } | ||
292 | |||
293 | int | ||
294 | usif_ioctl(struct drm_file *filp, void __user *user, u32 argc) | ||
295 | { | ||
296 | struct nouveau_cli *cli = nouveau_cli(filp); | ||
297 | struct nvif_client *client = &cli->base; | ||
298 | void *data = kmalloc(argc, GFP_KERNEL); | ||
299 | u32 size = argc; | ||
300 | union { | ||
301 | struct nvif_ioctl_v0 v0; | ||
302 | } *argv = data; | ||
303 | struct usif_object *object; | ||
304 | u8 owner; | ||
305 | int ret; | ||
306 | |||
307 | if (ret = -ENOMEM, !argv) | ||
308 | goto done; | ||
309 | if (ret = -EFAULT, copy_from_user(argv, user, size)) | ||
310 | goto done; | ||
311 | |||
312 | if (nvif_unpack(argv->v0, 0, 0, true)) { | ||
313 | /* block access to objects not created via this interface */ | ||
314 | owner = argv->v0.owner; | ||
315 | argv->v0.owner = NVDRM_OBJECT_USIF; | ||
316 | } else | ||
317 | goto done; | ||
318 | |||
319 | mutex_lock(&cli->mutex); | ||
320 | switch (argv->v0.type) { | ||
321 | case NVIF_IOCTL_V0_NEW: | ||
322 | /* ... except if we're creating children */ | ||
323 | argv->v0.owner = NVIF_IOCTL_V0_OWNER_ANY; | ||
324 | ret = usif_object_new(filp, data, size, argv, argc); | ||
325 | break; | ||
326 | case NVIF_IOCTL_V0_NTFY_NEW: | ||
327 | ret = usif_notify_new(filp, data, size, argv, argc); | ||
328 | break; | ||
329 | case NVIF_IOCTL_V0_NTFY_DEL: | ||
330 | ret = usif_notify_del(filp, data, size, argv, argc); | ||
331 | break; | ||
332 | case NVIF_IOCTL_V0_NTFY_GET: | ||
333 | ret = usif_notify_get(filp, data, size, argv, argc); | ||
334 | break; | ||
335 | case NVIF_IOCTL_V0_NTFY_PUT: | ||
336 | ret = usif_notify_put(filp, data, size, argv, argc); | ||
337 | break; | ||
338 | default: | ||
339 | ret = nvif_client_ioctl(client, argv, argc); | ||
340 | break; | ||
341 | } | ||
342 | if (argv->v0.route == NVDRM_OBJECT_USIF) { | ||
343 | object = (void *)(unsigned long)argv->v0.token; | ||
344 | argv->v0.route = object->route; | ||
345 | argv->v0.token = object->token; | ||
346 | if (ret == 0 && argv->v0.type == NVIF_IOCTL_V0_DEL) { | ||
347 | list_del(&object->head); | ||
348 | kfree(object); | ||
349 | } | ||
350 | } else { | ||
351 | argv->v0.route = NVIF_IOCTL_V0_ROUTE_HIDDEN; | ||
352 | argv->v0.token = 0; | ||
353 | } | ||
354 | argv->v0.owner = owner; | ||
355 | mutex_unlock(&cli->mutex); | ||
356 | |||
357 | if (copy_to_user(user, argv, argc)) | ||
358 | ret = -EFAULT; | ||
359 | done: | ||
360 | kfree(argv); | ||
361 | return ret; | ||
362 | } | ||
363 | |||
364 | void | ||
365 | usif_client_fini(struct nouveau_cli *cli) | ||
366 | { | ||
367 | struct usif_object *object, *otemp; | ||
368 | struct usif_notify *notify, *ntemp; | ||
369 | |||
370 | list_for_each_entry_safe(notify, ntemp, &cli->notifys, head) { | ||
371 | usif_notify_dtor(notify); | ||
372 | } | ||
373 | |||
374 | list_for_each_entry_safe(object, otemp, &cli->objects, head) { | ||
375 | usif_object_dtor(object); | ||
376 | } | ||
377 | } | ||
378 | |||
379 | void | ||
380 | usif_client_init(struct nouveau_cli *cli) | ||
381 | { | ||
382 | INIT_LIST_HEAD(&cli->objects); | ||
383 | INIT_LIST_HEAD(&cli->notifys); | ||
384 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.h b/drivers/gpu/drm/nouveau/nouveau_usif.h new file mode 100644 index 000000000000..c037e3ae8c70 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_usif.h | |||
@@ -0,0 +1,9 @@ | |||
1 | #ifndef __NOUVEAU_USIF_H__ | ||
2 | #define __NOUVEAU_USIF_H__ | ||
3 | |||
4 | void usif_client_init(struct nouveau_cli *); | ||
5 | void usif_client_fini(struct nouveau_cli *); | ||
6 | int usif_ioctl(struct drm_file *, void __user *, u32); | ||
7 | int usif_notify(const void *, u32, const void *, u32); | ||
8 | |||
9 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c index 4f4c3fec6916..18d55d447248 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c | |||
@@ -12,14 +12,16 @@ | |||
12 | static unsigned int | 12 | static unsigned int |
13 | nouveau_vga_set_decode(void *priv, bool state) | 13 | nouveau_vga_set_decode(void *priv, bool state) |
14 | { | 14 | { |
15 | struct nouveau_device *device = nouveau_dev(priv); | 15 | struct nvif_device *device = &nouveau_drm(priv)->device; |
16 | 16 | ||
17 | if (device->card_type == NV_40 && device->chipset >= 0x4c) | 17 | if (device->info.family == NV_DEVICE_INFO_V0_CURIE && |
18 | nv_wr32(device, 0x088060, state); | 18 | device->info.chipset >= 0x4c) |
19 | else if (device->chipset >= 0x40) | 19 | nvif_wr32(device, 0x088060, state); |
20 | nv_wr32(device, 0x088054, state); | ||
21 | else | 20 | else |
22 | nv_wr32(device, 0x001854, state); | 21 | if (device->info.chipset >= 0x40) |
22 | nvif_wr32(device, 0x088054, state); | ||
23 | else | ||
24 | nvif_wr32(device, 0x001854, state); | ||
23 | 25 | ||
24 | if (state) | 26 | if (state) |
25 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | | 27 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | |
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 8fe32bbed99a..4ef602c5469d 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c | |||
@@ -22,8 +22,6 @@ | |||
22 | * DEALINGS IN THE SOFTWARE. | 22 | * DEALINGS IN THE SOFTWARE. |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/object.h> | ||
26 | |||
27 | #include "nouveau_drm.h" | 25 | #include "nouveau_drm.h" |
28 | #include "nouveau_dma.h" | 26 | #include "nouveau_dma.h" |
29 | #include "nouveau_fbcon.h" | 27 | #include "nouveau_fbcon.h" |
@@ -141,8 +139,7 @@ nv04_fbcon_accel_init(struct fb_info *info) | |||
141 | struct drm_device *dev = nfbdev->dev; | 139 | struct drm_device *dev = nfbdev->dev; |
142 | struct nouveau_drm *drm = nouveau_drm(dev); | 140 | struct nouveau_drm *drm = nouveau_drm(dev); |
143 | struct nouveau_channel *chan = drm->channel; | 141 | struct nouveau_channel *chan = drm->channel; |
144 | struct nouveau_device *device = nv_device(drm->device); | 142 | struct nvif_device *device = &drm->device; |
145 | struct nouveau_object *object; | ||
146 | int surface_fmt, pattern_fmt, rect_fmt; | 143 | int surface_fmt, pattern_fmt, rect_fmt; |
147 | int ret; | 144 | int ret; |
148 | 145 | ||
@@ -174,35 +171,35 @@ nv04_fbcon_accel_init(struct fb_info *info) | |||
174 | return -EINVAL; | 171 | return -EINVAL; |
175 | } | 172 | } |
176 | 173 | ||
177 | ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvCtxSurf2D, | 174 | ret = nvif_object_init(chan->object, NULL, 0x0062, |
178 | device->card_type >= NV_10 ? 0x0062 : 0x0042, | 175 | device->info.family >= NV_DEVICE_INFO_V0_CELSIUS ? |
179 | NULL, 0, &object); | 176 | 0x0062 : 0x0042, NULL, 0, &nfbdev->surf2d); |
180 | if (ret) | 177 | if (ret) |
181 | return ret; | 178 | return ret; |
182 | 179 | ||
183 | ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvClipRect, | 180 | ret = nvif_object_init(chan->object, NULL, 0x0019, 0x0019, NULL, 0, |
184 | 0x0019, NULL, 0, &object); | 181 | &nfbdev->clip); |
185 | if (ret) | 182 | if (ret) |
186 | return ret; | 183 | return ret; |
187 | 184 | ||
188 | ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvRop, | 185 | ret = nvif_object_init(chan->object, NULL, 0x0043, 0x0043, NULL, 0, |
189 | 0x0043, NULL, 0, &object); | 186 | &nfbdev->rop); |
190 | if (ret) | 187 | if (ret) |
191 | return ret; | 188 | return ret; |
192 | 189 | ||
193 | ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImagePatt, | 190 | ret = nvif_object_init(chan->object, NULL, 0x0044, 0x0044, NULL, 0, |
194 | 0x0044, NULL, 0, &object); | 191 | &nfbdev->patt); |
195 | if (ret) | 192 | if (ret) |
196 | return ret; | 193 | return ret; |
197 | 194 | ||
198 | ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvGdiRect, | 195 | ret = nvif_object_init(chan->object, NULL, 0x004a, 0x004a, NULL, 0, |
199 | 0x004a, NULL, 0, &object); | 196 | &nfbdev->gdi); |
200 | if (ret) | 197 | if (ret) |
201 | return ret; | 198 | return ret; |
202 | 199 | ||
203 | ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImageBlit, | 200 | ret = nvif_object_init(chan->object, NULL, 0x005f, |
204 | device->chipset >= 0x11 ? 0x009f : 0x005f, | 201 | device->info.chipset >= 0x11 ? 0x009f : 0x005f, |
205 | NULL, 0, &object); | 202 | NULL, 0, &nfbdev->blit); |
206 | if (ret) | 203 | if (ret) |
207 | return ret; | 204 | return ret; |
208 | 205 | ||
@@ -212,10 +209,10 @@ nv04_fbcon_accel_init(struct fb_info *info) | |||
212 | } | 209 | } |
213 | 210 | ||
214 | BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1); | 211 | BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1); |
215 | OUT_RING(chan, NvCtxSurf2D); | 212 | OUT_RING(chan, nfbdev->surf2d.handle); |
216 | BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0184, 2); | 213 | BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0184, 2); |
217 | OUT_RING(chan, NvDmaFB); | 214 | OUT_RING(chan, chan->vram.handle); |
218 | OUT_RING(chan, NvDmaFB); | 215 | OUT_RING(chan, chan->vram.handle); |
219 | BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 4); | 216 | BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 4); |
220 | OUT_RING(chan, surface_fmt); | 217 | OUT_RING(chan, surface_fmt); |
221 | OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16)); | 218 | OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16)); |
@@ -223,12 +220,12 @@ nv04_fbcon_accel_init(struct fb_info *info) | |||
223 | OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); | 220 | OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); |
224 | 221 | ||
225 | BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1); | 222 | BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1); |
226 | OUT_RING(chan, NvRop); | 223 | OUT_RING(chan, nfbdev->rop.handle); |
227 | BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 1); | 224 | BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 1); |
228 | OUT_RING(chan, 0x55); | 225 | OUT_RING(chan, 0x55); |
229 | 226 | ||
230 | BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1); | 227 | BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1); |
231 | OUT_RING(chan, NvImagePatt); | 228 | OUT_RING(chan, nfbdev->patt.handle); |
232 | BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 8); | 229 | BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 8); |
233 | OUT_RING(chan, pattern_fmt); | 230 | OUT_RING(chan, pattern_fmt); |
234 | #ifdef __BIG_ENDIAN | 231 | #ifdef __BIG_ENDIAN |
@@ -244,18 +241,18 @@ nv04_fbcon_accel_init(struct fb_info *info) | |||
244 | OUT_RING(chan, ~0); | 241 | OUT_RING(chan, ~0); |
245 | 242 | ||
246 | BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1); | 243 | BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1); |
247 | OUT_RING(chan, NvClipRect); | 244 | OUT_RING(chan, nfbdev->clip.handle); |
248 | BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 2); | 245 | BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 2); |
249 | OUT_RING(chan, 0); | 246 | OUT_RING(chan, 0); |
250 | OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual); | 247 | OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual); |
251 | 248 | ||
252 | BEGIN_NV04(chan, NvSubImageBlit, 0x0000, 1); | 249 | BEGIN_NV04(chan, NvSubImageBlit, 0x0000, 1); |
253 | OUT_RING(chan, NvImageBlit); | 250 | OUT_RING(chan, nfbdev->blit.handle); |
254 | BEGIN_NV04(chan, NvSubImageBlit, 0x019c, 1); | 251 | BEGIN_NV04(chan, NvSubImageBlit, 0x019c, 1); |
255 | OUT_RING(chan, NvCtxSurf2D); | 252 | OUT_RING(chan, nfbdev->surf2d.handle); |
256 | BEGIN_NV04(chan, NvSubImageBlit, 0x02fc, 1); | 253 | BEGIN_NV04(chan, NvSubImageBlit, 0x02fc, 1); |
257 | OUT_RING(chan, 3); | 254 | OUT_RING(chan, 3); |
258 | if (device->chipset >= 0x11 /*XXX: oclass == 0x009f*/) { | 255 | if (device->info.chipset >= 0x11 /*XXX: oclass == 0x009f*/) { |
259 | BEGIN_NV04(chan, NvSubImageBlit, 0x0120, 3); | 256 | BEGIN_NV04(chan, NvSubImageBlit, 0x0120, 3); |
260 | OUT_RING(chan, 0); | 257 | OUT_RING(chan, 0); |
261 | OUT_RING(chan, 1); | 258 | OUT_RING(chan, 1); |
@@ -263,12 +260,12 @@ nv04_fbcon_accel_init(struct fb_info *info) | |||
263 | } | 260 | } |
264 | 261 | ||
265 | BEGIN_NV04(chan, NvSubGdiRect, 0x0000, 1); | 262 | BEGIN_NV04(chan, NvSubGdiRect, 0x0000, 1); |
266 | OUT_RING(chan, NvGdiRect); | 263 | OUT_RING(chan, nfbdev->gdi.handle); |
267 | BEGIN_NV04(chan, NvSubGdiRect, 0x0198, 1); | 264 | BEGIN_NV04(chan, NvSubGdiRect, 0x0198, 1); |
268 | OUT_RING(chan, NvCtxSurf2D); | 265 | OUT_RING(chan, nfbdev->surf2d.handle); |
269 | BEGIN_NV04(chan, NvSubGdiRect, 0x0188, 2); | 266 | BEGIN_NV04(chan, NvSubGdiRect, 0x0188, 2); |
270 | OUT_RING(chan, NvImagePatt); | 267 | OUT_RING(chan, nfbdev->patt.handle); |
271 | OUT_RING(chan, NvRop); | 268 | OUT_RING(chan, nfbdev->rop.handle); |
272 | BEGIN_NV04(chan, NvSubGdiRect, 0x0304, 1); | 269 | BEGIN_NV04(chan, NvSubGdiRect, 0x0304, 1); |
273 | OUT_RING(chan, 1); | 270 | OUT_RING(chan, 1); |
274 | BEGIN_NV04(chan, NvSubGdiRect, 0x0300, 1); | 271 | BEGIN_NV04(chan, NvSubGdiRect, 0x0300, 1); |
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c index 94eadd1dd10a..239c2c5a9615 100644 --- a/drivers/gpu/drm/nouveau/nv04_fence.c +++ b/drivers/gpu/drm/nouveau/nv04_fence.c | |||
@@ -22,8 +22,6 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <engine/fifo.h> | ||
26 | |||
27 | #include "nouveau_drm.h" | 25 | #include "nouveau_drm.h" |
28 | #include "nouveau_dma.h" | 26 | #include "nouveau_dma.h" |
29 | #include "nouveau_fence.h" | 27 | #include "nouveau_fence.h" |
@@ -59,7 +57,7 @@ nv04_fence_sync(struct nouveau_fence *fence, | |||
59 | static u32 | 57 | static u32 |
60 | nv04_fence_read(struct nouveau_channel *chan) | 58 | nv04_fence_read(struct nouveau_channel *chan) |
61 | { | 59 | { |
62 | struct nouveau_fifo_chan *fifo = (void *)chan->object; | 60 | struct nouveau_fifo_chan *fifo = nvkm_fifo_chan(chan);; |
63 | return atomic_read(&fifo->refcnt); | 61 | return atomic_read(&fifo->refcnt); |
64 | } | 62 | } |
65 | 63 | ||
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c index 06f434f03fba..4faaf0acf5d7 100644 --- a/drivers/gpu/drm/nouveau/nv10_fence.c +++ b/drivers/gpu/drm/nouveau/nv10_fence.c | |||
@@ -22,9 +22,6 @@ | |||
22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | 22 | * Authors: Ben Skeggs <bskeggs@redhat.com> |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/object.h> | ||
26 | #include <core/class.h> | ||
27 | |||
28 | #include "nouveau_drm.h" | 25 | #include "nouveau_drm.h" |
29 | #include "nouveau_dma.h" | 26 | #include "nouveau_dma.h" |
30 | #include "nv10_fence.h" | 27 | #include "nv10_fence.h" |
@@ -53,14 +50,18 @@ nv10_fence_sync(struct nouveau_fence *fence, | |||
53 | u32 | 50 | u32 |
54 | nv10_fence_read(struct nouveau_channel *chan) | 51 | nv10_fence_read(struct nouveau_channel *chan) |
55 | { | 52 | { |
56 | return nv_ro32(chan->object, 0x0048); | 53 | return nvif_rd32(chan, 0x0048); |
57 | } | 54 | } |
58 | 55 | ||
59 | void | 56 | void |
60 | nv10_fence_context_del(struct nouveau_channel *chan) | 57 | nv10_fence_context_del(struct nouveau_channel *chan) |
61 | { | 58 | { |
62 | struct nv10_fence_chan *fctx = chan->fence; | 59 | struct nv10_fence_chan *fctx = chan->fence; |
60 | int i; | ||
63 | nouveau_fence_context_del(&fctx->base); | 61 | nouveau_fence_context_del(&fctx->base); |
62 | for (i = 0; i < ARRAY_SIZE(fctx->head); i++) | ||
63 | nvif_object_fini(&fctx->head[i]); | ||
64 | nvif_object_fini(&fctx->sema); | ||
64 | chan->fence = NULL; | 65 | chan->fence = NULL; |
65 | kfree(fctx); | 66 | kfree(fctx); |
66 | } | 67 | } |
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.h b/drivers/gpu/drm/nouveau/nv10_fence.h index e5d9204826c2..a87259f3983a 100644 --- a/drivers/gpu/drm/nouveau/nv10_fence.h +++ b/drivers/gpu/drm/nouveau/nv10_fence.h | |||
@@ -1,12 +1,13 @@ | |||
1 | #ifndef __NV10_FENCE_H_ | 1 | #ifndef __NV10_FENCE_H_ |
2 | #define __NV10_FENCE_H_ | 2 | #define __NV10_FENCE_H_ |
3 | 3 | ||
4 | #include <core/os.h> | ||
5 | #include "nouveau_fence.h" | 4 | #include "nouveau_fence.h" |
6 | #include "nouveau_bo.h" | 5 | #include "nouveau_bo.h" |
7 | 6 | ||
8 | struct nv10_fence_chan { | 7 | struct nv10_fence_chan { |
9 | struct nouveau_fence_chan base; | 8 | struct nouveau_fence_chan base; |
9 | struct nvif_object sema; | ||
10 | struct nvif_object head[4]; | ||
10 | }; | 11 | }; |
11 | 12 | ||
12 | struct nv10_fence_priv { | 13 | struct nv10_fence_priv { |
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c index 22aa9963ea6f..ca907479f92f 100644 --- a/drivers/gpu/drm/nouveau/nv17_fence.c +++ b/drivers/gpu/drm/nouveau/nv17_fence.c | |||
@@ -22,8 +22,8 @@ | |||
22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | 22 | * Authors: Ben Skeggs <bskeggs@redhat.com> |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/object.h> | 25 | #include <nvif/os.h> |
26 | #include <core/class.h> | 26 | #include <nvif/class.h> |
27 | 27 | ||
28 | #include "nouveau_drm.h" | 28 | #include "nouveau_drm.h" |
29 | #include "nouveau_dma.h" | 29 | #include "nouveau_dma.h" |
@@ -33,11 +33,13 @@ int | |||
33 | nv17_fence_sync(struct nouveau_fence *fence, | 33 | nv17_fence_sync(struct nouveau_fence *fence, |
34 | struct nouveau_channel *prev, struct nouveau_channel *chan) | 34 | struct nouveau_channel *prev, struct nouveau_channel *chan) |
35 | { | 35 | { |
36 | struct nouveau_cli *cli = (void *)nvif_client(&prev->device->base); | ||
36 | struct nv10_fence_priv *priv = chan->drm->fence; | 37 | struct nv10_fence_priv *priv = chan->drm->fence; |
38 | struct nv10_fence_chan *fctx = chan->fence; | ||
37 | u32 value; | 39 | u32 value; |
38 | int ret; | 40 | int ret; |
39 | 41 | ||
40 | if (!mutex_trylock(&prev->cli->mutex)) | 42 | if (!mutex_trylock(&cli->mutex)) |
41 | return -EBUSY; | 43 | return -EBUSY; |
42 | 44 | ||
43 | spin_lock(&priv->lock); | 45 | spin_lock(&priv->lock); |
@@ -48,7 +50,7 @@ nv17_fence_sync(struct nouveau_fence *fence, | |||
48 | ret = RING_SPACE(prev, 5); | 50 | ret = RING_SPACE(prev, 5); |
49 | if (!ret) { | 51 | if (!ret) { |
50 | BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4); | 52 | BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4); |
51 | OUT_RING (prev, NvSema); | 53 | OUT_RING (prev, fctx->sema.handle); |
52 | OUT_RING (prev, 0); | 54 | OUT_RING (prev, 0); |
53 | OUT_RING (prev, value + 0); | 55 | OUT_RING (prev, value + 0); |
54 | OUT_RING (prev, value + 1); | 56 | OUT_RING (prev, value + 1); |
@@ -57,14 +59,14 @@ nv17_fence_sync(struct nouveau_fence *fence, | |||
57 | 59 | ||
58 | if (!ret && !(ret = RING_SPACE(chan, 5))) { | 60 | if (!ret && !(ret = RING_SPACE(chan, 5))) { |
59 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4); | 61 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4); |
60 | OUT_RING (chan, NvSema); | 62 | OUT_RING (chan, fctx->sema.handle); |
61 | OUT_RING (chan, 0); | 63 | OUT_RING (chan, 0); |
62 | OUT_RING (chan, value + 1); | 64 | OUT_RING (chan, value + 1); |
63 | OUT_RING (chan, value + 2); | 65 | OUT_RING (chan, value + 2); |
64 | FIRE_RING (chan); | 66 | FIRE_RING (chan); |
65 | } | 67 | } |
66 | 68 | ||
67 | mutex_unlock(&prev->cli->mutex); | 69 | mutex_unlock(&cli->mutex); |
68 | return 0; | 70 | return 0; |
69 | } | 71 | } |
70 | 72 | ||
@@ -74,7 +76,6 @@ nv17_fence_context_new(struct nouveau_channel *chan) | |||
74 | struct nv10_fence_priv *priv = chan->drm->fence; | 76 | struct nv10_fence_priv *priv = chan->drm->fence; |
75 | struct nv10_fence_chan *fctx; | 77 | struct nv10_fence_chan *fctx; |
76 | struct ttm_mem_reg *mem = &priv->bo->bo.mem; | 78 | struct ttm_mem_reg *mem = &priv->bo->bo.mem; |
77 | struct nouveau_object *object; | ||
78 | u32 start = mem->start * PAGE_SIZE; | 79 | u32 start = mem->start * PAGE_SIZE; |
79 | u32 limit = start + mem->size - 1; | 80 | u32 limit = start + mem->size - 1; |
80 | int ret = 0; | 81 | int ret = 0; |
@@ -88,15 +89,14 @@ nv17_fence_context_new(struct nouveau_channel *chan) | |||
88 | fctx->base.read = nv10_fence_read; | 89 | fctx->base.read = nv10_fence_read; |
89 | fctx->base.sync = nv17_fence_sync; | 90 | fctx->base.sync = nv17_fence_sync; |
90 | 91 | ||
91 | ret = nouveau_object_new(nv_object(chan->cli), chan->handle, | 92 | ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_FROM_MEMORY, |
92 | NvSema, 0x0002, | 93 | &(struct nv_dma_v0) { |
93 | &(struct nv_dma_class) { | 94 | .target = NV_DMA_V0_TARGET_VRAM, |
94 | .flags = NV_DMA_TARGET_VRAM | | 95 | .access = NV_DMA_V0_ACCESS_RDWR, |
95 | NV_DMA_ACCESS_RDWR, | ||
96 | .start = start, | 96 | .start = start, |
97 | .limit = limit, | 97 | .limit = limit, |
98 | }, sizeof(struct nv_dma_class), | 98 | }, sizeof(struct nv_dma_v0), |
99 | &object); | 99 | &fctx->sema); |
100 | if (ret) | 100 | if (ret) |
101 | nv10_fence_context_del(chan); | 101 | nv10_fence_context_del(chan); |
102 | return ret; | 102 | return ret; |
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 4c534b7b04da..03949eaa629f 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
@@ -28,6 +28,8 @@ | |||
28 | #include <drm/drm_crtc_helper.h> | 28 | #include <drm/drm_crtc_helper.h> |
29 | #include <drm/drm_dp_helper.h> | 29 | #include <drm/drm_dp_helper.h> |
30 | 30 | ||
31 | #include <nvif/class.h> | ||
32 | |||
31 | #include "nouveau_drm.h" | 33 | #include "nouveau_drm.h" |
32 | #include "nouveau_dma.h" | 34 | #include "nouveau_dma.h" |
33 | #include "nouveau_gem.h" | 35 | #include "nouveau_gem.h" |
@@ -37,15 +39,6 @@ | |||
37 | #include "nouveau_fence.h" | 39 | #include "nouveau_fence.h" |
38 | #include "nv50_display.h" | 40 | #include "nv50_display.h" |
39 | 41 | ||
40 | #include <core/client.h> | ||
41 | #include <core/gpuobj.h> | ||
42 | #include <core/class.h> | ||
43 | |||
44 | #include <subdev/timer.h> | ||
45 | #include <subdev/bar.h> | ||
46 | #include <subdev/fb.h> | ||
47 | #include <subdev/i2c.h> | ||
48 | |||
49 | #define EVO_DMA_NR 9 | 42 | #define EVO_DMA_NR 9 |
50 | 43 | ||
51 | #define EVO_MASTER (0x00) | 44 | #define EVO_MASTER (0x00) |
@@ -60,45 +53,34 @@ | |||
60 | #define EVO_FLIP_SEM0(c) EVO_SYNC((c) + 1, 0x00) | 53 | #define EVO_FLIP_SEM0(c) EVO_SYNC((c) + 1, 0x00) |
61 | #define EVO_FLIP_SEM1(c) EVO_SYNC((c) + 1, 0x10) | 54 | #define EVO_FLIP_SEM1(c) EVO_SYNC((c) + 1, 0x10) |
62 | 55 | ||
63 | #define EVO_CORE_HANDLE (0xd1500000) | ||
64 | #define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i)) | ||
65 | #define EVO_CHAN_OCLASS(t,c) ((nv_hclass(c) & 0xff00) | ((t) & 0x00ff)) | ||
66 | #define EVO_PUSH_HANDLE(t,i) (0xd15b0000 | (i) | \ | ||
67 | (((NV50_DISP_##t##_CLASS) & 0x00ff) << 8)) | ||
68 | |||
69 | /****************************************************************************** | 56 | /****************************************************************************** |
70 | * EVO channel | 57 | * EVO channel |
71 | *****************************************************************************/ | 58 | *****************************************************************************/ |
72 | 59 | ||
73 | struct nv50_chan { | 60 | struct nv50_chan { |
74 | struct nouveau_object *user; | 61 | struct nvif_object user; |
75 | u32 handle; | ||
76 | }; | 62 | }; |
77 | 63 | ||
78 | static int | 64 | static int |
79 | nv50_chan_create(struct nouveau_object *core, u32 bclass, u8 head, | 65 | nv50_chan_create(struct nvif_object *disp, const u32 *oclass, u8 head, |
80 | void *data, u32 size, struct nv50_chan *chan) | 66 | void *data, u32 size, struct nv50_chan *chan) |
81 | { | 67 | { |
82 | struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); | 68 | while (oclass[0]) { |
83 | const u32 oclass = EVO_CHAN_OCLASS(bclass, core); | 69 | int ret = nvif_object_init(disp, NULL, (oclass[0] << 16) | head, |
84 | const u32 handle = EVO_CHAN_HANDLE(bclass, head); | 70 | oclass[0], data, size, |
85 | int ret; | 71 | &chan->user); |
86 | 72 | if (oclass++, ret == 0) { | |
87 | ret = nouveau_object_new(client, EVO_CORE_HANDLE, handle, | 73 | nvif_object_map(&chan->user); |
88 | oclass, data, size, &chan->user); | 74 | return ret; |
89 | if (ret) | 75 | } |
90 | return ret; | 76 | } |
91 | 77 | return -ENOSYS; | |
92 | chan->handle = handle; | ||
93 | return 0; | ||
94 | } | 78 | } |
95 | 79 | ||
96 | static void | 80 | static void |
97 | nv50_chan_destroy(struct nouveau_object *core, struct nv50_chan *chan) | 81 | nv50_chan_destroy(struct nv50_chan *chan) |
98 | { | 82 | { |
99 | struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); | 83 | nvif_object_fini(&chan->user); |
100 | if (chan->handle) | ||
101 | nouveau_object_del(client, EVO_CORE_HANDLE, chan->handle); | ||
102 | } | 84 | } |
103 | 85 | ||
104 | /****************************************************************************** | 86 | /****************************************************************************** |
@@ -110,16 +92,70 @@ struct nv50_pioc { | |||
110 | }; | 92 | }; |
111 | 93 | ||
112 | static void | 94 | static void |
113 | nv50_pioc_destroy(struct nouveau_object *core, struct nv50_pioc *pioc) | 95 | nv50_pioc_destroy(struct nv50_pioc *pioc) |
114 | { | 96 | { |
115 | nv50_chan_destroy(core, &pioc->base); | 97 | nv50_chan_destroy(&pioc->base); |
116 | } | 98 | } |
117 | 99 | ||
118 | static int | 100 | static int |
119 | nv50_pioc_create(struct nouveau_object *core, u32 bclass, u8 head, | 101 | nv50_pioc_create(struct nvif_object *disp, const u32 *oclass, u8 head, |
120 | void *data, u32 size, struct nv50_pioc *pioc) | 102 | void *data, u32 size, struct nv50_pioc *pioc) |
121 | { | 103 | { |
122 | return nv50_chan_create(core, bclass, head, data, size, &pioc->base); | 104 | return nv50_chan_create(disp, oclass, head, data, size, &pioc->base); |
105 | } | ||
106 | |||
107 | /****************************************************************************** | ||
108 | * Cursor Immediate | ||
109 | *****************************************************************************/ | ||
110 | |||
111 | struct nv50_curs { | ||
112 | struct nv50_pioc base; | ||
113 | }; | ||
114 | |||
115 | static int | ||
116 | nv50_curs_create(struct nvif_object *disp, int head, struct nv50_curs *curs) | ||
117 | { | ||
118 | struct nv50_disp_cursor_v0 args = { | ||
119 | .head = head, | ||
120 | }; | ||
121 | static const u32 oclass[] = { | ||
122 | GK104_DISP_CURSOR, | ||
123 | GF110_DISP_CURSOR, | ||
124 | GT214_DISP_CURSOR, | ||
125 | G82_DISP_CURSOR, | ||
126 | NV50_DISP_CURSOR, | ||
127 | 0 | ||
128 | }; | ||
129 | |||
130 | return nv50_pioc_create(disp, oclass, head, &args, sizeof(args), | ||
131 | &curs->base); | ||
132 | } | ||
133 | |||
134 | /****************************************************************************** | ||
135 | * Overlay Immediate | ||
136 | *****************************************************************************/ | ||
137 | |||
138 | struct nv50_oimm { | ||
139 | struct nv50_pioc base; | ||
140 | }; | ||
141 | |||
142 | static int | ||
143 | nv50_oimm_create(struct nvif_object *disp, int head, struct nv50_oimm *oimm) | ||
144 | { | ||
145 | struct nv50_disp_cursor_v0 args = { | ||
146 | .head = head, | ||
147 | }; | ||
148 | static const u32 oclass[] = { | ||
149 | GK104_DISP_OVERLAY, | ||
150 | GF110_DISP_OVERLAY, | ||
151 | GT214_DISP_OVERLAY, | ||
152 | G82_DISP_OVERLAY, | ||
153 | NV50_DISP_OVERLAY, | ||
154 | 0 | ||
155 | }; | ||
156 | |||
157 | return nv50_pioc_create(disp, oclass, head, &args, sizeof(args), | ||
158 | &oimm->base); | ||
123 | } | 159 | } |
124 | 160 | ||
125 | /****************************************************************************** | 161 | /****************************************************************************** |
@@ -131,6 +167,9 @@ struct nv50_dmac { | |||
131 | dma_addr_t handle; | 167 | dma_addr_t handle; |
132 | u32 *ptr; | 168 | u32 *ptr; |
133 | 169 | ||
170 | struct nvif_object sync; | ||
171 | struct nvif_object vram; | ||
172 | |||
134 | /* Protects against concurrent pushbuf access to this channel, lock is | 173 | /* Protects against concurrent pushbuf access to this channel, lock is |
135 | * grabbed by evo_wait (if the pushbuf reservation is successful) and | 174 | * grabbed by evo_wait (if the pushbuf reservation is successful) and |
136 | * dropped again by evo_kick. */ | 175 | * dropped again by evo_kick. */ |
@@ -138,207 +177,113 @@ struct nv50_dmac { | |||
138 | }; | 177 | }; |
139 | 178 | ||
140 | static void | 179 | static void |
141 | nv50_dmac_destroy(struct nouveau_object *core, struct nv50_dmac *dmac) | 180 | nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp) |
142 | { | 181 | { |
182 | nvif_object_fini(&dmac->vram); | ||
183 | nvif_object_fini(&dmac->sync); | ||
184 | |||
185 | nv50_chan_destroy(&dmac->base); | ||
186 | |||
143 | if (dmac->ptr) { | 187 | if (dmac->ptr) { |
144 | struct pci_dev *pdev = nv_device(core)->pdev; | 188 | struct pci_dev *pdev = nvkm_device(nvif_device(disp))->pdev; |
145 | pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle); | 189 | pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle); |
146 | } | 190 | } |
147 | |||
148 | nv50_chan_destroy(core, &dmac->base); | ||
149 | } | ||
150 | |||
151 | static int | ||
152 | nv50_dmac_create_fbdma(struct nouveau_object *core, u32 parent) | ||
153 | { | ||
154 | struct nouveau_fb *pfb = nouveau_fb(core); | ||
155 | struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); | ||
156 | struct nouveau_object *object; | ||
157 | int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP, | ||
158 | NV_DMA_IN_MEMORY_CLASS, | ||
159 | &(struct nv_dma_class) { | ||
160 | .flags = NV_DMA_TARGET_VRAM | | ||
161 | NV_DMA_ACCESS_RDWR, | ||
162 | .start = 0, | ||
163 | .limit = pfb->ram->size - 1, | ||
164 | .conf0 = NV50_DMA_CONF0_ENABLE | | ||
165 | NV50_DMA_CONF0_PART_256, | ||
166 | }, sizeof(struct nv_dma_class), &object); | ||
167 | if (ret) | ||
168 | return ret; | ||
169 | |||
170 | ret = nouveau_object_new(client, parent, NvEvoFB16, | ||
171 | NV_DMA_IN_MEMORY_CLASS, | ||
172 | &(struct nv_dma_class) { | ||
173 | .flags = NV_DMA_TARGET_VRAM | | ||
174 | NV_DMA_ACCESS_RDWR, | ||
175 | .start = 0, | ||
176 | .limit = pfb->ram->size - 1, | ||
177 | .conf0 = NV50_DMA_CONF0_ENABLE | 0x70 | | ||
178 | NV50_DMA_CONF0_PART_256, | ||
179 | }, sizeof(struct nv_dma_class), &object); | ||
180 | if (ret) | ||
181 | return ret; | ||
182 | |||
183 | ret = nouveau_object_new(client, parent, NvEvoFB32, | ||
184 | NV_DMA_IN_MEMORY_CLASS, | ||
185 | &(struct nv_dma_class) { | ||
186 | .flags = NV_DMA_TARGET_VRAM | | ||
187 | NV_DMA_ACCESS_RDWR, | ||
188 | .start = 0, | ||
189 | .limit = pfb->ram->size - 1, | ||
190 | .conf0 = NV50_DMA_CONF0_ENABLE | 0x7a | | ||
191 | NV50_DMA_CONF0_PART_256, | ||
192 | }, sizeof(struct nv_dma_class), &object); | ||
193 | return ret; | ||
194 | } | ||
195 | |||
196 | static int | ||
197 | nvc0_dmac_create_fbdma(struct nouveau_object *core, u32 parent) | ||
198 | { | ||
199 | struct nouveau_fb *pfb = nouveau_fb(core); | ||
200 | struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); | ||
201 | struct nouveau_object *object; | ||
202 | int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP, | ||
203 | NV_DMA_IN_MEMORY_CLASS, | ||
204 | &(struct nv_dma_class) { | ||
205 | .flags = NV_DMA_TARGET_VRAM | | ||
206 | NV_DMA_ACCESS_RDWR, | ||
207 | .start = 0, | ||
208 | .limit = pfb->ram->size - 1, | ||
209 | .conf0 = NVC0_DMA_CONF0_ENABLE, | ||
210 | }, sizeof(struct nv_dma_class), &object); | ||
211 | if (ret) | ||
212 | return ret; | ||
213 | |||
214 | ret = nouveau_object_new(client, parent, NvEvoFB16, | ||
215 | NV_DMA_IN_MEMORY_CLASS, | ||
216 | &(struct nv_dma_class) { | ||
217 | .flags = NV_DMA_TARGET_VRAM | | ||
218 | NV_DMA_ACCESS_RDWR, | ||
219 | .start = 0, | ||
220 | .limit = pfb->ram->size - 1, | ||
221 | .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe, | ||
222 | }, sizeof(struct nv_dma_class), &object); | ||
223 | if (ret) | ||
224 | return ret; | ||
225 | |||
226 | ret = nouveau_object_new(client, parent, NvEvoFB32, | ||
227 | NV_DMA_IN_MEMORY_CLASS, | ||
228 | &(struct nv_dma_class) { | ||
229 | .flags = NV_DMA_TARGET_VRAM | | ||
230 | NV_DMA_ACCESS_RDWR, | ||
231 | .start = 0, | ||
232 | .limit = pfb->ram->size - 1, | ||
233 | .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe, | ||
234 | }, sizeof(struct nv_dma_class), &object); | ||
235 | return ret; | ||
236 | } | ||
237 | |||
238 | static int | ||
239 | nvd0_dmac_create_fbdma(struct nouveau_object *core, u32 parent) | ||
240 | { | ||
241 | struct nouveau_fb *pfb = nouveau_fb(core); | ||
242 | struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); | ||
243 | struct nouveau_object *object; | ||
244 | int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP, | ||
245 | NV_DMA_IN_MEMORY_CLASS, | ||
246 | &(struct nv_dma_class) { | ||
247 | .flags = NV_DMA_TARGET_VRAM | | ||
248 | NV_DMA_ACCESS_RDWR, | ||
249 | .start = 0, | ||
250 | .limit = pfb->ram->size - 1, | ||
251 | .conf0 = NVD0_DMA_CONF0_ENABLE | | ||
252 | NVD0_DMA_CONF0_PAGE_LP, | ||
253 | }, sizeof(struct nv_dma_class), &object); | ||
254 | if (ret) | ||
255 | return ret; | ||
256 | |||
257 | ret = nouveau_object_new(client, parent, NvEvoFB32, | ||
258 | NV_DMA_IN_MEMORY_CLASS, | ||
259 | &(struct nv_dma_class) { | ||
260 | .flags = NV_DMA_TARGET_VRAM | | ||
261 | NV_DMA_ACCESS_RDWR, | ||
262 | .start = 0, | ||
263 | .limit = pfb->ram->size - 1, | ||
264 | .conf0 = NVD0_DMA_CONF0_ENABLE | 0xfe | | ||
265 | NVD0_DMA_CONF0_PAGE_LP, | ||
266 | }, sizeof(struct nv_dma_class), &object); | ||
267 | return ret; | ||
268 | } | 191 | } |
269 | 192 | ||
270 | static int | 193 | static int |
271 | nv50_dmac_create(struct nouveau_object *core, u32 bclass, u8 head, | 194 | nv50_dmac_create(struct nvif_object *disp, const u32 *oclass, u8 head, |
272 | void *data, u32 size, u64 syncbuf, | 195 | void *data, u32 size, u64 syncbuf, |
273 | struct nv50_dmac *dmac) | 196 | struct nv50_dmac *dmac) |
274 | { | 197 | { |
275 | struct nouveau_fb *pfb = nouveau_fb(core); | 198 | struct nvif_device *device = nvif_device(disp); |
276 | struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); | 199 | struct nv50_disp_core_channel_dma_v0 *args = data; |
277 | struct nouveau_object *object; | 200 | struct nvif_object pushbuf; |
278 | u32 pushbuf = *(u32 *)data; | ||
279 | int ret; | 201 | int ret; |
280 | 202 | ||
281 | mutex_init(&dmac->lock); | 203 | mutex_init(&dmac->lock); |
282 | 204 | ||
283 | dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE, | 205 | dmac->ptr = pci_alloc_consistent(nvkm_device(device)->pdev, |
284 | &dmac->handle); | 206 | PAGE_SIZE, &dmac->handle); |
285 | if (!dmac->ptr) | 207 | if (!dmac->ptr) |
286 | return -ENOMEM; | 208 | return -ENOMEM; |
287 | 209 | ||
288 | ret = nouveau_object_new(client, NVDRM_DEVICE, pushbuf, | 210 | ret = nvif_object_init(nvif_object(device), NULL, |
289 | NV_DMA_FROM_MEMORY_CLASS, | 211 | args->pushbuf, NV_DMA_FROM_MEMORY, |
290 | &(struct nv_dma_class) { | 212 | &(struct nv_dma_v0) { |
291 | .flags = NV_DMA_TARGET_PCI_US | | 213 | .target = NV_DMA_V0_TARGET_PCI_US, |
292 | NV_DMA_ACCESS_RD, | 214 | .access = NV_DMA_V0_ACCESS_RD, |
293 | .start = dmac->handle + 0x0000, | 215 | .start = dmac->handle + 0x0000, |
294 | .limit = dmac->handle + 0x0fff, | 216 | .limit = dmac->handle + 0x0fff, |
295 | }, sizeof(struct nv_dma_class), &object); | 217 | }, sizeof(struct nv_dma_v0), &pushbuf); |
296 | if (ret) | 218 | if (ret) |
297 | return ret; | 219 | return ret; |
298 | 220 | ||
299 | ret = nv50_chan_create(core, bclass, head, data, size, &dmac->base); | 221 | ret = nv50_chan_create(disp, oclass, head, data, size, &dmac->base); |
222 | nvif_object_fini(&pushbuf); | ||
300 | if (ret) | 223 | if (ret) |
301 | return ret; | 224 | return ret; |
302 | 225 | ||
303 | ret = nouveau_object_new(client, dmac->base.handle, NvEvoSync, | 226 | ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000000, |
304 | NV_DMA_IN_MEMORY_CLASS, | 227 | NV_DMA_IN_MEMORY, |
305 | &(struct nv_dma_class) { | 228 | &(struct nv_dma_v0) { |
306 | .flags = NV_DMA_TARGET_VRAM | | 229 | .target = NV_DMA_V0_TARGET_VRAM, |
307 | NV_DMA_ACCESS_RDWR, | 230 | .access = NV_DMA_V0_ACCESS_RDWR, |
308 | .start = syncbuf + 0x0000, | 231 | .start = syncbuf + 0x0000, |
309 | .limit = syncbuf + 0x0fff, | 232 | .limit = syncbuf + 0x0fff, |
310 | }, sizeof(struct nv_dma_class), &object); | 233 | }, sizeof(struct nv_dma_v0), |
234 | &dmac->sync); | ||
311 | if (ret) | 235 | if (ret) |
312 | return ret; | 236 | return ret; |
313 | 237 | ||
314 | ret = nouveau_object_new(client, dmac->base.handle, NvEvoVRAM, | 238 | ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000001, |
315 | NV_DMA_IN_MEMORY_CLASS, | 239 | NV_DMA_IN_MEMORY, |
316 | &(struct nv_dma_class) { | 240 | &(struct nv_dma_v0) { |
317 | .flags = NV_DMA_TARGET_VRAM | | 241 | .target = NV_DMA_V0_TARGET_VRAM, |
318 | NV_DMA_ACCESS_RDWR, | 242 | .access = NV_DMA_V0_ACCESS_RDWR, |
319 | .start = 0, | 243 | .start = 0, |
320 | .limit = pfb->ram->size - 1, | 244 | .limit = device->info.ram_user - 1, |
321 | }, sizeof(struct nv_dma_class), &object); | 245 | }, sizeof(struct nv_dma_v0), |
246 | &dmac->vram); | ||
322 | if (ret) | 247 | if (ret) |
323 | return ret; | 248 | return ret; |
324 | 249 | ||
325 | if (nv_device(core)->card_type < NV_C0) | ||
326 | ret = nv50_dmac_create_fbdma(core, dmac->base.handle); | ||
327 | else | ||
328 | if (nv_device(core)->card_type < NV_D0) | ||
329 | ret = nvc0_dmac_create_fbdma(core, dmac->base.handle); | ||
330 | else | ||
331 | ret = nvd0_dmac_create_fbdma(core, dmac->base.handle); | ||
332 | return ret; | 250 | return ret; |
333 | } | 251 | } |
334 | 252 | ||
253 | /****************************************************************************** | ||
254 | * Core | ||
255 | *****************************************************************************/ | ||
256 | |||
335 | struct nv50_mast { | 257 | struct nv50_mast { |
336 | struct nv50_dmac base; | 258 | struct nv50_dmac base; |
337 | }; | 259 | }; |
338 | 260 | ||
339 | struct nv50_curs { | 261 | static int |
340 | struct nv50_pioc base; | 262 | nv50_core_create(struct nvif_object *disp, u64 syncbuf, struct nv50_mast *core) |
341 | }; | 263 | { |
264 | struct nv50_disp_core_channel_dma_v0 args = { | ||
265 | .pushbuf = 0xb0007d00, | ||
266 | }; | ||
267 | static const u32 oclass[] = { | ||
268 | GM107_DISP_CORE_CHANNEL_DMA, | ||
269 | GK110_DISP_CORE_CHANNEL_DMA, | ||
270 | GK104_DISP_CORE_CHANNEL_DMA, | ||
271 | GF110_DISP_CORE_CHANNEL_DMA, | ||
272 | GT214_DISP_CORE_CHANNEL_DMA, | ||
273 | GT206_DISP_CORE_CHANNEL_DMA, | ||
274 | GT200_DISP_CORE_CHANNEL_DMA, | ||
275 | G82_DISP_CORE_CHANNEL_DMA, | ||
276 | NV50_DISP_CORE_CHANNEL_DMA, | ||
277 | 0 | ||
278 | }; | ||
279 | |||
280 | return nv50_dmac_create(disp, oclass, 0, &args, sizeof(args), syncbuf, | ||
281 | &core->base); | ||
282 | } | ||
283 | |||
284 | /****************************************************************************** | ||
285 | * Base | ||
286 | *****************************************************************************/ | ||
342 | 287 | ||
343 | struct nv50_sync { | 288 | struct nv50_sync { |
344 | struct nv50_dmac base; | 289 | struct nv50_dmac base; |
@@ -346,13 +291,58 @@ struct nv50_sync { | |||
346 | u32 data; | 291 | u32 data; |
347 | }; | 292 | }; |
348 | 293 | ||
294 | static int | ||
295 | nv50_base_create(struct nvif_object *disp, int head, u64 syncbuf, | ||
296 | struct nv50_sync *base) | ||
297 | { | ||
298 | struct nv50_disp_base_channel_dma_v0 args = { | ||
299 | .pushbuf = 0xb0007c00 | head, | ||
300 | .head = head, | ||
301 | }; | ||
302 | static const u32 oclass[] = { | ||
303 | GK110_DISP_BASE_CHANNEL_DMA, | ||
304 | GK104_DISP_BASE_CHANNEL_DMA, | ||
305 | GF110_DISP_BASE_CHANNEL_DMA, | ||
306 | GT214_DISP_BASE_CHANNEL_DMA, | ||
307 | GT200_DISP_BASE_CHANNEL_DMA, | ||
308 | G82_DISP_BASE_CHANNEL_DMA, | ||
309 | NV50_DISP_BASE_CHANNEL_DMA, | ||
310 | 0 | ||
311 | }; | ||
312 | |||
313 | return nv50_dmac_create(disp, oclass, head, &args, sizeof(args), | ||
314 | syncbuf, &base->base); | ||
315 | } | ||
316 | |||
317 | /****************************************************************************** | ||
318 | * Overlay | ||
319 | *****************************************************************************/ | ||
320 | |||
349 | struct nv50_ovly { | 321 | struct nv50_ovly { |
350 | struct nv50_dmac base; | 322 | struct nv50_dmac base; |
351 | }; | 323 | }; |
352 | 324 | ||
353 | struct nv50_oimm { | 325 | static int |
354 | struct nv50_pioc base; | 326 | nv50_ovly_create(struct nvif_object *disp, int head, u64 syncbuf, |
355 | }; | 327 | struct nv50_ovly *ovly) |
328 | { | ||
329 | struct nv50_disp_overlay_channel_dma_v0 args = { | ||
330 | .pushbuf = 0xb0007e00 | head, | ||
331 | .head = head, | ||
332 | }; | ||
333 | static const u32 oclass[] = { | ||
334 | GK104_DISP_OVERLAY_CONTROL_DMA, | ||
335 | GF110_DISP_OVERLAY_CONTROL_DMA, | ||
336 | GT214_DISP_OVERLAY_CHANNEL_DMA, | ||
337 | GT200_DISP_OVERLAY_CHANNEL_DMA, | ||
338 | G82_DISP_OVERLAY_CHANNEL_DMA, | ||
339 | NV50_DISP_OVERLAY_CHANNEL_DMA, | ||
340 | 0 | ||
341 | }; | ||
342 | |||
343 | return nv50_dmac_create(disp, oclass, head, &args, sizeof(args), | ||
344 | syncbuf, &ovly->base); | ||
345 | } | ||
356 | 346 | ||
357 | struct nv50_head { | 347 | struct nv50_head { |
358 | struct nouveau_crtc base; | 348 | struct nouveau_crtc base; |
@@ -369,13 +359,19 @@ struct nv50_head { | |||
369 | #define nv50_ovly(c) (&nv50_head(c)->ovly) | 359 | #define nv50_ovly(c) (&nv50_head(c)->ovly) |
370 | #define nv50_oimm(c) (&nv50_head(c)->oimm) | 360 | #define nv50_oimm(c) (&nv50_head(c)->oimm) |
371 | #define nv50_chan(c) (&(c)->base.base) | 361 | #define nv50_chan(c) (&(c)->base.base) |
372 | #define nv50_vers(c) nv_mclass(nv50_chan(c)->user) | 362 | #define nv50_vers(c) nv50_chan(c)->user.oclass |
363 | |||
364 | struct nv50_fbdma { | ||
365 | struct list_head head; | ||
366 | struct nvif_object core; | ||
367 | struct nvif_object base[4]; | ||
368 | }; | ||
373 | 369 | ||
374 | struct nv50_disp { | 370 | struct nv50_disp { |
375 | struct nouveau_object *core; | 371 | struct nvif_object *disp; |
376 | struct nv50_mast mast; | 372 | struct nv50_mast mast; |
377 | 373 | ||
378 | u32 modeset; | 374 | struct list_head fbdma; |
379 | 375 | ||
380 | struct nouveau_bo *sync; | 376 | struct nouveau_bo *sync; |
381 | }; | 377 | }; |
@@ -401,16 +397,16 @@ static u32 * | |||
401 | evo_wait(void *evoc, int nr) | 397 | evo_wait(void *evoc, int nr) |
402 | { | 398 | { |
403 | struct nv50_dmac *dmac = evoc; | 399 | struct nv50_dmac *dmac = evoc; |
404 | u32 put = nv_ro32(dmac->base.user, 0x0000) / 4; | 400 | u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4; |
405 | 401 | ||
406 | mutex_lock(&dmac->lock); | 402 | mutex_lock(&dmac->lock); |
407 | if (put + nr >= (PAGE_SIZE / 4) - 8) { | 403 | if (put + nr >= (PAGE_SIZE / 4) - 8) { |
408 | dmac->ptr[put] = 0x20000000; | 404 | dmac->ptr[put] = 0x20000000; |
409 | 405 | ||
410 | nv_wo32(dmac->base.user, 0x0000, 0x00000000); | 406 | nvif_wr32(&dmac->base.user, 0x0000, 0x00000000); |
411 | if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) { | 407 | if (!nvkm_wait(&dmac->base.user, 0x0004, ~0, 0x00000000)) { |
412 | mutex_unlock(&dmac->lock); | 408 | mutex_unlock(&dmac->lock); |
413 | NV_ERROR(dmac->base.user, "channel stalled\n"); | 409 | nv_error(nvkm_object(&dmac->base.user), "channel stalled\n"); |
414 | return NULL; | 410 | return NULL; |
415 | } | 411 | } |
416 | 412 | ||
@@ -424,7 +420,7 @@ static void | |||
424 | evo_kick(u32 *push, void *evoc) | 420 | evo_kick(u32 *push, void *evoc) |
425 | { | 421 | { |
426 | struct nv50_dmac *dmac = evoc; | 422 | struct nv50_dmac *dmac = evoc; |
427 | nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2); | 423 | nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2); |
428 | mutex_unlock(&dmac->lock); | 424 | mutex_unlock(&dmac->lock); |
429 | } | 425 | } |
430 | 426 | ||
@@ -443,7 +439,7 @@ evo_sync_wait(void *data) | |||
443 | static int | 439 | static int |
444 | evo_sync(struct drm_device *dev) | 440 | evo_sync(struct drm_device *dev) |
445 | { | 441 | { |
446 | struct nouveau_device *device = nouveau_dev(dev); | 442 | struct nvif_device *device = &nouveau_drm(dev)->device; |
447 | struct nv50_disp *disp = nv50_disp(dev); | 443 | struct nv50_disp *disp = nv50_disp(dev); |
448 | struct nv50_mast *mast = nv50_mast(dev); | 444 | struct nv50_mast *mast = nv50_mast(dev); |
449 | u32 *push = evo_wait(mast, 8); | 445 | u32 *push = evo_wait(mast, 8); |
@@ -455,7 +451,7 @@ evo_sync(struct drm_device *dev) | |||
455 | evo_data(push, 0x00000000); | 451 | evo_data(push, 0x00000000); |
456 | evo_data(push, 0x00000000); | 452 | evo_data(push, 0x00000000); |
457 | evo_kick(push, mast); | 453 | evo_kick(push, mast); |
458 | if (nv_wait_cb(device, evo_sync_wait, disp->sync)) | 454 | if (nv_wait_cb(nvkm_device(device), evo_sync_wait, disp->sync)) |
459 | return 0; | 455 | return 0; |
460 | } | 456 | } |
461 | 457 | ||
@@ -490,7 +486,7 @@ nv50_display_flip_wait(void *data) | |||
490 | void | 486 | void |
491 | nv50_display_flip_stop(struct drm_crtc *crtc) | 487 | nv50_display_flip_stop(struct drm_crtc *crtc) |
492 | { | 488 | { |
493 | struct nouveau_device *device = nouveau_dev(crtc->dev); | 489 | struct nvif_device *device = &nouveau_drm(crtc->dev)->device; |
494 | struct nv50_display_flip flip = { | 490 | struct nv50_display_flip flip = { |
495 | .disp = nv50_disp(crtc->dev), | 491 | .disp = nv50_disp(crtc->dev), |
496 | .chan = nv50_sync(crtc), | 492 | .chan = nv50_sync(crtc), |
@@ -510,7 +506,7 @@ nv50_display_flip_stop(struct drm_crtc *crtc) | |||
510 | evo_kick(push, flip.chan); | 506 | evo_kick(push, flip.chan); |
511 | } | 507 | } |
512 | 508 | ||
513 | nv_wait_cb(device, nv50_display_flip_wait, &flip); | 509 | nv_wait_cb(nvkm_device(device), nv50_display_flip_wait, &flip); |
514 | } | 510 | } |
515 | 511 | ||
516 | int | 512 | int |
@@ -534,7 +530,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
534 | if (unlikely(push == NULL)) | 530 | if (unlikely(push == NULL)) |
535 | return -EBUSY; | 531 | return -EBUSY; |
536 | 532 | ||
537 | if (chan && nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) { | 533 | if (chan && chan->object->oclass < G82_CHANNEL_GPFIFO) { |
538 | ret = RING_SPACE(chan, 8); | 534 | ret = RING_SPACE(chan, 8); |
539 | if (ret) | 535 | if (ret) |
540 | return ret; | 536 | return ret; |
@@ -548,14 +544,14 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
548 | OUT_RING (chan, sync->addr); | 544 | OUT_RING (chan, sync->addr); |
549 | OUT_RING (chan, sync->data); | 545 | OUT_RING (chan, sync->data); |
550 | } else | 546 | } else |
551 | if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { | 547 | if (chan && chan->object->oclass < FERMI_CHANNEL_GPFIFO) { |
552 | u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr; | 548 | u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr; |
553 | ret = RING_SPACE(chan, 12); | 549 | ret = RING_SPACE(chan, 12); |
554 | if (ret) | 550 | if (ret) |
555 | return ret; | 551 | return ret; |
556 | 552 | ||
557 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); | 553 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); |
558 | OUT_RING (chan, chan->vram); | 554 | OUT_RING (chan, chan->vram.handle); |
559 | BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); | 555 | BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); |
560 | OUT_RING (chan, upper_32_bits(addr ^ 0x10)); | 556 | OUT_RING (chan, upper_32_bits(addr ^ 0x10)); |
561 | OUT_RING (chan, lower_32_bits(addr ^ 0x10)); | 557 | OUT_RING (chan, lower_32_bits(addr ^ 0x10)); |
@@ -606,16 +602,16 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
606 | evo_data(push, sync->addr); | 602 | evo_data(push, sync->addr); |
607 | evo_data(push, sync->data++); | 603 | evo_data(push, sync->data++); |
608 | evo_data(push, sync->data); | 604 | evo_data(push, sync->data); |
609 | evo_data(push, NvEvoSync); | 605 | evo_data(push, sync->base.sync.handle); |
610 | evo_mthd(push, 0x00a0, 2); | 606 | evo_mthd(push, 0x00a0, 2); |
611 | evo_data(push, 0x00000000); | 607 | evo_data(push, 0x00000000); |
612 | evo_data(push, 0x00000000); | 608 | evo_data(push, 0x00000000); |
613 | evo_mthd(push, 0x00c0, 1); | 609 | evo_mthd(push, 0x00c0, 1); |
614 | evo_data(push, nv_fb->r_dma); | 610 | evo_data(push, nv_fb->r_handle); |
615 | evo_mthd(push, 0x0110, 2); | 611 | evo_mthd(push, 0x0110, 2); |
616 | evo_data(push, 0x00000000); | 612 | evo_data(push, 0x00000000); |
617 | evo_data(push, 0x00000000); | 613 | evo_data(push, 0x00000000); |
618 | if (nv50_vers(sync) < NVD0_DISP_SYNC_CLASS) { | 614 | if (nv50_vers(sync) < GF110_DISP_BASE_CHANNEL_DMA) { |
619 | evo_mthd(push, 0x0800, 5); | 615 | evo_mthd(push, 0x0800, 5); |
620 | evo_data(push, nv_fb->nvbo->bo.offset >> 8); | 616 | evo_data(push, nv_fb->nvbo->bo.offset >> 8); |
621 | evo_data(push, 0); | 617 | evo_data(push, 0); |
@@ -667,11 +663,11 @@ nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update) | |||
667 | 663 | ||
668 | push = evo_wait(mast, 4); | 664 | push = evo_wait(mast, 4); |
669 | if (push) { | 665 | if (push) { |
670 | if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { | 666 | if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { |
671 | evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1); | 667 | evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1); |
672 | evo_data(push, mode); | 668 | evo_data(push, mode); |
673 | } else | 669 | } else |
674 | if (nv50_vers(mast) < NVE0_DISP_MAST_CLASS) { | 670 | if (nv50_vers(mast) < GK104_DISP_CORE_CHANNEL_DMA) { |
675 | evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1); | 671 | evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1); |
676 | evo_data(push, mode); | 672 | evo_data(push, mode); |
677 | } else { | 673 | } else { |
@@ -762,7 +758,7 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update) | |||
762 | 758 | ||
763 | push = evo_wait(mast, 8); | 759 | push = evo_wait(mast, 8); |
764 | if (push) { | 760 | if (push) { |
765 | if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { | 761 | if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { |
766 | /*XXX: SCALE_CTRL_ACTIVE??? */ | 762 | /*XXX: SCALE_CTRL_ACTIVE??? */ |
767 | evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2); | 763 | evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2); |
768 | evo_data(push, (oY << 16) | oX); | 764 | evo_data(push, (oY << 16) | oX); |
@@ -807,7 +803,7 @@ nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update) | |||
807 | 803 | ||
808 | push = evo_wait(mast, 16); | 804 | push = evo_wait(mast, 16); |
809 | if (push) { | 805 | if (push) { |
810 | if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { | 806 | if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { |
811 | evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1); | 807 | evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1); |
812 | evo_data(push, (hue << 20) | (vib << 8)); | 808 | evo_data(push, (hue << 20) | (vib << 8)); |
813 | } else { | 809 | } else { |
@@ -835,7 +831,7 @@ nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb, | |||
835 | 831 | ||
836 | push = evo_wait(mast, 16); | 832 | push = evo_wait(mast, 16); |
837 | if (push) { | 833 | if (push) { |
838 | if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { | 834 | if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { |
839 | evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1); | 835 | evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1); |
840 | evo_data(push, nvfb->nvbo->bo.offset >> 8); | 836 | evo_data(push, nvfb->nvbo->bo.offset >> 8); |
841 | evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3); | 837 | evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3); |
@@ -844,9 +840,9 @@ nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb, | |||
844 | evo_data(push, nvfb->r_format); | 840 | evo_data(push, nvfb->r_format); |
845 | evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1); | 841 | evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1); |
846 | evo_data(push, (y << 16) | x); | 842 | evo_data(push, (y << 16) | x); |
847 | if (nv50_vers(mast) > NV50_DISP_MAST_CLASS) { | 843 | if (nv50_vers(mast) > NV50_DISP_CORE_CHANNEL_DMA) { |
848 | evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); | 844 | evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); |
849 | evo_data(push, nvfb->r_dma); | 845 | evo_data(push, nvfb->r_handle); |
850 | } | 846 | } |
851 | } else { | 847 | } else { |
852 | evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1); | 848 | evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1); |
@@ -855,7 +851,7 @@ nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb, | |||
855 | evo_data(push, (fb->height << 16) | fb->width); | 851 | evo_data(push, (fb->height << 16) | fb->width); |
856 | evo_data(push, nvfb->r_pitch); | 852 | evo_data(push, nvfb->r_pitch); |
857 | evo_data(push, nvfb->r_format); | 853 | evo_data(push, nvfb->r_format); |
858 | evo_data(push, nvfb->r_dma); | 854 | evo_data(push, nvfb->r_handle); |
859 | evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1); | 855 | evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1); |
860 | evo_data(push, (y << 16) | x); | 856 | evo_data(push, (y << 16) | x); |
861 | } | 857 | } |
@@ -867,7 +863,7 @@ nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb, | |||
867 | evo_kick(push, mast); | 863 | evo_kick(push, mast); |
868 | } | 864 | } |
869 | 865 | ||
870 | nv_crtc->fb.tile_flags = nvfb->r_dma; | 866 | nv_crtc->fb.handle = nvfb->r_handle; |
871 | return 0; | 867 | return 0; |
872 | } | 868 | } |
873 | 869 | ||
@@ -877,23 +873,23 @@ nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc) | |||
877 | struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); | 873 | struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); |
878 | u32 *push = evo_wait(mast, 16); | 874 | u32 *push = evo_wait(mast, 16); |
879 | if (push) { | 875 | if (push) { |
880 | if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { | 876 | if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) { |
881 | evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2); | 877 | evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2); |
882 | evo_data(push, 0x85000000); | 878 | evo_data(push, 0x85000000); |
883 | evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); | 879 | evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); |
884 | } else | 880 | } else |
885 | if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { | 881 | if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { |
886 | evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2); | 882 | evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2); |
887 | evo_data(push, 0x85000000); | 883 | evo_data(push, 0x85000000); |
888 | evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); | 884 | evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); |
889 | evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1); | 885 | evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1); |
890 | evo_data(push, NvEvoVRAM); | 886 | evo_data(push, mast->base.vram.handle); |
891 | } else { | 887 | } else { |
892 | evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2); | 888 | evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2); |
893 | evo_data(push, 0x85000000); | 889 | evo_data(push, 0x85000000); |
894 | evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); | 890 | evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); |
895 | evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1); | 891 | evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1); |
896 | evo_data(push, NvEvoVRAM); | 892 | evo_data(push, mast->base.vram.handle); |
897 | } | 893 | } |
898 | evo_kick(push, mast); | 894 | evo_kick(push, mast); |
899 | } | 895 | } |
@@ -905,11 +901,11 @@ nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc) | |||
905 | struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); | 901 | struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); |
906 | u32 *push = evo_wait(mast, 16); | 902 | u32 *push = evo_wait(mast, 16); |
907 | if (push) { | 903 | if (push) { |
908 | if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { | 904 | if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) { |
909 | evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1); | 905 | evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1); |
910 | evo_data(push, 0x05000000); | 906 | evo_data(push, 0x05000000); |
911 | } else | 907 | } else |
912 | if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { | 908 | if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { |
913 | evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1); | 909 | evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1); |
914 | evo_data(push, 0x05000000); | 910 | evo_data(push, 0x05000000); |
915 | evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1); | 911 | evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1); |
@@ -960,13 +956,13 @@ nv50_crtc_prepare(struct drm_crtc *crtc) | |||
960 | 956 | ||
961 | push = evo_wait(mast, 6); | 957 | push = evo_wait(mast, 6); |
962 | if (push) { | 958 | if (push) { |
963 | if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { | 959 | if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) { |
964 | evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); | 960 | evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); |
965 | evo_data(push, 0x00000000); | 961 | evo_data(push, 0x00000000); |
966 | evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1); | 962 | evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1); |
967 | evo_data(push, 0x40000000); | 963 | evo_data(push, 0x40000000); |
968 | } else | 964 | } else |
969 | if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { | 965 | if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { |
970 | evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); | 966 | evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); |
971 | evo_data(push, 0x00000000); | 967 | evo_data(push, 0x00000000); |
972 | evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1); | 968 | evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1); |
@@ -997,31 +993,31 @@ nv50_crtc_commit(struct drm_crtc *crtc) | |||
997 | 993 | ||
998 | push = evo_wait(mast, 32); | 994 | push = evo_wait(mast, 32); |
999 | if (push) { | 995 | if (push) { |
1000 | if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { | 996 | if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) { |
1001 | evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); | 997 | evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); |
1002 | evo_data(push, NvEvoVRAM_LP); | 998 | evo_data(push, nv_crtc->fb.handle); |
1003 | evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2); | 999 | evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2); |
1004 | evo_data(push, 0xc0000000); | 1000 | evo_data(push, 0xc0000000); |
1005 | evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); | 1001 | evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); |
1006 | } else | 1002 | } else |
1007 | if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { | 1003 | if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { |
1008 | evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); | 1004 | evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); |
1009 | evo_data(push, nv_crtc->fb.tile_flags); | 1005 | evo_data(push, nv_crtc->fb.handle); |
1010 | evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2); | 1006 | evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2); |
1011 | evo_data(push, 0xc0000000); | 1007 | evo_data(push, 0xc0000000); |
1012 | evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); | 1008 | evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); |
1013 | evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1); | 1009 | evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1); |
1014 | evo_data(push, NvEvoVRAM); | 1010 | evo_data(push, mast->base.vram.handle); |
1015 | } else { | 1011 | } else { |
1016 | evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1); | 1012 | evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1); |
1017 | evo_data(push, nv_crtc->fb.tile_flags); | 1013 | evo_data(push, nv_crtc->fb.handle); |
1018 | evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4); | 1014 | evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4); |
1019 | evo_data(push, 0x83000000); | 1015 | evo_data(push, 0x83000000); |
1020 | evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); | 1016 | evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); |
1021 | evo_data(push, 0x00000000); | 1017 | evo_data(push, 0x00000000); |
1022 | evo_data(push, 0x00000000); | 1018 | evo_data(push, 0x00000000); |
1023 | evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1); | 1019 | evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1); |
1024 | evo_data(push, NvEvoVRAM); | 1020 | evo_data(push, mast->base.vram.handle); |
1025 | evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1); | 1021 | evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1); |
1026 | evo_data(push, 0xffffff00); | 1022 | evo_data(push, 0xffffff00); |
1027 | } | 1023 | } |
@@ -1099,7 +1095,7 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode, | |||
1099 | 1095 | ||
1100 | push = evo_wait(mast, 64); | 1096 | push = evo_wait(mast, 64); |
1101 | if (push) { | 1097 | if (push) { |
1102 | if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { | 1098 | if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { |
1103 | evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2); | 1099 | evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2); |
1104 | evo_data(push, 0x00800000 | mode->clock); | 1100 | evo_data(push, 0x00800000 | mode->clock); |
1105 | evo_data(push, (ilace == 2) ? 2 : 0); | 1101 | evo_data(push, (ilace == 2) ? 2 : 0); |
@@ -1192,7 +1188,7 @@ nv50_crtc_lut_load(struct drm_crtc *crtc) | |||
1192 | u16 g = nv_crtc->lut.g[i] >> 2; | 1188 | u16 g = nv_crtc->lut.g[i] >> 2; |
1193 | u16 b = nv_crtc->lut.b[i] >> 2; | 1189 | u16 b = nv_crtc->lut.b[i] >> 2; |
1194 | 1190 | ||
1195 | if (nv_mclass(disp->core) < NVD0_DISP_CLASS) { | 1191 | if (disp->disp->oclass < GF110_DISP) { |
1196 | writew(r + 0x0000, lut + (i * 0x08) + 0); | 1192 | writew(r + 0x0000, lut + (i * 0x08) + 0); |
1197 | writew(g + 0x0000, lut + (i * 0x08) + 2); | 1193 | writew(g + 0x0000, lut + (i * 0x08) + 2); |
1198 | writew(b + 0x0000, lut + (i * 0x08) + 4); | 1194 | writew(b + 0x0000, lut + (i * 0x08) + 4); |
@@ -1259,8 +1255,8 @@ nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | |||
1259 | { | 1255 | { |
1260 | struct nv50_curs *curs = nv50_curs(crtc); | 1256 | struct nv50_curs *curs = nv50_curs(crtc); |
1261 | struct nv50_chan *chan = nv50_chan(curs); | 1257 | struct nv50_chan *chan = nv50_chan(curs); |
1262 | nv_wo32(chan->user, 0x0084, (y << 16) | (x & 0xffff)); | 1258 | nvif_wr32(&chan->user, 0x0084, (y << 16) | (x & 0xffff)); |
1263 | nv_wo32(chan->user, 0x0080, 0x00000000); | 1259 | nvif_wr32(&chan->user, 0x0080, 0x00000000); |
1264 | return 0; | 1260 | return 0; |
1265 | } | 1261 | } |
1266 | 1262 | ||
@@ -1287,11 +1283,16 @@ nv50_crtc_destroy(struct drm_crtc *crtc) | |||
1287 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 1283 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
1288 | struct nv50_disp *disp = nv50_disp(crtc->dev); | 1284 | struct nv50_disp *disp = nv50_disp(crtc->dev); |
1289 | struct nv50_head *head = nv50_head(crtc); | 1285 | struct nv50_head *head = nv50_head(crtc); |
1286 | struct nv50_fbdma *fbdma; | ||
1287 | |||
1288 | list_for_each_entry(fbdma, &disp->fbdma, head) { | ||
1289 | nvif_object_fini(&fbdma->base[nv_crtc->index]); | ||
1290 | } | ||
1290 | 1291 | ||
1291 | nv50_dmac_destroy(disp->core, &head->ovly.base); | 1292 | nv50_dmac_destroy(&head->ovly.base, disp->disp); |
1292 | nv50_pioc_destroy(disp->core, &head->oimm.base); | 1293 | nv50_pioc_destroy(&head->oimm.base); |
1293 | nv50_dmac_destroy(disp->core, &head->sync.base); | 1294 | nv50_dmac_destroy(&head->sync.base, disp->disp); |
1294 | nv50_pioc_destroy(disp->core, &head->curs.base); | 1295 | nv50_pioc_destroy(&head->curs.base); |
1295 | 1296 | ||
1296 | /*XXX: this shouldn't be necessary, but the core doesn't call | 1297 | /*XXX: this shouldn't be necessary, but the core doesn't call |
1297 | * disconnect() during the cleanup paths | 1298 | * disconnect() during the cleanup paths |
@@ -1346,7 +1347,7 @@ nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) | |||
1346 | } | 1347 | } |
1347 | 1348 | ||
1348 | static int | 1349 | static int |
1349 | nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index) | 1350 | nv50_crtc_create(struct drm_device *dev, int index) |
1350 | { | 1351 | { |
1351 | struct nv50_disp *disp = nv50_disp(dev); | 1352 | struct nv50_disp *disp = nv50_disp(dev); |
1352 | struct nv50_head *head; | 1353 | struct nv50_head *head; |
@@ -1395,11 +1396,7 @@ nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index) | |||
1395 | nv50_crtc_lut_load(crtc); | 1396 | nv50_crtc_lut_load(crtc); |
1396 | 1397 | ||
1397 | /* allocate cursor resources */ | 1398 | /* allocate cursor resources */ |
1398 | ret = nv50_pioc_create(disp->core, NV50_DISP_CURS_CLASS, index, | 1399 | ret = nv50_curs_create(disp->disp, index, &head->curs); |
1399 | &(struct nv50_display_curs_class) { | ||
1400 | .head = index, | ||
1401 | }, sizeof(struct nv50_display_curs_class), | ||
1402 | &head->curs.base); | ||
1403 | if (ret) | 1400 | if (ret) |
1404 | goto out; | 1401 | goto out; |
1405 | 1402 | ||
@@ -1420,12 +1417,8 @@ nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index) | |||
1420 | goto out; | 1417 | goto out; |
1421 | 1418 | ||
1422 | /* allocate page flip / sync resources */ | 1419 | /* allocate page flip / sync resources */ |
1423 | ret = nv50_dmac_create(disp->core, NV50_DISP_SYNC_CLASS, index, | 1420 | ret = nv50_base_create(disp->disp, index, disp->sync->bo.offset, |
1424 | &(struct nv50_display_sync_class) { | 1421 | &head->sync); |
1425 | .pushbuf = EVO_PUSH_HANDLE(SYNC, index), | ||
1426 | .head = index, | ||
1427 | }, sizeof(struct nv50_display_sync_class), | ||
1428 | disp->sync->bo.offset, &head->sync.base); | ||
1429 | if (ret) | 1422 | if (ret) |
1430 | goto out; | 1423 | goto out; |
1431 | 1424 | ||
@@ -1433,20 +1426,12 @@ nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index) | |||
1433 | head->sync.data = 0x00000000; | 1426 | head->sync.data = 0x00000000; |
1434 | 1427 | ||
1435 | /* allocate overlay resources */ | 1428 | /* allocate overlay resources */ |
1436 | ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index, | 1429 | ret = nv50_oimm_create(disp->disp, index, &head->oimm); |
1437 | &(struct nv50_display_oimm_class) { | ||
1438 | .head = index, | ||
1439 | }, sizeof(struct nv50_display_oimm_class), | ||
1440 | &head->oimm.base); | ||
1441 | if (ret) | 1430 | if (ret) |
1442 | goto out; | 1431 | goto out; |
1443 | 1432 | ||
1444 | ret = nv50_dmac_create(disp->core, NV50_DISP_OVLY_CLASS, index, | 1433 | ret = nv50_ovly_create(disp->disp, index, disp->sync->bo.offset, |
1445 | &(struct nv50_display_ovly_class) { | 1434 | &head->ovly); |
1446 | .pushbuf = EVO_PUSH_HANDLE(OVLY, index), | ||
1447 | .head = index, | ||
1448 | }, sizeof(struct nv50_display_ovly_class), | ||
1449 | disp->sync->bo.offset, &head->ovly.base); | ||
1450 | if (ret) | 1435 | if (ret) |
1451 | goto out; | 1436 | goto out; |
1452 | 1437 | ||
@@ -1464,16 +1449,23 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode) | |||
1464 | { | 1449 | { |
1465 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 1450 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); |
1466 | struct nv50_disp *disp = nv50_disp(encoder->dev); | 1451 | struct nv50_disp *disp = nv50_disp(encoder->dev); |
1467 | int or = nv_encoder->or; | 1452 | struct { |
1468 | u32 dpms_ctrl; | 1453 | struct nv50_disp_mthd_v1 base; |
1469 | 1454 | struct nv50_disp_dac_pwr_v0 pwr; | |
1470 | dpms_ctrl = 0x00000000; | 1455 | } args = { |
1471 | if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF) | 1456 | .base.version = 1, |
1472 | dpms_ctrl |= 0x00000001; | 1457 | .base.method = NV50_DISP_MTHD_V1_DAC_PWR, |
1473 | if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF) | 1458 | .base.hasht = nv_encoder->dcb->hasht, |
1474 | dpms_ctrl |= 0x00000004; | 1459 | .base.hashm = nv_encoder->dcb->hashm, |
1460 | .pwr.state = 1, | ||
1461 | .pwr.data = 1, | ||
1462 | .pwr.vsync = (mode != DRM_MODE_DPMS_SUSPEND && | ||
1463 | mode != DRM_MODE_DPMS_OFF), | ||
1464 | .pwr.hsync = (mode != DRM_MODE_DPMS_STANDBY && | ||
1465 | mode != DRM_MODE_DPMS_OFF), | ||
1466 | }; | ||
1475 | 1467 | ||
1476 | nv_call(disp->core, NV50_DISP_DAC_PWR + or, dpms_ctrl); | 1468 | nvif_mthd(disp->disp, 0, &args, sizeof(args)); |
1477 | } | 1469 | } |
1478 | 1470 | ||
1479 | static bool | 1471 | static bool |
@@ -1514,7 +1506,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1514 | 1506 | ||
1515 | push = evo_wait(mast, 8); | 1507 | push = evo_wait(mast, 8); |
1516 | if (push) { | 1508 | if (push) { |
1517 | if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { | 1509 | if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { |
1518 | u32 syncs = 0x00000000; | 1510 | u32 syncs = 0x00000000; |
1519 | 1511 | ||
1520 | if (mode->flags & DRM_MODE_FLAG_NHSYNC) | 1512 | if (mode->flags & DRM_MODE_FLAG_NHSYNC) |
@@ -1563,7 +1555,7 @@ nv50_dac_disconnect(struct drm_encoder *encoder) | |||
1563 | 1555 | ||
1564 | push = evo_wait(mast, 4); | 1556 | push = evo_wait(mast, 4); |
1565 | if (push) { | 1557 | if (push) { |
1566 | if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { | 1558 | if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { |
1567 | evo_mthd(push, 0x0400 + (or * 0x080), 1); | 1559 | evo_mthd(push, 0x0400 + (or * 0x080), 1); |
1568 | evo_data(push, 0x00000000); | 1560 | evo_data(push, 0x00000000); |
1569 | } else { | 1561 | } else { |
@@ -1580,14 +1572,25 @@ nv50_dac_disconnect(struct drm_encoder *encoder) | |||
1580 | static enum drm_connector_status | 1572 | static enum drm_connector_status |
1581 | nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) | 1573 | nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) |
1582 | { | 1574 | { |
1575 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
1583 | struct nv50_disp *disp = nv50_disp(encoder->dev); | 1576 | struct nv50_disp *disp = nv50_disp(encoder->dev); |
1584 | int ret, or = nouveau_encoder(encoder)->or; | 1577 | struct { |
1585 | u32 load = nouveau_drm(encoder->dev)->vbios.dactestval; | 1578 | struct nv50_disp_mthd_v1 base; |
1586 | if (load == 0) | 1579 | struct nv50_disp_dac_load_v0 load; |
1587 | load = 340; | 1580 | } args = { |
1581 | .base.version = 1, | ||
1582 | .base.method = NV50_DISP_MTHD_V1_DAC_LOAD, | ||
1583 | .base.hasht = nv_encoder->dcb->hasht, | ||
1584 | .base.hashm = nv_encoder->dcb->hashm, | ||
1585 | }; | ||
1586 | int ret; | ||
1587 | |||
1588 | args.load.data = nouveau_drm(encoder->dev)->vbios.dactestval; | ||
1589 | if (args.load.data == 0) | ||
1590 | args.load.data = 340; | ||
1588 | 1591 | ||
1589 | ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load)); | 1592 | ret = nvif_mthd(disp->disp, 0, &args, sizeof(args)); |
1590 | if (ret || !load) | 1593 | if (ret || !args.load.load) |
1591 | return connector_status_disconnected; | 1594 | return connector_status_disconnected; |
1592 | 1595 | ||
1593 | return connector_status_connected; | 1596 | return connector_status_connected; |
@@ -1619,7 +1622,7 @@ static int | |||
1619 | nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe) | 1622 | nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe) |
1620 | { | 1623 | { |
1621 | struct nouveau_drm *drm = nouveau_drm(connector->dev); | 1624 | struct nouveau_drm *drm = nouveau_drm(connector->dev); |
1622 | struct nouveau_i2c *i2c = nouveau_i2c(drm->device); | 1625 | struct nouveau_i2c *i2c = nvkm_i2c(&drm->device); |
1623 | struct nouveau_encoder *nv_encoder; | 1626 | struct nouveau_encoder *nv_encoder; |
1624 | struct drm_encoder *encoder; | 1627 | struct drm_encoder *encoder; |
1625 | int type = DRM_MODE_ENCODER_DAC; | 1628 | int type = DRM_MODE_ENCODER_DAC; |
@@ -1650,16 +1653,25 @@ nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode) | |||
1650 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 1653 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); |
1651 | struct nouveau_connector *nv_connector; | 1654 | struct nouveau_connector *nv_connector; |
1652 | struct nv50_disp *disp = nv50_disp(encoder->dev); | 1655 | struct nv50_disp *disp = nv50_disp(encoder->dev); |
1656 | struct { | ||
1657 | struct nv50_disp_mthd_v1 base; | ||
1658 | struct nv50_disp_sor_hda_eld_v0 eld; | ||
1659 | u8 data[sizeof(nv_connector->base.eld)]; | ||
1660 | } args = { | ||
1661 | .base.version = 1, | ||
1662 | .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD, | ||
1663 | .base.hasht = nv_encoder->dcb->hasht, | ||
1664 | .base.hashm = nv_encoder->dcb->hashm, | ||
1665 | }; | ||
1653 | 1666 | ||
1654 | nv_connector = nouveau_encoder_connector_get(nv_encoder); | 1667 | nv_connector = nouveau_encoder_connector_get(nv_encoder); |
1655 | if (!drm_detect_monitor_audio(nv_connector->edid)) | 1668 | if (!drm_detect_monitor_audio(nv_connector->edid)) |
1656 | return; | 1669 | return; |
1657 | 1670 | ||
1658 | drm_edid_to_eld(&nv_connector->base, nv_connector->edid); | 1671 | drm_edid_to_eld(&nv_connector->base, nv_connector->edid); |
1672 | memcpy(args.data, nv_connector->base.eld, sizeof(args.data)); | ||
1659 | 1673 | ||
1660 | nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, | 1674 | nvif_mthd(disp->disp, 0, &args, sizeof(args)); |
1661 | nv_connector->base.eld, | ||
1662 | nv_connector->base.eld[2] * 4); | ||
1663 | } | 1675 | } |
1664 | 1676 | ||
1665 | static void | 1677 | static void |
@@ -1667,8 +1679,17 @@ nv50_audio_disconnect(struct drm_encoder *encoder) | |||
1667 | { | 1679 | { |
1668 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 1680 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); |
1669 | struct nv50_disp *disp = nv50_disp(encoder->dev); | 1681 | struct nv50_disp *disp = nv50_disp(encoder->dev); |
1682 | struct { | ||
1683 | struct nv50_disp_mthd_v1 base; | ||
1684 | struct nv50_disp_sor_hda_eld_v0 eld; | ||
1685 | } args = { | ||
1686 | .base.version = 1, | ||
1687 | .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD, | ||
1688 | .base.hasht = nv_encoder->dcb->hasht, | ||
1689 | .base.hashm = nv_encoder->dcb->hashm, | ||
1690 | }; | ||
1670 | 1691 | ||
1671 | nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, NULL, 0); | 1692 | nvif_mthd(disp->disp, 0, &args, sizeof(args)); |
1672 | } | 1693 | } |
1673 | 1694 | ||
1674 | /****************************************************************************** | 1695 | /****************************************************************************** |
@@ -1679,10 +1700,20 @@ nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode) | |||
1679 | { | 1700 | { |
1680 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 1701 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); |
1681 | struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); | 1702 | struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); |
1682 | struct nouveau_connector *nv_connector; | ||
1683 | struct nv50_disp *disp = nv50_disp(encoder->dev); | 1703 | struct nv50_disp *disp = nv50_disp(encoder->dev); |
1684 | const u32 moff = (nv_crtc->index << 3) | nv_encoder->or; | 1704 | struct { |
1685 | u32 rekey = 56; /* binary driver, and tegra constant */ | 1705 | struct nv50_disp_mthd_v1 base; |
1706 | struct nv50_disp_sor_hdmi_pwr_v0 pwr; | ||
1707 | } args = { | ||
1708 | .base.version = 1, | ||
1709 | .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR, | ||
1710 | .base.hasht = nv_encoder->dcb->hasht, | ||
1711 | .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) | | ||
1712 | (0x0100 << nv_crtc->index), | ||
1713 | .pwr.state = 1, | ||
1714 | .pwr.rekey = 56, /* binary driver, and tegra, constant */ | ||
1715 | }; | ||
1716 | struct nouveau_connector *nv_connector; | ||
1686 | u32 max_ac_packet; | 1717 | u32 max_ac_packet; |
1687 | 1718 | ||
1688 | nv_connector = nouveau_encoder_connector_get(nv_encoder); | 1719 | nv_connector = nouveau_encoder_connector_get(nv_encoder); |
@@ -1690,14 +1721,11 @@ nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode) | |||
1690 | return; | 1721 | return; |
1691 | 1722 | ||
1692 | max_ac_packet = mode->htotal - mode->hdisplay; | 1723 | max_ac_packet = mode->htotal - mode->hdisplay; |
1693 | max_ac_packet -= rekey; | 1724 | max_ac_packet -= args.pwr.rekey; |
1694 | max_ac_packet -= 18; /* constant from tegra */ | 1725 | max_ac_packet -= 18; /* constant from tegra */ |
1695 | max_ac_packet /= 32; | 1726 | args.pwr.max_ac_packet = max_ac_packet / 32; |
1696 | |||
1697 | nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff, | ||
1698 | NV84_DISP_SOR_HDMI_PWR_STATE_ON | | ||
1699 | (max_ac_packet << 16) | rekey); | ||
1700 | 1727 | ||
1728 | nvif_mthd(disp->disp, 0, &args, sizeof(args)); | ||
1701 | nv50_audio_mode_set(encoder, mode); | 1729 | nv50_audio_mode_set(encoder, mode); |
1702 | } | 1730 | } |
1703 | 1731 | ||
@@ -1706,11 +1734,20 @@ nv50_hdmi_disconnect(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc) | |||
1706 | { | 1734 | { |
1707 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 1735 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); |
1708 | struct nv50_disp *disp = nv50_disp(encoder->dev); | 1736 | struct nv50_disp *disp = nv50_disp(encoder->dev); |
1709 | const u32 moff = (nv_crtc->index << 3) | nv_encoder->or; | 1737 | struct { |
1738 | struct nv50_disp_mthd_v1 base; | ||
1739 | struct nv50_disp_sor_hdmi_pwr_v0 pwr; | ||
1740 | } args = { | ||
1741 | .base.version = 1, | ||
1742 | .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR, | ||
1743 | .base.hasht = nv_encoder->dcb->hasht, | ||
1744 | .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) | | ||
1745 | (0x0100 << nv_crtc->index), | ||
1746 | }; | ||
1710 | 1747 | ||
1711 | nv50_audio_disconnect(encoder); | 1748 | nv50_audio_disconnect(encoder); |
1712 | 1749 | ||
1713 | nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff, 0x00000000); | 1750 | nvif_mthd(disp->disp, 0, &args, sizeof(args)); |
1714 | } | 1751 | } |
1715 | 1752 | ||
1716 | /****************************************************************************** | 1753 | /****************************************************************************** |
@@ -1720,10 +1757,29 @@ static void | |||
1720 | nv50_sor_dpms(struct drm_encoder *encoder, int mode) | 1757 | nv50_sor_dpms(struct drm_encoder *encoder, int mode) |
1721 | { | 1758 | { |
1722 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 1759 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); |
1760 | struct nv50_disp *disp = nv50_disp(encoder->dev); | ||
1761 | struct { | ||
1762 | struct nv50_disp_mthd_v1 base; | ||
1763 | struct nv50_disp_sor_pwr_v0 pwr; | ||
1764 | } args = { | ||
1765 | .base.version = 1, | ||
1766 | .base.method = NV50_DISP_MTHD_V1_SOR_PWR, | ||
1767 | .base.hasht = nv_encoder->dcb->hasht, | ||
1768 | .base.hashm = nv_encoder->dcb->hashm, | ||
1769 | .pwr.state = mode == DRM_MODE_DPMS_ON, | ||
1770 | }; | ||
1771 | struct { | ||
1772 | struct nv50_disp_mthd_v1 base; | ||
1773 | struct nv50_disp_sor_dp_pwr_v0 pwr; | ||
1774 | } link = { | ||
1775 | .base.version = 1, | ||
1776 | .base.method = NV50_DISP_MTHD_V1_SOR_DP_PWR, | ||
1777 | .base.hasht = nv_encoder->dcb->hasht, | ||
1778 | .base.hashm = nv_encoder->dcb->hashm, | ||
1779 | .pwr.state = mode == DRM_MODE_DPMS_ON, | ||
1780 | }; | ||
1723 | struct drm_device *dev = encoder->dev; | 1781 | struct drm_device *dev = encoder->dev; |
1724 | struct nv50_disp *disp = nv50_disp(dev); | ||
1725 | struct drm_encoder *partner; | 1782 | struct drm_encoder *partner; |
1726 | u32 mthd; | ||
1727 | 1783 | ||
1728 | nv_encoder->last_dpms = mode; | 1784 | nv_encoder->last_dpms = mode; |
1729 | 1785 | ||
@@ -1741,18 +1797,13 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode) | |||
1741 | } | 1797 | } |
1742 | } | 1798 | } |
1743 | 1799 | ||
1744 | mthd = (ffs(nv_encoder->dcb->heads) - 1) << 3; | ||
1745 | mthd |= (ffs(nv_encoder->dcb->sorconf.link) - 1) << 2; | ||
1746 | mthd |= nv_encoder->or; | ||
1747 | |||
1748 | if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { | 1800 | if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { |
1749 | nv_call(disp->core, NV50_DISP_SOR_PWR | mthd, 1); | 1801 | args.pwr.state = 1; |
1750 | mthd |= NV94_DISP_SOR_DP_PWR; | 1802 | nvif_mthd(disp->disp, 0, &args, sizeof(args)); |
1803 | nvif_mthd(disp->disp, 0, &link, sizeof(link)); | ||
1751 | } else { | 1804 | } else { |
1752 | mthd |= NV50_DISP_SOR_PWR; | 1805 | nvif_mthd(disp->disp, 0, &args, sizeof(args)); |
1753 | } | 1806 | } |
1754 | |||
1755 | nv_call(disp->core, mthd, (mode == DRM_MODE_DPMS_ON)); | ||
1756 | } | 1807 | } |
1757 | 1808 | ||
1758 | static bool | 1809 | static bool |
@@ -1781,7 +1832,7 @@ nv50_sor_ctrl(struct nouveau_encoder *nv_encoder, u32 mask, u32 data) | |||
1781 | struct nv50_mast *mast = nv50_mast(nv_encoder->base.base.dev); | 1832 | struct nv50_mast *mast = nv50_mast(nv_encoder->base.base.dev); |
1782 | u32 temp = (nv_encoder->ctrl & ~mask) | (data & mask), *push; | 1833 | u32 temp = (nv_encoder->ctrl & ~mask) | (data & mask), *push; |
1783 | if (temp != nv_encoder->ctrl && (push = evo_wait(mast, 2))) { | 1834 | if (temp != nv_encoder->ctrl && (push = evo_wait(mast, 2))) { |
1784 | if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { | 1835 | if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { |
1785 | evo_mthd(push, 0x0600 + (nv_encoder->or * 0x40), 1); | 1836 | evo_mthd(push, 0x0600 + (nv_encoder->or * 0x40), 1); |
1786 | evo_data(push, (nv_encoder->ctrl = temp)); | 1837 | evo_data(push, (nv_encoder->ctrl = temp)); |
1787 | } else { | 1838 | } else { |
@@ -1817,15 +1868,24 @@ static void | |||
1817 | nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, | 1868 | nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, |
1818 | struct drm_display_mode *mode) | 1869 | struct drm_display_mode *mode) |
1819 | { | 1870 | { |
1871 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
1872 | struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); | ||
1873 | struct { | ||
1874 | struct nv50_disp_mthd_v1 base; | ||
1875 | struct nv50_disp_sor_lvds_script_v0 lvds; | ||
1876 | } lvds = { | ||
1877 | .base.version = 1, | ||
1878 | .base.method = NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT, | ||
1879 | .base.hasht = nv_encoder->dcb->hasht, | ||
1880 | .base.hashm = nv_encoder->dcb->hashm, | ||
1881 | }; | ||
1820 | struct nv50_disp *disp = nv50_disp(encoder->dev); | 1882 | struct nv50_disp *disp = nv50_disp(encoder->dev); |
1821 | struct nv50_mast *mast = nv50_mast(encoder->dev); | 1883 | struct nv50_mast *mast = nv50_mast(encoder->dev); |
1822 | struct drm_device *dev = encoder->dev; | 1884 | struct drm_device *dev = encoder->dev; |
1823 | struct nouveau_drm *drm = nouveau_drm(dev); | 1885 | struct nouveau_drm *drm = nouveau_drm(dev); |
1824 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
1825 | struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); | ||
1826 | struct nouveau_connector *nv_connector; | 1886 | struct nouveau_connector *nv_connector; |
1827 | struct nvbios *bios = &drm->vbios; | 1887 | struct nvbios *bios = &drm->vbios; |
1828 | u32 lvds = 0, mask, ctrl; | 1888 | u32 mask, ctrl; |
1829 | u8 owner = 1 << nv_crtc->index; | 1889 | u8 owner = 1 << nv_crtc->index; |
1830 | u8 proto = 0xf; | 1890 | u8 proto = 0xf; |
1831 | u8 depth = 0x0; | 1891 | u8 depth = 0x0; |
@@ -1851,31 +1911,31 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, | |||
1851 | 1911 | ||
1852 | if (bios->fp_no_ddc) { | 1912 | if (bios->fp_no_ddc) { |
1853 | if (bios->fp.dual_link) | 1913 | if (bios->fp.dual_link) |
1854 | lvds |= 0x0100; | 1914 | lvds.lvds.script |= 0x0100; |
1855 | if (bios->fp.if_is_24bit) | 1915 | if (bios->fp.if_is_24bit) |
1856 | lvds |= 0x0200; | 1916 | lvds.lvds.script |= 0x0200; |
1857 | } else { | 1917 | } else { |
1858 | if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) { | 1918 | if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) { |
1859 | if (((u8 *)nv_connector->edid)[121] == 2) | 1919 | if (((u8 *)nv_connector->edid)[121] == 2) |
1860 | lvds |= 0x0100; | 1920 | lvds.lvds.script |= 0x0100; |
1861 | } else | 1921 | } else |
1862 | if (mode->clock >= bios->fp.duallink_transition_clk) { | 1922 | if (mode->clock >= bios->fp.duallink_transition_clk) { |
1863 | lvds |= 0x0100; | 1923 | lvds.lvds.script |= 0x0100; |
1864 | } | 1924 | } |
1865 | 1925 | ||
1866 | if (lvds & 0x0100) { | 1926 | if (lvds.lvds.script & 0x0100) { |
1867 | if (bios->fp.strapless_is_24bit & 2) | 1927 | if (bios->fp.strapless_is_24bit & 2) |
1868 | lvds |= 0x0200; | 1928 | lvds.lvds.script |= 0x0200; |
1869 | } else { | 1929 | } else { |
1870 | if (bios->fp.strapless_is_24bit & 1) | 1930 | if (bios->fp.strapless_is_24bit & 1) |
1871 | lvds |= 0x0200; | 1931 | lvds.lvds.script |= 0x0200; |
1872 | } | 1932 | } |
1873 | 1933 | ||
1874 | if (nv_connector->base.display_info.bpc == 8) | 1934 | if (nv_connector->base.display_info.bpc == 8) |
1875 | lvds |= 0x0200; | 1935 | lvds.lvds.script |= 0x0200; |
1876 | } | 1936 | } |
1877 | 1937 | ||
1878 | nv_call(disp->core, NV50_DISP_SOR_LVDS_SCRIPT + nv_encoder->or, lvds); | 1938 | nvif_mthd(disp->disp, 0, &lvds, sizeof(lvds)); |
1879 | break; | 1939 | break; |
1880 | case DCB_OUTPUT_DP: | 1940 | case DCB_OUTPUT_DP: |
1881 | if (nv_connector->base.display_info.bpc == 6) { | 1941 | if (nv_connector->base.display_info.bpc == 6) { |
@@ -1902,7 +1962,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, | |||
1902 | 1962 | ||
1903 | nv50_sor_dpms(&nv_encoder->base.base, DRM_MODE_DPMS_ON); | 1963 | nv50_sor_dpms(&nv_encoder->base.base, DRM_MODE_DPMS_ON); |
1904 | 1964 | ||
1905 | if (nv50_vers(mast) >= NVD0_DISP_CLASS) { | 1965 | if (nv50_vers(mast) >= GF110_DISP) { |
1906 | u32 *push = evo_wait(mast, 3); | 1966 | u32 *push = evo_wait(mast, 3); |
1907 | if (push) { | 1967 | if (push) { |
1908 | u32 magic = 0x31ec6000 | (nv_crtc->index << 25); | 1968 | u32 magic = 0x31ec6000 | (nv_crtc->index << 25); |
@@ -1961,7 +2021,7 @@ static int | |||
1961 | nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) | 2021 | nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) |
1962 | { | 2022 | { |
1963 | struct nouveau_drm *drm = nouveau_drm(connector->dev); | 2023 | struct nouveau_drm *drm = nouveau_drm(connector->dev); |
1964 | struct nouveau_i2c *i2c = nouveau_i2c(drm->device); | 2024 | struct nouveau_i2c *i2c = nvkm_i2c(&drm->device); |
1965 | struct nouveau_encoder *nv_encoder; | 2025 | struct nouveau_encoder *nv_encoder; |
1966 | struct drm_encoder *encoder; | 2026 | struct drm_encoder *encoder; |
1967 | int type; | 2027 | int type; |
@@ -2002,9 +2062,19 @@ nv50_pior_dpms(struct drm_encoder *encoder, int mode) | |||
2002 | { | 2062 | { |
2003 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 2063 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); |
2004 | struct nv50_disp *disp = nv50_disp(encoder->dev); | 2064 | struct nv50_disp *disp = nv50_disp(encoder->dev); |
2005 | u32 mthd = (nv_encoder->dcb->type << 12) | nv_encoder->or; | 2065 | struct { |
2006 | u32 ctrl = (mode == DRM_MODE_DPMS_ON); | 2066 | struct nv50_disp_mthd_v1 base; |
2007 | nv_call(disp->core, NV50_DISP_PIOR_PWR + mthd, ctrl); | 2067 | struct nv50_disp_pior_pwr_v0 pwr; |
2068 | } args = { | ||
2069 | .base.version = 1, | ||
2070 | .base.method = NV50_DISP_MTHD_V1_PIOR_PWR, | ||
2071 | .base.hasht = nv_encoder->dcb->hasht, | ||
2072 | .base.hashm = nv_encoder->dcb->hashm, | ||
2073 | .pwr.state = mode == DRM_MODE_DPMS_ON, | ||
2074 | .pwr.type = nv_encoder->dcb->type, | ||
2075 | }; | ||
2076 | |||
2077 | nvif_mthd(disp->disp, 0, &args, sizeof(args)); | ||
2008 | } | 2078 | } |
2009 | 2079 | ||
2010 | static bool | 2080 | static bool |
@@ -2067,7 +2137,7 @@ nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
2067 | 2137 | ||
2068 | push = evo_wait(mast, 8); | 2138 | push = evo_wait(mast, 8); |
2069 | if (push) { | 2139 | if (push) { |
2070 | if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { | 2140 | if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { |
2071 | u32 ctrl = (depth << 16) | (proto << 8) | owner; | 2141 | u32 ctrl = (depth << 16) | (proto << 8) | owner; |
2072 | if (mode->flags & DRM_MODE_FLAG_NHSYNC) | 2142 | if (mode->flags & DRM_MODE_FLAG_NHSYNC) |
2073 | ctrl |= 0x00001000; | 2143 | ctrl |= 0x00001000; |
@@ -2096,7 +2166,7 @@ nv50_pior_disconnect(struct drm_encoder *encoder) | |||
2096 | 2166 | ||
2097 | push = evo_wait(mast, 4); | 2167 | push = evo_wait(mast, 4); |
2098 | if (push) { | 2168 | if (push) { |
2099 | if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { | 2169 | if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { |
2100 | evo_mthd(push, 0x0700 + (or * 0x040), 1); | 2170 | evo_mthd(push, 0x0700 + (or * 0x040), 1); |
2101 | evo_data(push, 0x00000000); | 2171 | evo_data(push, 0x00000000); |
2102 | } | 2172 | } |
@@ -2132,7 +2202,7 @@ static int | |||
2132 | nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe) | 2202 | nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe) |
2133 | { | 2203 | { |
2134 | struct nouveau_drm *drm = nouveau_drm(connector->dev); | 2204 | struct nouveau_drm *drm = nouveau_drm(connector->dev); |
2135 | struct nouveau_i2c *i2c = nouveau_i2c(drm->device); | 2205 | struct nouveau_i2c *i2c = nvkm_i2c(&drm->device); |
2136 | struct nouveau_i2c_port *ddc = NULL; | 2206 | struct nouveau_i2c_port *ddc = NULL; |
2137 | struct nouveau_encoder *nv_encoder; | 2207 | struct nouveau_encoder *nv_encoder; |
2138 | struct drm_encoder *encoder; | 2208 | struct drm_encoder *encoder; |
@@ -2169,8 +2239,151 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe) | |||
2169 | } | 2239 | } |
2170 | 2240 | ||
2171 | /****************************************************************************** | 2241 | /****************************************************************************** |
2242 | * Framebuffer | ||
2243 | *****************************************************************************/ | ||
2244 | |||
2245 | static void | ||
2246 | nv50_fbdma_fini(struct nv50_fbdma *fbdma) | ||
2247 | { | ||
2248 | int i; | ||
2249 | for (i = 0; i < ARRAY_SIZE(fbdma->base); i++) | ||
2250 | nvif_object_fini(&fbdma->base[i]); | ||
2251 | nvif_object_fini(&fbdma->core); | ||
2252 | list_del(&fbdma->head); | ||
2253 | kfree(fbdma); | ||
2254 | } | ||
2255 | |||
2256 | static int | ||
2257 | nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kind) | ||
2258 | { | ||
2259 | struct nouveau_drm *drm = nouveau_drm(dev); | ||
2260 | struct nv50_disp *disp = nv50_disp(dev); | ||
2261 | struct nv50_mast *mast = nv50_mast(dev); | ||
2262 | struct __attribute__ ((packed)) { | ||
2263 | struct nv_dma_v0 base; | ||
2264 | union { | ||
2265 | struct nv50_dma_v0 nv50; | ||
2266 | struct gf100_dma_v0 gf100; | ||
2267 | struct gf110_dma_v0 gf110; | ||
2268 | }; | ||
2269 | } args = {}; | ||
2270 | struct nv50_fbdma *fbdma; | ||
2271 | struct drm_crtc *crtc; | ||
2272 | u32 size = sizeof(args.base); | ||
2273 | int ret; | ||
2274 | |||
2275 | list_for_each_entry(fbdma, &disp->fbdma, head) { | ||
2276 | if (fbdma->core.handle == name) | ||
2277 | return 0; | ||
2278 | } | ||
2279 | |||
2280 | fbdma = kzalloc(sizeof(*fbdma), GFP_KERNEL); | ||
2281 | if (!fbdma) | ||
2282 | return -ENOMEM; | ||
2283 | list_add(&fbdma->head, &disp->fbdma); | ||
2284 | |||
2285 | args.base.target = NV_DMA_V0_TARGET_VRAM; | ||
2286 | args.base.access = NV_DMA_V0_ACCESS_RDWR; | ||
2287 | args.base.start = offset; | ||
2288 | args.base.limit = offset + length - 1; | ||
2289 | |||
2290 | if (drm->device.info.chipset < 0x80) { | ||
2291 | args.nv50.part = NV50_DMA_V0_PART_256; | ||
2292 | size += sizeof(args.nv50); | ||
2293 | } else | ||
2294 | if (drm->device.info.chipset < 0xc0) { | ||
2295 | args.nv50.part = NV50_DMA_V0_PART_256; | ||
2296 | args.nv50.kind = kind; | ||
2297 | size += sizeof(args.nv50); | ||
2298 | } else | ||
2299 | if (drm->device.info.chipset < 0xd0) { | ||
2300 | args.gf100.kind = kind; | ||
2301 | size += sizeof(args.gf100); | ||
2302 | } else { | ||
2303 | args.gf110.page = GF110_DMA_V0_PAGE_LP; | ||
2304 | args.gf110.kind = kind; | ||
2305 | size += sizeof(args.gf110); | ||
2306 | } | ||
2307 | |||
2308 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
2309 | struct nv50_head *head = nv50_head(crtc); | ||
2310 | int ret = nvif_object_init(&head->sync.base.base.user, NULL, | ||
2311 | name, NV_DMA_IN_MEMORY, &args, size, | ||
2312 | &fbdma->base[head->base.index]); | ||
2313 | if (ret) { | ||
2314 | nv50_fbdma_fini(fbdma); | ||
2315 | return ret; | ||
2316 | } | ||
2317 | } | ||
2318 | |||
2319 | ret = nvif_object_init(&mast->base.base.user, NULL, name, | ||
2320 | NV_DMA_IN_MEMORY, &args, size, | ||
2321 | &fbdma->core); | ||
2322 | if (ret) { | ||
2323 | nv50_fbdma_fini(fbdma); | ||
2324 | return ret; | ||
2325 | } | ||
2326 | |||
2327 | return 0; | ||
2328 | } | ||
2329 | |||
2330 | static void | ||
2331 | nv50_fb_dtor(struct drm_framebuffer *fb) | ||
2332 | { | ||
2333 | } | ||
2334 | |||
2335 | static int | ||
2336 | nv50_fb_ctor(struct drm_framebuffer *fb) | ||
2337 | { | ||
2338 | struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); | ||
2339 | struct nouveau_drm *drm = nouveau_drm(fb->dev); | ||
2340 | struct nouveau_bo *nvbo = nv_fb->nvbo; | ||
2341 | struct nv50_disp *disp = nv50_disp(fb->dev); | ||
2342 | u8 kind = nouveau_bo_tile_layout(nvbo) >> 8; | ||
2343 | u8 tile = nvbo->tile_mode; | ||
2344 | |||
2345 | if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) { | ||
2346 | NV_ERROR(drm, "framebuffer requires contiguous bo\n"); | ||
2347 | return -EINVAL; | ||
2348 | } | ||
2349 | |||
2350 | if (drm->device.info.chipset >= 0xc0) | ||
2351 | tile >>= 4; /* yep.. */ | ||
2352 | |||
2353 | switch (fb->depth) { | ||
2354 | case 8: nv_fb->r_format = 0x1e00; break; | ||
2355 | case 15: nv_fb->r_format = 0xe900; break; | ||
2356 | case 16: nv_fb->r_format = 0xe800; break; | ||
2357 | case 24: | ||
2358 | case 32: nv_fb->r_format = 0xcf00; break; | ||
2359 | case 30: nv_fb->r_format = 0xd100; break; | ||
2360 | default: | ||
2361 | NV_ERROR(drm, "unknown depth %d\n", fb->depth); | ||
2362 | return -EINVAL; | ||
2363 | } | ||
2364 | |||
2365 | if (disp->disp->oclass < G82_DISP) { | ||
2366 | nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) : | ||
2367 | (fb->pitches[0] | 0x00100000); | ||
2368 | nv_fb->r_format |= kind << 16; | ||
2369 | } else | ||
2370 | if (disp->disp->oclass < GF110_DISP) { | ||
2371 | nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) : | ||
2372 | (fb->pitches[0] | 0x00100000); | ||
2373 | } else { | ||
2374 | nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) : | ||
2375 | (fb->pitches[0] | 0x01000000); | ||
2376 | } | ||
2377 | nv_fb->r_handle = 0xffff0000 | kind; | ||
2378 | |||
2379 | return nv50_fbdma_init(fb->dev, nv_fb->r_handle, 0, | ||
2380 | drm->device.info.ram_user, kind); | ||
2381 | } | ||
2382 | |||
2383 | /****************************************************************************** | ||
2172 | * Init | 2384 | * Init |
2173 | *****************************************************************************/ | 2385 | *****************************************************************************/ |
2386 | |||
2174 | void | 2387 | void |
2175 | nv50_display_fini(struct drm_device *dev) | 2388 | nv50_display_fini(struct drm_device *dev) |
2176 | { | 2389 | { |
@@ -2193,7 +2406,7 @@ nv50_display_init(struct drm_device *dev) | |||
2193 | } | 2406 | } |
2194 | 2407 | ||
2195 | evo_mthd(push, 0x0088, 1); | 2408 | evo_mthd(push, 0x0088, 1); |
2196 | evo_data(push, NvEvoSync); | 2409 | evo_data(push, nv50_mast(dev)->base.sync.handle); |
2197 | evo_kick(push, nv50_mast(dev)); | 2410 | evo_kick(push, nv50_mast(dev)); |
2198 | return 0; | 2411 | return 0; |
2199 | } | 2412 | } |
@@ -2202,8 +2415,13 @@ void | |||
2202 | nv50_display_destroy(struct drm_device *dev) | 2415 | nv50_display_destroy(struct drm_device *dev) |
2203 | { | 2416 | { |
2204 | struct nv50_disp *disp = nv50_disp(dev); | 2417 | struct nv50_disp *disp = nv50_disp(dev); |
2418 | struct nv50_fbdma *fbdma, *fbtmp; | ||
2419 | |||
2420 | list_for_each_entry_safe(fbdma, fbtmp, &disp->fbdma, head) { | ||
2421 | nv50_fbdma_fini(fbdma); | ||
2422 | } | ||
2205 | 2423 | ||
2206 | nv50_dmac_destroy(disp->core, &disp->mast.base); | 2424 | nv50_dmac_destroy(&disp->mast.base, disp->disp); |
2207 | 2425 | ||
2208 | nouveau_bo_unmap(disp->sync); | 2426 | nouveau_bo_unmap(disp->sync); |
2209 | if (disp->sync) | 2427 | if (disp->sync) |
@@ -2217,7 +2435,7 @@ nv50_display_destroy(struct drm_device *dev) | |||
2217 | int | 2435 | int |
2218 | nv50_display_create(struct drm_device *dev) | 2436 | nv50_display_create(struct drm_device *dev) |
2219 | { | 2437 | { |
2220 | struct nouveau_device *device = nouveau_dev(dev); | 2438 | struct nvif_device *device = &nouveau_drm(dev)->device; |
2221 | struct nouveau_drm *drm = nouveau_drm(dev); | 2439 | struct nouveau_drm *drm = nouveau_drm(dev); |
2222 | struct dcb_table *dcb = &drm->vbios.dcb; | 2440 | struct dcb_table *dcb = &drm->vbios.dcb; |
2223 | struct drm_connector *connector, *tmp; | 2441 | struct drm_connector *connector, *tmp; |
@@ -2228,12 +2446,15 @@ nv50_display_create(struct drm_device *dev) | |||
2228 | disp = kzalloc(sizeof(*disp), GFP_KERNEL); | 2446 | disp = kzalloc(sizeof(*disp), GFP_KERNEL); |
2229 | if (!disp) | 2447 | if (!disp) |
2230 | return -ENOMEM; | 2448 | return -ENOMEM; |
2449 | INIT_LIST_HEAD(&disp->fbdma); | ||
2231 | 2450 | ||
2232 | nouveau_display(dev)->priv = disp; | 2451 | nouveau_display(dev)->priv = disp; |
2233 | nouveau_display(dev)->dtor = nv50_display_destroy; | 2452 | nouveau_display(dev)->dtor = nv50_display_destroy; |
2234 | nouveau_display(dev)->init = nv50_display_init; | 2453 | nouveau_display(dev)->init = nv50_display_init; |
2235 | nouveau_display(dev)->fini = nv50_display_fini; | 2454 | nouveau_display(dev)->fini = nv50_display_fini; |
2236 | disp->core = nouveau_display(dev)->core; | 2455 | nouveau_display(dev)->fb_ctor = nv50_fb_ctor; |
2456 | nouveau_display(dev)->fb_dtor = nv50_fb_dtor; | ||
2457 | disp->disp = &nouveau_display(dev)->disp; | ||
2237 | 2458 | ||
2238 | /* small shared memory area we use for notifiers and semaphores */ | 2459 | /* small shared memory area we use for notifiers and semaphores */ |
2239 | ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, | 2460 | ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, |
@@ -2253,22 +2474,19 @@ nv50_display_create(struct drm_device *dev) | |||
2253 | goto out; | 2474 | goto out; |
2254 | 2475 | ||
2255 | /* allocate master evo channel */ | 2476 | /* allocate master evo channel */ |
2256 | ret = nv50_dmac_create(disp->core, NV50_DISP_MAST_CLASS, 0, | 2477 | ret = nv50_core_create(disp->disp, disp->sync->bo.offset, |
2257 | &(struct nv50_display_mast_class) { | 2478 | &disp->mast); |
2258 | .pushbuf = EVO_PUSH_HANDLE(MAST, 0), | ||
2259 | }, sizeof(struct nv50_display_mast_class), | ||
2260 | disp->sync->bo.offset, &disp->mast.base); | ||
2261 | if (ret) | 2479 | if (ret) |
2262 | goto out; | 2480 | goto out; |
2263 | 2481 | ||
2264 | /* create crtc objects to represent the hw heads */ | 2482 | /* create crtc objects to represent the hw heads */ |
2265 | if (nv_mclass(disp->core) >= NVD0_DISP_CLASS) | 2483 | if (disp->disp->oclass >= GF110_DISP) |
2266 | crtcs = nv_rd32(device, 0x022448); | 2484 | crtcs = nvif_rd32(device, 0x022448); |
2267 | else | 2485 | else |
2268 | crtcs = 2; | 2486 | crtcs = 2; |
2269 | 2487 | ||
2270 | for (i = 0; i < crtcs; i++) { | 2488 | for (i = 0; i < crtcs; i++) { |
2271 | ret = nv50_crtc_create(dev, disp->core, i); | 2489 | ret = nv50_crtc_create(dev, i); |
2272 | if (ret) | 2490 | if (ret) |
2273 | goto out; | 2491 | goto out; |
2274 | } | 2492 | } |
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 52068a0910dc..394c89abcc97 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c | |||
@@ -154,7 +154,6 @@ nv50_fbcon_accel_init(struct fb_info *info) | |||
154 | struct drm_device *dev = nfbdev->dev; | 154 | struct drm_device *dev = nfbdev->dev; |
155 | struct nouveau_drm *drm = nouveau_drm(dev); | 155 | struct nouveau_drm *drm = nouveau_drm(dev); |
156 | struct nouveau_channel *chan = drm->channel; | 156 | struct nouveau_channel *chan = drm->channel; |
157 | struct nouveau_object *object; | ||
158 | int ret, format; | 157 | int ret, format; |
159 | 158 | ||
160 | switch (info->var.bits_per_pixel) { | 159 | switch (info->var.bits_per_pixel) { |
@@ -184,8 +183,8 @@ nv50_fbcon_accel_init(struct fb_info *info) | |||
184 | return -EINVAL; | 183 | return -EINVAL; |
185 | } | 184 | } |
186 | 185 | ||
187 | ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D, | 186 | ret = nvif_object_init(chan->object, NULL, 0x502d, 0x502d, NULL, 0, |
188 | 0x502d, NULL, 0, &object); | 187 | &nfbdev->twod); |
189 | if (ret) | 188 | if (ret) |
190 | return ret; | 189 | return ret; |
191 | 190 | ||
@@ -196,11 +195,11 @@ nv50_fbcon_accel_init(struct fb_info *info) | |||
196 | } | 195 | } |
197 | 196 | ||
198 | BEGIN_NV04(chan, NvSub2D, 0x0000, 1); | 197 | BEGIN_NV04(chan, NvSub2D, 0x0000, 1); |
199 | OUT_RING(chan, Nv2D); | 198 | OUT_RING(chan, nfbdev->twod.handle); |
200 | BEGIN_NV04(chan, NvSub2D, 0x0184, 3); | 199 | BEGIN_NV04(chan, NvSub2D, 0x0184, 3); |
201 | OUT_RING(chan, NvDmaFB); | 200 | OUT_RING(chan, chan->vram.handle); |
202 | OUT_RING(chan, NvDmaFB); | 201 | OUT_RING(chan, chan->vram.handle); |
203 | OUT_RING(chan, NvDmaFB); | 202 | OUT_RING(chan, chan->vram.handle); |
204 | BEGIN_NV04(chan, NvSub2D, 0x0290, 1); | 203 | BEGIN_NV04(chan, NvSub2D, 0x0290, 1); |
205 | OUT_RING(chan, 0); | 204 | OUT_RING(chan, 0); |
206 | BEGIN_NV04(chan, NvSub2D, 0x0888, 1); | 205 | BEGIN_NV04(chan, NvSub2D, 0x0888, 1); |
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c index 0ee363840035..195cf51a7c31 100644 --- a/drivers/gpu/drm/nouveau/nv50_fence.c +++ b/drivers/gpu/drm/nouveau/nv50_fence.c | |||
@@ -22,8 +22,8 @@ | |||
22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | 22 | * Authors: Ben Skeggs <bskeggs@redhat.com> |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/object.h> | 25 | #include <nvif/os.h> |
26 | #include <core/class.h> | 26 | #include <nvif/class.h> |
27 | 27 | ||
28 | #include "nouveau_drm.h" | 28 | #include "nouveau_drm.h" |
29 | #include "nouveau_dma.h" | 29 | #include "nouveau_dma.h" |
@@ -38,7 +38,6 @@ nv50_fence_context_new(struct nouveau_channel *chan) | |||
38 | struct nv10_fence_priv *priv = chan->drm->fence; | 38 | struct nv10_fence_priv *priv = chan->drm->fence; |
39 | struct nv10_fence_chan *fctx; | 39 | struct nv10_fence_chan *fctx; |
40 | struct ttm_mem_reg *mem = &priv->bo->bo.mem; | 40 | struct ttm_mem_reg *mem = &priv->bo->bo.mem; |
41 | struct nouveau_object *object; | ||
42 | u32 start = mem->start * PAGE_SIZE; | 41 | u32 start = mem->start * PAGE_SIZE; |
43 | u32 limit = start + mem->size - 1; | 42 | u32 limit = start + mem->size - 1; |
44 | int ret, i; | 43 | int ret, i; |
@@ -52,15 +51,14 @@ nv50_fence_context_new(struct nouveau_channel *chan) | |||
52 | fctx->base.read = nv10_fence_read; | 51 | fctx->base.read = nv10_fence_read; |
53 | fctx->base.sync = nv17_fence_sync; | 52 | fctx->base.sync = nv17_fence_sync; |
54 | 53 | ||
55 | ret = nouveau_object_new(nv_object(chan->cli), chan->handle, | 54 | ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_IN_MEMORY, |
56 | NvSema, 0x003d, | 55 | &(struct nv_dma_v0) { |
57 | &(struct nv_dma_class) { | 56 | .target = NV_DMA_V0_TARGET_VRAM, |
58 | .flags = NV_DMA_TARGET_VRAM | | 57 | .access = NV_DMA_V0_ACCESS_RDWR, |
59 | NV_DMA_ACCESS_RDWR, | ||
60 | .start = start, | 58 | .start = start, |
61 | .limit = limit, | 59 | .limit = limit, |
62 | }, sizeof(struct nv_dma_class), | 60 | }, sizeof(struct nv_dma_v0), |
63 | &object); | 61 | &fctx->sema); |
64 | 62 | ||
65 | /* dma objects for display sync channel semaphore blocks */ | 63 | /* dma objects for display sync channel semaphore blocks */ |
66 | for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) { | 64 | for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) { |
@@ -68,15 +66,14 @@ nv50_fence_context_new(struct nouveau_channel *chan) | |||
68 | u32 start = bo->bo.mem.start * PAGE_SIZE; | 66 | u32 start = bo->bo.mem.start * PAGE_SIZE; |
69 | u32 limit = start + bo->bo.mem.size - 1; | 67 | u32 limit = start + bo->bo.mem.size - 1; |
70 | 68 | ||
71 | ret = nouveau_object_new(nv_object(chan->cli), chan->handle, | 69 | ret = nvif_object_init(chan->object, NULL, NvEvoSema0 + i, |
72 | NvEvoSema0 + i, 0x003d, | 70 | NV_DMA_IN_MEMORY, &(struct nv_dma_v0) { |
73 | &(struct nv_dma_class) { | 71 | .target = NV_DMA_V0_TARGET_VRAM, |
74 | .flags = NV_DMA_TARGET_VRAM | | 72 | .access = NV_DMA_V0_ACCESS_RDWR, |
75 | NV_DMA_ACCESS_RDWR, | ||
76 | .start = start, | 73 | .start = start, |
77 | .limit = limit, | 74 | .limit = limit, |
78 | }, sizeof(struct nv_dma_class), | 75 | }, sizeof(struct nv_dma_v0), |
79 | &object); | 76 | &fctx->head[i]); |
80 | } | 77 | } |
81 | 78 | ||
82 | if (ret) | 79 | if (ret) |
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c index 9fd475c89820..933a779c93ab 100644 --- a/drivers/gpu/drm/nouveau/nv84_fence.c +++ b/drivers/gpu/drm/nouveau/nv84_fence.c | |||
@@ -22,12 +22,6 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/object.h> | ||
26 | #include <core/client.h> | ||
27 | #include <core/class.h> | ||
28 | |||
29 | #include <engine/fifo.h> | ||
30 | |||
31 | #include "nouveau_drm.h" | 25 | #include "nouveau_drm.h" |
32 | #include "nouveau_dma.h" | 26 | #include "nouveau_dma.h" |
33 | #include "nouveau_fence.h" | 27 | #include "nouveau_fence.h" |
@@ -47,7 +41,7 @@ nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence) | |||
47 | int ret = RING_SPACE(chan, 8); | 41 | int ret = RING_SPACE(chan, 8); |
48 | if (ret == 0) { | 42 | if (ret == 0) { |
49 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); | 43 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); |
50 | OUT_RING (chan, chan->vram); | 44 | OUT_RING (chan, chan->vram.handle); |
51 | BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5); | 45 | BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5); |
52 | OUT_RING (chan, upper_32_bits(virtual)); | 46 | OUT_RING (chan, upper_32_bits(virtual)); |
53 | OUT_RING (chan, lower_32_bits(virtual)); | 47 | OUT_RING (chan, lower_32_bits(virtual)); |
@@ -65,7 +59,7 @@ nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence) | |||
65 | int ret = RING_SPACE(chan, 7); | 59 | int ret = RING_SPACE(chan, 7); |
66 | if (ret == 0) { | 60 | if (ret == 0) { |
67 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); | 61 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); |
68 | OUT_RING (chan, chan->vram); | 62 | OUT_RING (chan, chan->vram.handle); |
69 | BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); | 63 | BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); |
70 | OUT_RING (chan, upper_32_bits(virtual)); | 64 | OUT_RING (chan, upper_32_bits(virtual)); |
71 | OUT_RING (chan, lower_32_bits(virtual)); | 65 | OUT_RING (chan, lower_32_bits(virtual)); |
@@ -81,8 +75,7 @@ nv84_fence_emit(struct nouveau_fence *fence) | |||
81 | { | 75 | { |
82 | struct nouveau_channel *chan = fence->channel; | 76 | struct nouveau_channel *chan = fence->channel; |
83 | struct nv84_fence_chan *fctx = chan->fence; | 77 | struct nv84_fence_chan *fctx = chan->fence; |
84 | struct nouveau_fifo_chan *fifo = (void *)chan->object; | 78 | u64 addr = chan->chid * 16; |
85 | u64 addr = fifo->chid * 16; | ||
86 | 79 | ||
87 | if (fence->sysmem) | 80 | if (fence->sysmem) |
88 | addr += fctx->vma_gart.offset; | 81 | addr += fctx->vma_gart.offset; |
@@ -97,8 +90,7 @@ nv84_fence_sync(struct nouveau_fence *fence, | |||
97 | struct nouveau_channel *prev, struct nouveau_channel *chan) | 90 | struct nouveau_channel *prev, struct nouveau_channel *chan) |
98 | { | 91 | { |
99 | struct nv84_fence_chan *fctx = chan->fence; | 92 | struct nv84_fence_chan *fctx = chan->fence; |
100 | struct nouveau_fifo_chan *fifo = (void *)prev->object; | 93 | u64 addr = prev->chid * 16; |
101 | u64 addr = fifo->chid * 16; | ||
102 | 94 | ||
103 | if (fence->sysmem) | 95 | if (fence->sysmem) |
104 | addr += fctx->vma_gart.offset; | 96 | addr += fctx->vma_gart.offset; |
@@ -111,9 +103,8 @@ nv84_fence_sync(struct nouveau_fence *fence, | |||
111 | static u32 | 103 | static u32 |
112 | nv84_fence_read(struct nouveau_channel *chan) | 104 | nv84_fence_read(struct nouveau_channel *chan) |
113 | { | 105 | { |
114 | struct nouveau_fifo_chan *fifo = (void *)chan->object; | ||
115 | struct nv84_fence_priv *priv = chan->drm->fence; | 106 | struct nv84_fence_priv *priv = chan->drm->fence; |
116 | return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4); | 107 | return nouveau_bo_rd32(priv->bo, chan->chid * 16/4); |
117 | } | 108 | } |
118 | 109 | ||
119 | static void | 110 | static void |
@@ -139,8 +130,7 @@ nv84_fence_context_del(struct nouveau_channel *chan) | |||
139 | int | 130 | int |
140 | nv84_fence_context_new(struct nouveau_channel *chan) | 131 | nv84_fence_context_new(struct nouveau_channel *chan) |
141 | { | 132 | { |
142 | struct nouveau_fifo_chan *fifo = (void *)chan->object; | 133 | struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base); |
143 | struct nouveau_client *client = nouveau_client(fifo); | ||
144 | struct nv84_fence_priv *priv = chan->drm->fence; | 134 | struct nv84_fence_priv *priv = chan->drm->fence; |
145 | struct nv84_fence_chan *fctx; | 135 | struct nv84_fence_chan *fctx; |
146 | int ret, i; | 136 | int ret, i; |
@@ -156,19 +146,19 @@ nv84_fence_context_new(struct nouveau_channel *chan) | |||
156 | fctx->base.emit32 = nv84_fence_emit32; | 146 | fctx->base.emit32 = nv84_fence_emit32; |
157 | fctx->base.sync32 = nv84_fence_sync32; | 147 | fctx->base.sync32 = nv84_fence_sync32; |
158 | 148 | ||
159 | ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma); | 149 | ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma); |
160 | if (ret == 0) { | 150 | if (ret == 0) { |
161 | ret = nouveau_bo_vma_add(priv->bo_gart, client->vm, | 151 | ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm, |
162 | &fctx->vma_gart); | 152 | &fctx->vma_gart); |
163 | } | 153 | } |
164 | 154 | ||
165 | /* map display semaphore buffers into channel's vm */ | 155 | /* map display semaphore buffers into channel's vm */ |
166 | for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) { | 156 | for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) { |
167 | struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i); | 157 | struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i); |
168 | ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]); | 158 | ret = nouveau_bo_vma_add(bo, cli->vm, &fctx->dispc_vma[i]); |
169 | } | 159 | } |
170 | 160 | ||
171 | nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000); | 161 | nouveau_bo_wr32(priv->bo, chan->chid * 16/4, 0x00000000); |
172 | 162 | ||
173 | if (ret) | 163 | if (ret) |
174 | nv84_fence_context_del(chan); | 164 | nv84_fence_context_del(chan); |
@@ -178,7 +168,7 @@ nv84_fence_context_new(struct nouveau_channel *chan) | |||
178 | static bool | 168 | static bool |
179 | nv84_fence_suspend(struct nouveau_drm *drm) | 169 | nv84_fence_suspend(struct nouveau_drm *drm) |
180 | { | 170 | { |
181 | struct nouveau_fifo *pfifo = nouveau_fifo(drm->device); | 171 | struct nouveau_fifo *pfifo = nvkm_fifo(&drm->device); |
182 | struct nv84_fence_priv *priv = drm->fence; | 172 | struct nv84_fence_priv *priv = drm->fence; |
183 | int i; | 173 | int i; |
184 | 174 | ||
@@ -194,7 +184,7 @@ nv84_fence_suspend(struct nouveau_drm *drm) | |||
194 | static void | 184 | static void |
195 | nv84_fence_resume(struct nouveau_drm *drm) | 185 | nv84_fence_resume(struct nouveau_drm *drm) |
196 | { | 186 | { |
197 | struct nouveau_fifo *pfifo = nouveau_fifo(drm->device); | 187 | struct nouveau_fifo *pfifo = nvkm_fifo(&drm->device); |
198 | struct nv84_fence_priv *priv = drm->fence; | 188 | struct nv84_fence_priv *priv = drm->fence; |
199 | int i; | 189 | int i; |
200 | 190 | ||
@@ -225,7 +215,7 @@ nv84_fence_destroy(struct nouveau_drm *drm) | |||
225 | int | 215 | int |
226 | nv84_fence_create(struct nouveau_drm *drm) | 216 | nv84_fence_create(struct nouveau_drm *drm) |
227 | { | 217 | { |
228 | struct nouveau_fifo *pfifo = nouveau_fifo(drm->device); | 218 | struct nouveau_fifo *pfifo = nvkm_fifo(&drm->device); |
229 | struct nv84_fence_priv *priv; | 219 | struct nv84_fence_priv *priv; |
230 | int ret; | 220 | int ret; |
231 | 221 | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c index 9dcd30f3e1e0..61246677e8dc 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c +++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c | |||
@@ -154,11 +154,10 @@ nvc0_fbcon_accel_init(struct fb_info *info) | |||
154 | struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb; | 154 | struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb; |
155 | struct nouveau_drm *drm = nouveau_drm(dev); | 155 | struct nouveau_drm *drm = nouveau_drm(dev); |
156 | struct nouveau_channel *chan = drm->channel; | 156 | struct nouveau_channel *chan = drm->channel; |
157 | struct nouveau_object *object; | ||
158 | int ret, format; | 157 | int ret, format; |
159 | 158 | ||
160 | ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D, | 159 | ret = nvif_object_init(chan->object, NULL, 0x902d, 0x902d, NULL, 0, |
161 | 0x902d, NULL, 0, &object); | 160 | &nfbdev->twod); |
162 | if (ret) | 161 | if (ret) |
163 | return ret; | 162 | return ret; |
164 | 163 | ||
@@ -197,7 +196,7 @@ nvc0_fbcon_accel_init(struct fb_info *info) | |||
197 | } | 196 | } |
198 | 197 | ||
199 | BEGIN_NVC0(chan, NvSub2D, 0x0000, 1); | 198 | BEGIN_NVC0(chan, NvSub2D, 0x0000, 1); |
200 | OUT_RING (chan, 0x0000902d); | 199 | OUT_RING (chan, nfbdev->twod.handle); |
201 | BEGIN_NVC0(chan, NvSub2D, 0x0290, 1); | 200 | BEGIN_NVC0(chan, NvSub2D, 0x0290, 1); |
202 | OUT_RING (chan, 0); | 201 | OUT_RING (chan, 0); |
203 | BEGIN_NVC0(chan, NvSub2D, 0x0888, 1); | 202 | BEGIN_NVC0(chan, NvSub2D, 0x0888, 1); |
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c index 9566267fbc42..becf19abda2d 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fence.c +++ b/drivers/gpu/drm/nouveau/nvc0_fence.c | |||
@@ -22,12 +22,6 @@ | |||
22 | * Authors: Ben Skeggs | 22 | * Authors: Ben Skeggs |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <core/object.h> | ||
26 | #include <core/client.h> | ||
27 | #include <core/class.h> | ||
28 | |||
29 | #include <engine/fifo.h> | ||
30 | |||
31 | #include "nouveau_drm.h" | 25 | #include "nouveau_drm.h" |
32 | #include "nouveau_dma.h" | 26 | #include "nouveau_dma.h" |
33 | #include "nouveau_fence.h" | 27 | #include "nouveau_fence.h" |
diff --git a/drivers/gpu/drm/nouveau/nvif/class.h b/drivers/gpu/drm/nouveau/nvif/class.h new file mode 100644 index 000000000000..cc81e0e5fd30 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/class.h | |||
@@ -0,0 +1,558 @@ | |||
1 | #ifndef __NVIF_CLASS_H__ | ||
2 | #define __NVIF_CLASS_H__ | ||
3 | |||
4 | /******************************************************************************* | ||
5 | * class identifiers | ||
6 | ******************************************************************************/ | ||
7 | |||
8 | /* the below match nvidia-assigned (either in hw, or sw) class numbers */ | ||
9 | #define NV_DEVICE 0x00000080 | ||
10 | |||
11 | #define NV_DMA_FROM_MEMORY 0x00000002 | ||
12 | #define NV_DMA_TO_MEMORY 0x00000003 | ||
13 | #define NV_DMA_IN_MEMORY 0x0000003d | ||
14 | |||
15 | #define NV04_DISP 0x00000046 | ||
16 | |||
17 | #define NV03_CHANNEL_DMA 0x0000006b | ||
18 | #define NV10_CHANNEL_DMA 0x0000006e | ||
19 | #define NV17_CHANNEL_DMA 0x0000176e | ||
20 | #define NV40_CHANNEL_DMA 0x0000406e | ||
21 | #define NV50_CHANNEL_DMA 0x0000506e | ||
22 | #define G82_CHANNEL_DMA 0x0000826e | ||
23 | |||
24 | #define NV50_CHANNEL_GPFIFO 0x0000506f | ||
25 | #define G82_CHANNEL_GPFIFO 0x0000826f | ||
26 | #define FERMI_CHANNEL_GPFIFO 0x0000906f | ||
27 | #define KEPLER_CHANNEL_GPFIFO_A 0x0000a06f | ||
28 | |||
29 | #define NV50_DISP 0x00005070 | ||
30 | #define G82_DISP 0x00008270 | ||
31 | #define GT200_DISP 0x00008370 | ||
32 | #define GT214_DISP 0x00008570 | ||
33 | #define GT206_DISP 0x00008870 | ||
34 | #define GF110_DISP 0x00009070 | ||
35 | #define GK104_DISP 0x00009170 | ||
36 | #define GK110_DISP 0x00009270 | ||
37 | #define GM107_DISP 0x00009470 | ||
38 | |||
39 | #define NV50_DISP_CURSOR 0x0000507a | ||
40 | #define G82_DISP_CURSOR 0x0000827a | ||
41 | #define GT214_DISP_CURSOR 0x0000857a | ||
42 | #define GF110_DISP_CURSOR 0x0000907a | ||
43 | #define GK104_DISP_CURSOR 0x0000917a | ||
44 | |||
45 | #define NV50_DISP_OVERLAY 0x0000507b | ||
46 | #define G82_DISP_OVERLAY 0x0000827b | ||
47 | #define GT214_DISP_OVERLAY 0x0000857b | ||
48 | #define GF110_DISP_OVERLAY 0x0000907b | ||
49 | #define GK104_DISP_OVERLAY 0x0000917b | ||
50 | |||
51 | #define NV50_DISP_BASE_CHANNEL_DMA 0x0000507c | ||
52 | #define G82_DISP_BASE_CHANNEL_DMA 0x0000827c | ||
53 | #define GT200_DISP_BASE_CHANNEL_DMA 0x0000837c | ||
54 | #define GT214_DISP_BASE_CHANNEL_DMA 0x0000857c | ||
55 | #define GF110_DISP_BASE_CHANNEL_DMA 0x0000907c | ||
56 | #define GK104_DISP_BASE_CHANNEL_DMA 0x0000917c | ||
57 | #define GK110_DISP_BASE_CHANNEL_DMA 0x0000927c | ||
58 | |||
59 | #define NV50_DISP_CORE_CHANNEL_DMA 0x0000507d | ||
60 | #define G82_DISP_CORE_CHANNEL_DMA 0x0000827d | ||
61 | #define GT200_DISP_CORE_CHANNEL_DMA 0x0000837d | ||
62 | #define GT214_DISP_CORE_CHANNEL_DMA 0x0000857d | ||
63 | #define GT206_DISP_CORE_CHANNEL_DMA 0x0000887d | ||
64 | #define GF110_DISP_CORE_CHANNEL_DMA 0x0000907d | ||
65 | #define GK104_DISP_CORE_CHANNEL_DMA 0x0000917d | ||
66 | #define GK110_DISP_CORE_CHANNEL_DMA 0x0000927d | ||
67 | #define GM107_DISP_CORE_CHANNEL_DMA 0x0000947d | ||
68 | |||
69 | #define NV50_DISP_OVERLAY_CHANNEL_DMA 0x0000507e | ||
70 | #define G82_DISP_OVERLAY_CHANNEL_DMA 0x0000827e | ||
71 | #define GT200_DISP_OVERLAY_CHANNEL_DMA 0x0000837e | ||
72 | #define GT214_DISP_OVERLAY_CHANNEL_DMA 0x0000857e | ||
73 | #define GF110_DISP_OVERLAY_CONTROL_DMA 0x0000907e | ||
74 | #define GK104_DISP_OVERLAY_CONTROL_DMA 0x0000917e | ||
75 | |||
76 | #define FERMI_A 0x00009097 | ||
77 | #define FERMI_B 0x00009197 | ||
78 | #define FERMI_C 0x00009297 | ||
79 | |||
80 | #define KEPLER_A 0x0000a097 | ||
81 | #define KEPLER_B 0x0000a197 | ||
82 | #define KEPLER_C 0x0000a297 | ||
83 | |||
84 | #define MAXWELL_A 0x0000b097 | ||
85 | |||
86 | #define FERMI_COMPUTE_A 0x000090c0 | ||
87 | #define FERMI_COMPUTE_B 0x000091c0 | ||
88 | |||
89 | #define KEPLER_COMPUTE_A 0x0000a0c0 | ||
90 | #define KEPLER_COMPUTE_B 0x0000a1c0 | ||
91 | |||
92 | #define MAXWELL_COMPUTE_A 0x0000b0c0 | ||
93 | |||
94 | |||
95 | /******************************************************************************* | ||
96 | * client | ||
97 | ******************************************************************************/ | ||
98 | |||
99 | #define NV_CLIENT_DEVLIST 0x00 | ||
100 | |||
101 | struct nv_client_devlist_v0 { | ||
102 | __u8 version; | ||
103 | __u8 count; | ||
104 | __u8 pad02[6]; | ||
105 | __u64 device[]; | ||
106 | }; | ||
107 | |||
108 | |||
109 | /******************************************************************************* | ||
110 | * device | ||
111 | ******************************************************************************/ | ||
112 | |||
113 | struct nv_device_v0 { | ||
114 | __u8 version; | ||
115 | __u8 pad01[7]; | ||
116 | __u64 device; /* device identifier, ~0 for client default */ | ||
117 | #define NV_DEVICE_V0_DISABLE_IDENTIFY 0x0000000000000001ULL | ||
118 | #define NV_DEVICE_V0_DISABLE_MMIO 0x0000000000000002ULL | ||
119 | #define NV_DEVICE_V0_DISABLE_VBIOS 0x0000000000000004ULL | ||
120 | #define NV_DEVICE_V0_DISABLE_CORE 0x0000000000000008ULL | ||
121 | #define NV_DEVICE_V0_DISABLE_DISP 0x0000000000010000ULL | ||
122 | #define NV_DEVICE_V0_DISABLE_FIFO 0x0000000000020000ULL | ||
123 | #define NV_DEVICE_V0_DISABLE_GRAPH 0x0000000100000000ULL | ||
124 | #define NV_DEVICE_V0_DISABLE_MPEG 0x0000000200000000ULL | ||
125 | #define NV_DEVICE_V0_DISABLE_ME 0x0000000400000000ULL | ||
126 | #define NV_DEVICE_V0_DISABLE_VP 0x0000000800000000ULL | ||
127 | #define NV_DEVICE_V0_DISABLE_CRYPT 0x0000001000000000ULL | ||
128 | #define NV_DEVICE_V0_DISABLE_BSP 0x0000002000000000ULL | ||
129 | #define NV_DEVICE_V0_DISABLE_PPP 0x0000004000000000ULL | ||
130 | #define NV_DEVICE_V0_DISABLE_COPY0 0x0000008000000000ULL | ||
131 | #define NV_DEVICE_V0_DISABLE_COPY1 0x0000010000000000ULL | ||
132 | #define NV_DEVICE_V0_DISABLE_VIC 0x0000020000000000ULL | ||
133 | #define NV_DEVICE_V0_DISABLE_VENC 0x0000040000000000ULL | ||
134 | __u64 disable; /* disable particular subsystems */ | ||
135 | __u64 debug0; /* as above, but *internal* ids, and *NOT* ABI */ | ||
136 | }; | ||
137 | |||
138 | #define NV_DEVICE_V0_INFO 0x00 | ||
139 | |||
140 | struct nv_device_info_v0 { | ||
141 | __u8 version; | ||
142 | #define NV_DEVICE_INFO_V0_IGP 0x00 | ||
143 | #define NV_DEVICE_INFO_V0_PCI 0x01 | ||
144 | #define NV_DEVICE_INFO_V0_AGP 0x02 | ||
145 | #define NV_DEVICE_INFO_V0_PCIE 0x03 | ||
146 | #define NV_DEVICE_INFO_V0_SOC 0x04 | ||
147 | __u8 platform; | ||
148 | __u16 chipset; /* from NV_PMC_BOOT_0 */ | ||
149 | __u8 revision; /* from NV_PMC_BOOT_0 */ | ||
150 | #define NV_DEVICE_INFO_V0_TNT 0x01 | ||
151 | #define NV_DEVICE_INFO_V0_CELSIUS 0x02 | ||
152 | #define NV_DEVICE_INFO_V0_KELVIN 0x03 | ||
153 | #define NV_DEVICE_INFO_V0_RANKINE 0x04 | ||
154 | #define NV_DEVICE_INFO_V0_CURIE 0x05 | ||
155 | #define NV_DEVICE_INFO_V0_TESLA 0x06 | ||
156 | #define NV_DEVICE_INFO_V0_FERMI 0x07 | ||
157 | #define NV_DEVICE_INFO_V0_KEPLER 0x08 | ||
158 | #define NV_DEVICE_INFO_V0_MAXWELL 0x09 | ||
159 | __u8 family; | ||
160 | __u8 pad06[2]; | ||
161 | __u64 ram_size; | ||
162 | __u64 ram_user; | ||
163 | }; | ||
164 | |||
165 | |||
166 | /******************************************************************************* | ||
167 | * context dma | ||
168 | ******************************************************************************/ | ||
169 | |||
170 | struct nv_dma_v0 { | ||
171 | __u8 version; | ||
172 | #define NV_DMA_V0_TARGET_VM 0x00 | ||
173 | #define NV_DMA_V0_TARGET_VRAM 0x01 | ||
174 | #define NV_DMA_V0_TARGET_PCI 0x02 | ||
175 | #define NV_DMA_V0_TARGET_PCI_US 0x03 | ||
176 | #define NV_DMA_V0_TARGET_AGP 0x04 | ||
177 | __u8 target; | ||
178 | #define NV_DMA_V0_ACCESS_VM 0x00 | ||
179 | #define NV_DMA_V0_ACCESS_RD 0x01 | ||
180 | #define NV_DMA_V0_ACCESS_WR 0x02 | ||
181 | #define NV_DMA_V0_ACCESS_RDWR (NV_DMA_V0_ACCESS_RD | NV_DMA_V0_ACCESS_WR) | ||
182 | __u8 access; | ||
183 | __u8 pad03[5]; | ||
184 | __u64 start; | ||
185 | __u64 limit; | ||
186 | /* ... chipset-specific class data */ | ||
187 | }; | ||
188 | |||
189 | struct nv50_dma_v0 { | ||
190 | __u8 version; | ||
191 | #define NV50_DMA_V0_PRIV_VM 0x00 | ||
192 | #define NV50_DMA_V0_PRIV_US 0x01 | ||
193 | #define NV50_DMA_V0_PRIV__S 0x02 | ||
194 | __u8 priv; | ||
195 | #define NV50_DMA_V0_PART_VM 0x00 | ||
196 | #define NV50_DMA_V0_PART_256 0x01 | ||
197 | #define NV50_DMA_V0_PART_1KB 0x02 | ||
198 | __u8 part; | ||
199 | #define NV50_DMA_V0_COMP_NONE 0x00 | ||
200 | #define NV50_DMA_V0_COMP_1 0x01 | ||
201 | #define NV50_DMA_V0_COMP_2 0x02 | ||
202 | #define NV50_DMA_V0_COMP_VM 0x03 | ||
203 | __u8 comp; | ||
204 | #define NV50_DMA_V0_KIND_PITCH 0x00 | ||
205 | #define NV50_DMA_V0_KIND_VM 0x7f | ||
206 | __u8 kind; | ||
207 | __u8 pad05[3]; | ||
208 | }; | ||
209 | |||
210 | struct gf100_dma_v0 { | ||
211 | __u8 version; | ||
212 | #define GF100_DMA_V0_PRIV_VM 0x00 | ||
213 | #define GF100_DMA_V0_PRIV_US 0x01 | ||
214 | #define GF100_DMA_V0_PRIV__S 0x02 | ||
215 | __u8 priv; | ||
216 | #define GF100_DMA_V0_KIND_PITCH 0x00 | ||
217 | #define GF100_DMA_V0_KIND_VM 0xff | ||
218 | __u8 kind; | ||
219 | __u8 pad03[5]; | ||
220 | }; | ||
221 | |||
222 | struct gf110_dma_v0 { | ||
223 | __u8 version; | ||
224 | #define GF110_DMA_V0_PAGE_LP 0x00 | ||
225 | #define GF110_DMA_V0_PAGE_SP 0x01 | ||
226 | __u8 page; | ||
227 | #define GF110_DMA_V0_KIND_PITCH 0x00 | ||
228 | #define GF110_DMA_V0_KIND_VM 0xff | ||
229 | __u8 kind; | ||
230 | __u8 pad03[5]; | ||
231 | }; | ||
232 | |||
233 | |||
234 | /******************************************************************************* | ||
235 | * perfmon | ||
236 | ******************************************************************************/ | ||
237 | |||
238 | struct nvif_perfctr_v0 { | ||
239 | __u8 version; | ||
240 | __u8 pad01[1]; | ||
241 | __u16 logic_op; | ||
242 | __u8 pad04[4]; | ||
243 | char name[4][64]; | ||
244 | }; | ||
245 | |||
246 | #define NVIF_PERFCTR_V0_QUERY 0x00 | ||
247 | #define NVIF_PERFCTR_V0_SAMPLE 0x01 | ||
248 | #define NVIF_PERFCTR_V0_READ 0x02 | ||
249 | |||
250 | struct nvif_perfctr_query_v0 { | ||
251 | __u8 version; | ||
252 | __u8 pad01[3]; | ||
253 | __u32 iter; | ||
254 | char name[64]; | ||
255 | }; | ||
256 | |||
257 | struct nvif_perfctr_sample { | ||
258 | }; | ||
259 | |||
260 | struct nvif_perfctr_read_v0 { | ||
261 | __u8 version; | ||
262 | __u8 pad01[7]; | ||
263 | __u32 ctr; | ||
264 | __u32 clk; | ||
265 | }; | ||
266 | |||
267 | |||
268 | /******************************************************************************* | ||
269 | * device control | ||
270 | ******************************************************************************/ | ||
271 | |||
272 | #define NVIF_CONTROL_PSTATE_INFO 0x00 | ||
273 | #define NVIF_CONTROL_PSTATE_ATTR 0x01 | ||
274 | #define NVIF_CONTROL_PSTATE_USER 0x02 | ||
275 | |||
276 | struct nvif_control_pstate_info_v0 { | ||
277 | __u8 version; | ||
278 | __u8 count; /* out: number of power states */ | ||
279 | #define NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE (-1) | ||
280 | #define NVIF_CONTROL_PSTATE_INFO_V0_USTATE_PERFMON (-2) | ||
281 | __s8 ustate_ac; /* out: target pstate index */ | ||
282 | __s8 ustate_dc; /* out: target pstate index */ | ||
283 | __s8 pwrsrc; /* out: current power source */ | ||
284 | #define NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN (-1) | ||
285 | #define NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_PERFMON (-2) | ||
286 | __s8 pstate; /* out: current pstate index */ | ||
287 | __u8 pad06[2]; | ||
288 | }; | ||
289 | |||
290 | struct nvif_control_pstate_attr_v0 { | ||
291 | __u8 version; | ||
292 | #define NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT (-1) | ||
293 | __s8 state; /* in: index of pstate to query | ||
294 | * out: pstate identifier | ||
295 | */ | ||
296 | __u8 index; /* in: index of attribute to query | ||
297 | * out: index of next attribute, or 0 if no more | ||
298 | */ | ||
299 | __u8 pad03[5]; | ||
300 | __u32 min; | ||
301 | __u32 max; | ||
302 | char name[32]; | ||
303 | char unit[16]; | ||
304 | }; | ||
305 | |||
306 | struct nvif_control_pstate_user_v0 { | ||
307 | __u8 version; | ||
308 | #define NVIF_CONTROL_PSTATE_USER_V0_STATE_UNKNOWN (-1) | ||
309 | #define NVIF_CONTROL_PSTATE_USER_V0_STATE_PERFMON (-2) | ||
310 | __s8 ustate; /* in: pstate identifier */ | ||
311 | __s8 pwrsrc; /* in: target power source */ | ||
312 | __u8 pad03[5]; | ||
313 | }; | ||
314 | |||
315 | |||
316 | /******************************************************************************* | ||
317 | * DMA FIFO channels | ||
318 | ******************************************************************************/ | ||
319 | |||
320 | struct nv03_channel_dma_v0 { | ||
321 | __u8 version; | ||
322 | __u8 chid; | ||
323 | __u8 pad02[2]; | ||
324 | __u32 pushbuf; | ||
325 | __u64 offset; | ||
326 | }; | ||
327 | |||
328 | #define G82_CHANNEL_DMA_V0_NTFY_UEVENT 0x00 | ||
329 | |||
330 | /******************************************************************************* | ||
331 | * GPFIFO channels | ||
332 | ******************************************************************************/ | ||
333 | |||
334 | struct nv50_channel_gpfifo_v0 { | ||
335 | __u8 version; | ||
336 | __u8 chid; | ||
337 | __u8 pad01[6]; | ||
338 | __u32 pushbuf; | ||
339 | __u32 ilength; | ||
340 | __u64 ioffset; | ||
341 | }; | ||
342 | |||
343 | struct kepler_channel_gpfifo_a_v0 { | ||
344 | __u8 version; | ||
345 | #define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR 0x01 | ||
346 | #define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_VP 0x02 | ||
347 | #define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_PPP 0x04 | ||
348 | #define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_BSP 0x08 | ||
349 | #define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0 0x10 | ||
350 | #define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1 0x20 | ||
351 | #define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_ENC 0x40 | ||
352 | __u8 engine; | ||
353 | __u16 chid; | ||
354 | __u8 pad04[4]; | ||
355 | __u32 pushbuf; | ||
356 | __u32 ilength; | ||
357 | __u64 ioffset; | ||
358 | }; | ||
359 | |||
360 | /******************************************************************************* | ||
361 | * legacy display | ||
362 | ******************************************************************************/ | ||
363 | |||
364 | #define NV04_DISP_NTFY_VBLANK 0x00 | ||
365 | #define NV04_DISP_NTFY_CONN 0x01 | ||
366 | |||
367 | struct nv04_disp_mthd_v0 { | ||
368 | __u8 version; | ||
369 | #define NV04_DISP_SCANOUTPOS 0x00 | ||
370 | __u8 method; | ||
371 | __u8 head; | ||
372 | __u8 pad03[5]; | ||
373 | }; | ||
374 | |||
375 | struct nv04_disp_scanoutpos_v0 { | ||
376 | __u8 version; | ||
377 | __u8 pad01[7]; | ||
378 | __s64 time[2]; | ||
379 | __u16 vblanks; | ||
380 | __u16 vblanke; | ||
381 | __u16 vtotal; | ||
382 | __u16 vline; | ||
383 | __u16 hblanks; | ||
384 | __u16 hblanke; | ||
385 | __u16 htotal; | ||
386 | __u16 hline; | ||
387 | }; | ||
388 | |||
389 | /******************************************************************************* | ||
390 | * display | ||
391 | ******************************************************************************/ | ||
392 | |||
393 | #define NV50_DISP_MTHD 0x00 | ||
394 | |||
395 | struct nv50_disp_mthd_v0 { | ||
396 | __u8 version; | ||
397 | #define NV50_DISP_SCANOUTPOS 0x00 | ||
398 | __u8 method; | ||
399 | __u8 head; | ||
400 | __u8 pad03[5]; | ||
401 | }; | ||
402 | |||
403 | struct nv50_disp_mthd_v1 { | ||
404 | __u8 version; | ||
405 | #define NV50_DISP_MTHD_V1_DAC_PWR 0x10 | ||
406 | #define NV50_DISP_MTHD_V1_DAC_LOAD 0x11 | ||
407 | #define NV50_DISP_MTHD_V1_SOR_PWR 0x20 | ||
408 | #define NV50_DISP_MTHD_V1_SOR_HDA_ELD 0x21 | ||
409 | #define NV50_DISP_MTHD_V1_SOR_HDMI_PWR 0x22 | ||
410 | #define NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT 0x23 | ||
411 | #define NV50_DISP_MTHD_V1_SOR_DP_PWR 0x24 | ||
412 | #define NV50_DISP_MTHD_V1_PIOR_PWR 0x30 | ||
413 | __u8 method; | ||
414 | __u16 hasht; | ||
415 | __u16 hashm; | ||
416 | __u8 pad06[2]; | ||
417 | }; | ||
418 | |||
419 | struct nv50_disp_dac_pwr_v0 { | ||
420 | __u8 version; | ||
421 | __u8 state; | ||
422 | __u8 data; | ||
423 | __u8 vsync; | ||
424 | __u8 hsync; | ||
425 | __u8 pad05[3]; | ||
426 | }; | ||
427 | |||
428 | struct nv50_disp_dac_load_v0 { | ||
429 | __u8 version; | ||
430 | __u8 load; | ||
431 | __u16 data; | ||
432 | __u8 pad04[4]; | ||
433 | }; | ||
434 | |||
435 | struct nv50_disp_sor_pwr_v0 { | ||
436 | __u8 version; | ||
437 | __u8 state; | ||
438 | __u8 pad02[6]; | ||
439 | }; | ||
440 | |||
441 | struct nv50_disp_sor_hda_eld_v0 { | ||
442 | __u8 version; | ||
443 | __u8 pad01[7]; | ||
444 | __u8 data[]; | ||
445 | }; | ||
446 | |||
447 | struct nv50_disp_sor_hdmi_pwr_v0 { | ||
448 | __u8 version; | ||
449 | __u8 state; | ||
450 | __u8 max_ac_packet; | ||
451 | __u8 rekey; | ||
452 | __u8 pad04[4]; | ||
453 | }; | ||
454 | |||
455 | struct nv50_disp_sor_lvds_script_v0 { | ||
456 | __u8 version; | ||
457 | __u8 pad01[1]; | ||
458 | __u16 script; | ||
459 | __u8 pad04[4]; | ||
460 | }; | ||
461 | |||
462 | struct nv50_disp_sor_dp_pwr_v0 { | ||
463 | __u8 version; | ||
464 | __u8 state; | ||
465 | __u8 pad02[6]; | ||
466 | }; | ||
467 | |||
468 | struct nv50_disp_pior_pwr_v0 { | ||
469 | __u8 version; | ||
470 | __u8 state; | ||
471 | __u8 type; | ||
472 | __u8 pad03[5]; | ||
473 | }; | ||
474 | |||
475 | /* core */ | ||
476 | struct nv50_disp_core_channel_dma_v0 { | ||
477 | __u8 version; | ||
478 | __u8 pad01[3]; | ||
479 | __u32 pushbuf; | ||
480 | }; | ||
481 | |||
482 | /* cursor immediate */ | ||
483 | struct nv50_disp_cursor_v0 { | ||
484 | __u8 version; | ||
485 | __u8 head; | ||
486 | __u8 pad02[6]; | ||
487 | }; | ||
488 | |||
489 | /* base */ | ||
490 | struct nv50_disp_base_channel_dma_v0 { | ||
491 | __u8 version; | ||
492 | __u8 pad01[2]; | ||
493 | __u8 head; | ||
494 | __u32 pushbuf; | ||
495 | }; | ||
496 | |||
497 | /* overlay */ | ||
498 | struct nv50_disp_overlay_channel_dma_v0 { | ||
499 | __u8 version; | ||
500 | __u8 pad01[2]; | ||
501 | __u8 head; | ||
502 | __u32 pushbuf; | ||
503 | }; | ||
504 | |||
505 | /* overlay immediate */ | ||
506 | struct nv50_disp_overlay_v0 { | ||
507 | __u8 version; | ||
508 | __u8 head; | ||
509 | __u8 pad02[6]; | ||
510 | }; | ||
511 | |||
512 | |||
513 | /******************************************************************************* | ||
514 | * fermi | ||
515 | ******************************************************************************/ | ||
516 | |||
517 | #define FERMI_A_ZBC_COLOR 0x00 | ||
518 | #define FERMI_A_ZBC_DEPTH 0x01 | ||
519 | |||
520 | struct fermi_a_zbc_color_v0 { | ||
521 | __u8 version; | ||
522 | #define FERMI_A_ZBC_COLOR_V0_FMT_ZERO 0x01 | ||
523 | #define FERMI_A_ZBC_COLOR_V0_FMT_UNORM_ONE 0x02 | ||
524 | #define FERMI_A_ZBC_COLOR_V0_FMT_RF32_GF32_BF32_AF32 0x04 | ||
525 | #define FERMI_A_ZBC_COLOR_V0_FMT_R16_G16_B16_A16 0x08 | ||
526 | #define FERMI_A_ZBC_COLOR_V0_FMT_RN16_GN16_BN16_AN16 0x0c | ||
527 | #define FERMI_A_ZBC_COLOR_V0_FMT_RS16_GS16_BS16_AS16 0x10 | ||
528 | #define FERMI_A_ZBC_COLOR_V0_FMT_RU16_GU16_BU16_AU16 0x14 | ||
529 | #define FERMI_A_ZBC_COLOR_V0_FMT_RF16_GF16_BF16_AF16 0x16 | ||
530 | #define FERMI_A_ZBC_COLOR_V0_FMT_A8R8G8B8 0x18 | ||
531 | #define FERMI_A_ZBC_COLOR_V0_FMT_A8RL8GL8BL8 0x1c | ||
532 | #define FERMI_A_ZBC_COLOR_V0_FMT_A2B10G10R10 0x20 | ||
533 | #define FERMI_A_ZBC_COLOR_V0_FMT_AU2BU10GU10RU10 0x24 | ||
534 | #define FERMI_A_ZBC_COLOR_V0_FMT_A8B8G8R8 0x28 | ||
535 | #define FERMI_A_ZBC_COLOR_V0_FMT_A8BL8GL8RL8 0x2c | ||
536 | #define FERMI_A_ZBC_COLOR_V0_FMT_AN8BN8GN8RN8 0x30 | ||
537 | #define FERMI_A_ZBC_COLOR_V0_FMT_AS8BS8GS8RS8 0x34 | ||
538 | #define FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8 0x38 | ||
539 | #define FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10 0x3c | ||
540 | #define FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11 0x40 | ||
541 | __u8 format; | ||
542 | __u8 index; | ||
543 | __u8 pad03[5]; | ||
544 | __u32 ds[4]; | ||
545 | __u32 l2[4]; | ||
546 | }; | ||
547 | |||
548 | struct fermi_a_zbc_depth_v0 { | ||
549 | __u8 version; | ||
550 | #define FERMI_A_ZBC_DEPTH_V0_FMT_FP32 0x01 | ||
551 | __u8 format; | ||
552 | __u8 index; | ||
553 | __u8 pad03[5]; | ||
554 | __u32 ds; | ||
555 | __u32 l2; | ||
556 | }; | ||
557 | |||
558 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c new file mode 100644 index 000000000000..3c4df1fc26dc --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/client.c | |||
@@ -0,0 +1,129 @@ | |||
1 | /* | ||
2 | * Copyright 2013 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | ||
23 | */ | ||
24 | |||
25 | #include "client.h" | ||
26 | #include "driver.h" | ||
27 | #include "ioctl.h" | ||
28 | |||
29 | int | ||
30 | nvif_client_ioctl(struct nvif_client *client, void *data, u32 size) | ||
31 | { | ||
32 | return client->driver->ioctl(client->base.priv, client->super, data, size, NULL); | ||
33 | } | ||
34 | |||
35 | int | ||
36 | nvif_client_suspend(struct nvif_client *client) | ||
37 | { | ||
38 | return client->driver->suspend(client->base.priv); | ||
39 | } | ||
40 | |||
41 | int | ||
42 | nvif_client_resume(struct nvif_client *client) | ||
43 | { | ||
44 | return client->driver->resume(client->base.priv); | ||
45 | } | ||
46 | |||
47 | void | ||
48 | nvif_client_fini(struct nvif_client *client) | ||
49 | { | ||
50 | if (client->driver) { | ||
51 | client->driver->fini(client->base.priv); | ||
52 | client->driver = NULL; | ||
53 | client->base.parent = NULL; | ||
54 | nvif_object_fini(&client->base); | ||
55 | } | ||
56 | } | ||
57 | |||
58 | const struct nvif_driver * | ||
59 | nvif_drivers[] = { | ||
60 | #ifdef __KERNEL__ | ||
61 | &nvif_driver_nvkm, | ||
62 | #else | ||
63 | &nvif_driver_drm, | ||
64 | &nvif_driver_lib, | ||
65 | #endif | ||
66 | NULL | ||
67 | }; | ||
68 | |||
69 | int | ||
70 | nvif_client_init(void (*dtor)(struct nvif_client *), const char *driver, | ||
71 | const char *name, u64 device, const char *cfg, const char *dbg, | ||
72 | struct nvif_client *client) | ||
73 | { | ||
74 | int ret, i; | ||
75 | |||
76 | ret = nvif_object_init(NULL, (void*)dtor, 0, 0, NULL, 0, &client->base); | ||
77 | if (ret) | ||
78 | return ret; | ||
79 | |||
80 | client->base.parent = &client->base; | ||
81 | client->base.handle = ~0; | ||
82 | client->object = &client->base; | ||
83 | client->super = true; | ||
84 | |||
85 | for (i = 0, ret = -EINVAL; (client->driver = nvif_drivers[i]); i++) { | ||
86 | if (!driver || !strcmp(client->driver->name, driver)) { | ||
87 | ret = client->driver->init(name, device, cfg, dbg, | ||
88 | &client->base.priv); | ||
89 | if (!ret || driver) | ||
90 | break; | ||
91 | } | ||
92 | } | ||
93 | |||
94 | if (ret) | ||
95 | nvif_client_fini(client); | ||
96 | return ret; | ||
97 | } | ||
98 | |||
99 | static void | ||
100 | nvif_client_del(struct nvif_client *client) | ||
101 | { | ||
102 | nvif_client_fini(client); | ||
103 | kfree(client); | ||
104 | } | ||
105 | |||
106 | int | ||
107 | nvif_client_new(const char *driver, const char *name, u64 device, | ||
108 | const char *cfg, const char *dbg, | ||
109 | struct nvif_client **pclient) | ||
110 | { | ||
111 | struct nvif_client *client = kzalloc(sizeof(*client), GFP_KERNEL); | ||
112 | if (client) { | ||
113 | int ret = nvif_client_init(nvif_client_del, driver, name, | ||
114 | device, cfg, dbg, client); | ||
115 | if (ret) { | ||
116 | kfree(client); | ||
117 | client = NULL; | ||
118 | } | ||
119 | *pclient = client; | ||
120 | return ret; | ||
121 | } | ||
122 | return -ENOMEM; | ||
123 | } | ||
124 | |||
125 | void | ||
126 | nvif_client_ref(struct nvif_client *client, struct nvif_client **pclient) | ||
127 | { | ||
128 | nvif_object_ref(&client->base, (struct nvif_object **)pclient); | ||
129 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nvif/client.h b/drivers/gpu/drm/nouveau/nvif/client.h new file mode 100644 index 000000000000..28352f0882ec --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/client.h | |||
@@ -0,0 +1,39 @@ | |||
#ifndef __NVIF_CLIENT_H__
#define __NVIF_CLIENT_H__

#include "object.h"

/*
 * An NVIF client is the root of an object tree: nvif_client_init()
 * makes the client its own parent, and base.priv holds the private
 * handle returned by the selected backend driver's init().
 */
struct nvif_client {
	struct nvif_object base;
	struct nvif_object *object; /*XXX: hack for nvif_object() */
	const struct nvif_driver *driver; /* backend selected at init time */
	bool super; /* NOTE(review): set true by nvif_client_init();
		     * presumably a privileged-ioctl flag -- confirm */
};

/*
 * Walk up the object hierarchy to its root client; the client is the
 * only object whose parent pointer refers to itself.  Returns NULL if
 * @object is NULL.
 */
static inline struct nvif_client *
nvif_client(struct nvif_object *object)
{
	while (object && object->parent != object)
		object = object->parent;
	return (void *)object;
}

int nvif_client_init(void (*dtor)(struct nvif_client *), const char *,
		     const char *, u64, const char *, const char *,
		     struct nvif_client *);
void nvif_client_fini(struct nvif_client *);
int nvif_client_new(const char *, const char *, u64, const char *,
		    const char *, struct nvif_client **);
void nvif_client_ref(struct nvif_client *, struct nvif_client **);
int nvif_client_ioctl(struct nvif_client *, void *, u32);
int nvif_client_suspend(struct nvif_client *);
int nvif_client_resume(struct nvif_client *);

/*XXX: shortcut from an nvif object to the in-kernel (nvkm) client
 * behind it, via the backend-private handle. */
#include <core/client.h>
#define nvkm_client(a) ({ \
	struct nvif_client *_client = nvif_client(nvif_object(a)); \
	nouveau_client(_client->base.priv); \
})

#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/device.c b/drivers/gpu/drm/nouveau/nvif/device.c new file mode 100644 index 000000000000..f477579725e3 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/device.c | |||
@@ -0,0 +1,78 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | ||
23 | */ | ||
24 | |||
25 | #include "device.h" | ||
26 | |||
/* Tear down a device handle; inverse of nvif_device_init().  Only the
 * embedded base object needs finalising -- 'info' is plain data. */
void
nvif_device_fini(struct nvif_device *device)
{
	nvif_object_fini(&device->base);
}
32 | |||
33 | int | ||
34 | nvif_device_init(struct nvif_object *parent, void (*dtor)(struct nvif_device *), | ||
35 | u32 handle, u32 oclass, void *data, u32 size, | ||
36 | struct nvif_device *device) | ||
37 | { | ||
38 | int ret = nvif_object_init(parent, (void *)dtor, handle, oclass, | ||
39 | data, size, &device->base); | ||
40 | if (ret == 0) { | ||
41 | device->object = &device->base; | ||
42 | device->info.version = 0; | ||
43 | ret = nvif_object_mthd(&device->base, NV_DEVICE_V0_INFO, | ||
44 | &device->info, sizeof(device->info)); | ||
45 | } | ||
46 | return ret; | ||
47 | } | ||
48 | |||
/* Destructor paired with nvif_device_new(): finalises the device and
 * releases the kzalloc()d storage.  Registered with the object system
 * as the dtor in nvif_device_new(). */
static void
nvif_device_del(struct nvif_device *device)
{
	nvif_device_fini(device);
	kfree(device);
}
55 | |||
56 | int | ||
57 | nvif_device_new(struct nvif_object *parent, u32 handle, u32 oclass, | ||
58 | void *data, u32 size, struct nvif_device **pdevice) | ||
59 | { | ||
60 | struct nvif_device *device = kzalloc(sizeof(*device), GFP_KERNEL); | ||
61 | if (device) { | ||
62 | int ret = nvif_device_init(parent, nvif_device_del, handle, | ||
63 | oclass, data, size, device); | ||
64 | if (ret) { | ||
65 | kfree(device); | ||
66 | device = NULL; | ||
67 | } | ||
68 | *pdevice = device; | ||
69 | return ret; | ||
70 | } | ||
71 | return -ENOMEM; | ||
72 | } | ||
73 | |||
/* Reference-management wrapper: forwards to nvif_object_ref() on the
 * embedded base object, updating *pdevice to point at @device.  Same
 * semantics as nvif_client_ref(). */
void
nvif_device_ref(struct nvif_device *device, struct nvif_device **pdevice)
{
	nvif_object_ref(&device->base, (struct nvif_object **)pdevice);
}
diff --git a/drivers/gpu/drm/nouveau/nvif/device.h b/drivers/gpu/drm/nouveau/nvif/device.h new file mode 100644 index 000000000000..43180f9fe630 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/device.h | |||
@@ -0,0 +1,62 @@ | |||
#ifndef __NVIF_DEVICE_H__
#define __NVIF_DEVICE_H__

#include "object.h"
#include "class.h"

/*
 * Handle to a device object.  'info' caches the NV_DEVICE_V0_INFO
 * query made by nvif_device_init().
 */
struct nvif_device {
	struct nvif_object base;
	struct nvif_object *object; /*XXX: hack for nvif_object() */
	struct nv_device_info_v0 info; /* filled in by nvif_device_init() */
};

/*
 * Walk up the object tree to the enclosing device object, identified
 * by its class number.  Returns NULL if @object has no device parent
 * (the walk falls off the top of the tree).
 */
static inline struct nvif_device *
nvif_device(struct nvif_object *object)
{
	while (object && object->oclass != 0x0080 /*XXX: NV_DEVICE_CLASS*/ )
		object = object->parent;
	return (void *)object;
}

int nvif_device_init(struct nvif_object *, void (*dtor)(struct nvif_device *),
		     u32 handle, u32 oclass, void *, u32,
		     struct nvif_device *);
void nvif_device_fini(struct nvif_device *);
int nvif_device_new(struct nvif_object *, u32 handle, u32 oclass,
		    void *, u32, struct nvif_device **);
void nvif_device_ref(struct nvif_device *, struct nvif_device **);

/*XXX: convenience shortcuts that reach through the NVIF layer into
 * the in-kernel (nvkm) subdev/engine objects.  Marked XXX upstream,
 * so presumably transitional -- do not add new users. */
#include <subdev/bios.h>
#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/bar.h>
#include <subdev/gpio.h>
#include <subdev/clock.h>
#include <subdev/i2c.h>
#include <subdev/timer.h>
#include <subdev/therm.h>

#define nvkm_device(a) nv_device(nvkm_object((a)))
#define nvkm_bios(a) nouveau_bios(nvkm_device(a))
#define nvkm_fb(a) nouveau_fb(nvkm_device(a))
#define nvkm_vmmgr(a) nouveau_vmmgr(nvkm_device(a))
#define nvkm_bar(a) nouveau_bar(nvkm_device(a))
#define nvkm_gpio(a) nouveau_gpio(nvkm_device(a))
#define nvkm_clock(a) nouveau_clock(nvkm_device(a))
#define nvkm_i2c(a) nouveau_i2c(nvkm_device(a))
#define nvkm_timer(a) nouveau_timer(nvkm_device(a))
#define nvkm_wait(a,b,c,d) nv_wait(nvkm_timer(a), (b), (c), (d))
#define nvkm_wait_cb(a,b,c) nv_wait_cb(nvkm_timer(a), (b), (c))
#define nvkm_therm(a) nouveau_therm(nvkm_device(a))

#include <engine/device.h>
#include <engine/fifo.h>
#include <engine/graph.h>
#include <engine/software.h>

#define nvkm_fifo(a) nouveau_fifo(nvkm_device(a))
#define nvkm_fifo_chan(a) ((struct nouveau_fifo_chan *)nvkm_object(a))
#define nvkm_gr(a) ((struct nouveau_graph *)nouveau_engine(nvkm_object(a), NVDEV_ENGINE_GR))

#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/driver.h b/drivers/gpu/drm/nouveau/nvif/driver.h new file mode 100644 index 000000000000..b72a8f0c2758 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/driver.h | |||
@@ -0,0 +1,21 @@ | |||
#ifndef __NVIF_DRIVER_H__
#define __NVIF_DRIVER_H__

/*
 * Backend transport used by an nvif_client to reach the device.
 * init() produces a private handle in *priv which is passed back to
 * every other operation.  'name' is matched (strcmp) against the
 * driver string given to nvif_client_init() when selecting a backend.
 */
struct nvif_driver {
	const char *name;
	int (*init)(const char *name, u64 device, const char *cfg,
		    const char *dbg, void **priv);
	void (*fini)(void *priv);
	int (*suspend)(void *priv);
	int (*resume)(void *priv);
	/* 'super' requests privileged handling; 'hack' is an opaque
	 * out-parameter -- NOTE(review): semantics not visible here,
	 * confirm against the ioctl implementations. */
	int (*ioctl)(void *priv, bool super, void *data, u32 size, void **hack);
	void *(*map)(void *priv, u64 handle, u32 size);
	void (*unmap)(void *priv, void *ptr, u32 size);
	bool keep; /* NOTE(review): purpose not visible in this file -- confirm */
};

/* Available backends: in-kernel nvkm, the DRM layer, and a userspace
 * library implementation. */
extern const struct nvif_driver nvif_driver_nvkm;
extern const struct nvif_driver nvif_driver_drm;
extern const struct nvif_driver nvif_driver_lib;

#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/event.h b/drivers/gpu/drm/nouveau/nvif/event.h new file mode 100644 index 000000000000..21764499b4be --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/event.h | |||
@@ -0,0 +1,62 @@ | |||
#ifndef __NVIF_EVENT_H__
#define __NVIF_EVENT_H__

/*
 * Wire formats for NVIF notification (event) requests and replies.
 * A request embeds struct nvif_notify_req_v0 followed by one of the
 * type-specific request structs below in data[]; replies mirror this
 * with struct nvif_notify_rep_v0.
 */

struct nvif_notify_req_v0 {
	__u8 version;
	__u8 reply;
/* routing: 0x00 means the notification is handled by NVIF itself */
#define NVIF_NOTIFY_V0_ROUTE_NVIF 0x00
	__u8 pad02[5];
	__u8 route;
	__u64 token; /* must be unique */
	__u8 data[]; /* request data (below) */
};

struct nvif_notify_rep_v0 {
	__u8 version;
	__u8 pad01[6];
	__u8 route; /* echoed from the request */
	__u64 token; /* echoed from the request */
	__u8 data[]; /* reply data (below) */
};

/* Display head (CRTC) notifications.  NOTE(review): 'head' is
 * presumably the head index -- confirm against the display code. */
struct nvif_notify_head_req_v0 {
	/* nvif_notify_req ... */
	__u8 version;
	__u8 head;
	__u8 pad02[6];
};

struct nvif_notify_head_rep_v0 {
	/* nvif_notify_rep ... */
	__u8 version;
	__u8 pad01[7];
};

/* Connector hotplug/IRQ notifications; 'mask' is a bitmask of the
 * event types below (ANY covers all three). */
struct nvif_notify_conn_req_v0 {
	/* nvif_notify_req ... */
	__u8 version;
#define NVIF_NOTIFY_CONN_V0_PLUG 0x01
#define NVIF_NOTIFY_CONN_V0_UNPLUG 0x02
#define NVIF_NOTIFY_CONN_V0_IRQ 0x04
#define NVIF_NOTIFY_CONN_V0_ANY 0x07
	__u8 mask;
	__u8 conn;
	__u8 pad03[5];
};

struct nvif_notify_conn_rep_v0 {
	/* nvif_notify_rep ... */
	__u8 version;
	__u8 mask; /* which of the requested events fired */
	__u8 pad02[6];
};

/* Channel user-event notification carries no extra payload. */
struct nvif_notify_uevent_req {
	/* nvif_notify_req ... */
};

struct nvif_notify_uevent_rep {
	/* nvif_notify_rep ... */
};

#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/ioctl.h b/drivers/gpu/drm/nouveau/nvif/ioctl.h new file mode 100644 index 000000000000..4cd8e323b23d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/ioctl.h | |||
@@ -0,0 +1,128 @@ | |||
#ifndef __NVIF_IOCTL_H__
#define __NVIF_IOCTL_H__

/*
 * Argument layout for the NVIF ioctl interface.  Every call starts
 * with struct nvif_ioctl_v0; 'type' selects which of the structs
 * below follows in data[].
 */
struct nvif_ioctl_v0 {
	__u8 version;
#define NVIF_IOCTL_V0_OWNER_NVIF 0x00
#define NVIF_IOCTL_V0_OWNER_ANY 0xff
	__u8 owner;
#define NVIF_IOCTL_V0_NOP 0x00
#define NVIF_IOCTL_V0_SCLASS 0x01
#define NVIF_IOCTL_V0_NEW 0x02
#define NVIF_IOCTL_V0_DEL 0x03
#define NVIF_IOCTL_V0_MTHD 0x04
#define NVIF_IOCTL_V0_RD 0x05
#define NVIF_IOCTL_V0_WR 0x06
#define NVIF_IOCTL_V0_MAP 0x07
#define NVIF_IOCTL_V0_UNMAP 0x08
#define NVIF_IOCTL_V0_NTFY_NEW 0x09
#define NVIF_IOCTL_V0_NTFY_DEL 0x0a
#define NVIF_IOCTL_V0_NTFY_GET 0x0b
#define NVIF_IOCTL_V0_NTFY_PUT 0x0c
	__u8 type;
	__u8 path_nr; /* number of valid entries in path[] */
/* NOTE: the ROUTE_* values describe the 'route' member below pad04 */
#define NVIF_IOCTL_V0_ROUTE_NVIF 0x00
#define NVIF_IOCTL_V0_ROUTE_HIDDEN 0xff
	__u8 pad04[3];
	__u8 route;
	__u64 token;
	__u32 path[8]; /* in reverse */
	__u8 data[]; /* ioctl data (below) */
};

struct nvif_ioctl_nop {
};

/* Query the classes supported by an object; oclass[] holds 'count'
 * entries on return. */
struct nvif_ioctl_sclass_v0 {
	/* nvif_ioctl ... */
	__u8 version;
	__u8 count;
	__u8 pad02[6];
	__u32 oclass[];
};

/* Create a new object of class 'oclass' with user handle 'handle'. */
struct nvif_ioctl_new_v0 {
	/* nvif_ioctl ... */
	__u8 version;
	__u8 pad01[6];
	__u8 route;
	__u64 token;
	__u32 handle;
/* these class numbers are made up by us, and not nvidia-assigned */
#define NVIF_IOCTL_NEW_V0_PERFCTR 0x0000ffff
#define NVIF_IOCTL_NEW_V0_CONTROL 0x0000fffe
	__u32 oclass;
	__u8 data[]; /* class data (class.h) */
};

struct nvif_ioctl_del {
};

/* Read of 'size' bytes at 'addr'; result returned in 'data'.
 * NOTE(review): exact addressing semantics not visible here. */
struct nvif_ioctl_rd_v0 {
	/* nvif_ioctl ... */
	__u8 version;
	__u8 size;
	__u8 pad02[2];
	__u32 data;
	__u64 addr;
};

/* Write of 'size' bytes of 'data' at 'addr'. */
struct nvif_ioctl_wr_v0 {
	/* nvif_ioctl ... */
	__u8 version;
	__u8 size;
	__u8 pad02[2];
	__u32 data;
	__u64 addr;
};

/* Map an object; returns a 'handle'/'length' pair usable with the
 * driver's map() callback (see nvif/driver.h). */
struct nvif_ioctl_map_v0 {
	/* nvif_ioctl ... */
	__u8 version;
	__u8 pad01[3];
	__u32 length;
	__u64 handle;
};

struct nvif_ioctl_unmap {
};

/* Notification management: create/destroy ('index' identifies the
 * notifier), and get/put to arm/disarm it. */
struct nvif_ioctl_ntfy_new_v0 {
	/* nvif_ioctl ... */
	__u8 version;
	__u8 event;
	__u8 index;
	__u8 pad03[5];
	__u8 data[]; /* event request data (event.h) */
};

struct nvif_ioctl_ntfy_del_v0 {
	/* nvif_ioctl ... */
	__u8 version;
	__u8 index;
	__u8 pad02[6];
};

struct nvif_ioctl_ntfy_get_v0 {
	/* nvif_ioctl ... */
	__u8 version;
	__u8 index;
	__u8 pad02[6];
};

struct nvif_ioctl_ntfy_put_v0 {
	/* nvif_ioctl ... */
	__u8 version;
	__u8 index;
	__u8 pad02[6];
};

/* Invoke object method 'method' with class-specific data. */
struct nvif_ioctl_mthd_v0 {
	/* nvif_ioctl ... */
	__u8 version;
	__u8 method;
	__u8 pad02[6];
	__u8 data[]; /* method data (class.h) */
};

#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/list.h b/drivers/gpu/drm/nouveau/nvif/list.h new file mode 100644 index 000000000000..8af5d144ecb0 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/list.h | |||
@@ -0,0 +1,353 @@ | |||
1 | /* | ||
2 | * Copyright © 2010 Intel Corporation | ||
3 | * Copyright © 2010 Francisco Jerez <currojerez@riseup.net> | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice (including the next | ||
13 | * paragraph) shall be included in all copies or substantial portions of the | ||
14 | * Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
22 | * IN THE SOFTWARE. | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | /* Modified by Ben Skeggs <bskeggs@redhat.com> to match kernel list APIs */ | ||
27 | |||
28 | #ifndef _XORG_LIST_H_ | ||
29 | #define _XORG_LIST_H_ | ||
30 | |||
31 | /** | ||
32 | * @file Classic doubly-linked circular list implementation. | ||
33 | * For real usage examples of the linked list, see the file test/list.c | ||
34 | * | ||
35 | * Example: | ||
36 | * We need to keep a list of struct foo in the parent struct bar, i.e. what | ||
37 | * we want is something like this. | ||
38 | * | ||
39 | * struct bar { | ||
40 | * ... | ||
41 | * struct foo *list_of_foos; -----> struct foo {}, struct foo {}, struct foo{} | ||
42 | * ... | ||
43 | * } | ||
44 | * | ||
45 | * We need one list head in bar and a list element in all list_of_foos (both are of | ||
46 | * data type 'struct list_head'). | ||
47 | * | ||
48 | * struct bar { | ||
49 | * ... | ||
50 | * struct list_head list_of_foos; | ||
51 | * ... | ||
52 | * } | ||
53 | * | ||
54 | * struct foo { | ||
55 | * ... | ||
56 | * struct list_head entry; | ||
57 | * ... | ||
58 | * } | ||
59 | * | ||
60 | * Now we initialize the list head: | ||
61 | * | ||
62 | * struct bar bar; | ||
63 | * ... | ||
64 | * INIT_LIST_HEAD(&bar.list_of_foos); | ||
65 | * | ||
66 | * Then we create the first element and add it to this list: | ||
67 | * | ||
68 | * struct foo *foo = malloc(...); | ||
69 | * .... | ||
70 | * list_add(&foo->entry, &bar.list_of_foos); | ||
71 | * | ||
72 | * Repeat the above for each element you want to add to the list. Deleting | ||
73 | * works with the element itself. | ||
74 | * list_del(&foo->entry); | ||
75 | * free(foo); | ||
76 | * | ||
77 | * Note: calling list_del(&bar.list_of_foos) will set bar.list_of_foos to an empty | ||
78 | * list again. | ||
79 | * | ||
80 | * Looping through the list requires a 'struct foo' as iterator and the | ||
81 | * name of the field the subnodes use. | ||
82 | * | ||
83 | * struct foo *iterator; | ||
84 | * list_for_each_entry(iterator, &bar.list_of_foos, entry) { | ||
85 | * if (iterator->something == ...) | ||
86 | * ... | ||
87 | * } | ||
88 | * | ||
89 | * Note: You must not call list_del() on the iterator if you continue the | ||
90 | * loop. You need to run the safe for-each loop instead: | ||
91 | * | ||
92 | * struct foo *iterator, *next; | ||
93 | * list_for_each_entry_safe(iterator, next, &bar.list_of_foos, entry) { | ||
94 | * if (...) | ||
95 | * list_del(&iterator->entry); | ||
96 | * } | ||
97 | * | ||
98 | */ | ||
99 | |||
/**
 * Doubly-linked circular list link.  Embed one instance in the list
 * head and one in every member structure; position and field name of
 * the struct list_head member are irrelevant, and list members need
 * not share a type.  An empty list is a node linked to itself.
 */
struct list_head {
	struct list_head *next, *prev;
};

/**
 * Static initialiser for a list head: points at itself in both
 * directions, i.e. an empty list.
 */
#define LIST_HEAD_INIT(name) { &(name), &(name) }

/** Define and initialise a list head in a single statement. */
#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)

/**
 * Runtime initialisation of a list head: link it to itself so the
 * list starts out empty.
 *
 * Example:
 * INIT_LIST_HEAD(&bar->list_of_foos);
 *
 * @param list The list to initialise.
 */
static inline void
INIT_LIST_HEAD(struct list_head *list)
{
	list->prev = list;
	list->next = list;
}
132 | |||
133 | static inline void | ||
134 | __list_add(struct list_head *entry, | ||
135 | struct list_head *prev, struct list_head *next) | ||
136 | { | ||
137 | next->prev = entry; | ||
138 | entry->next = next; | ||
139 | entry->prev = prev; | ||
140 | prev->next = entry; | ||
141 | } | ||
142 | |||
143 | /** | ||
144 | * Insert a new element after the given list head. The new element does not | ||
145 | * need to be initialised as empty list. | ||
146 | * The list changes from: | ||
147 | * head → some element → ... | ||
148 | * to | ||
149 | * head → new element → older element → ... | ||
150 | * | ||
151 | * Example: | ||
152 | * struct foo *newfoo = malloc(...); | ||
153 | * list_add(&newfoo->entry, &bar->list_of_foos); | ||
154 | * | ||
155 | * @param entry The new element to prepend to the list. | ||
156 | * @param head The existing list. | ||
157 | */ | ||
158 | static inline void | ||
159 | list_add(struct list_head *entry, struct list_head *head) | ||
160 | { | ||
161 | __list_add(entry, head, head->next); | ||
162 | } | ||
163 | |||
/**
 * Append a new element to the end of the list given with this list head.
 *
 * The list changes from:
 *      head → some element → ... → lastelement
 * to
 *      head → some element → ... → lastelement → new element
 *
 * Example:
 * struct foo *newfoo = malloc(...);
 * list_add_tail(&newfoo->entry, &bar->list_of_foos);
 *
 * @param entry The new element to append to the list.
 * @param head The existing list.
 */
static inline void
list_add_tail(struct list_head *entry, struct list_head *head)
{
	__list_add(entry, head->prev, head);
}

/* Unlink whatever lies between @prev and @next by pointing them at
 * each other.  Internal helper: the removed element's own pointers
 * are left untouched (dangling). */
static inline void
__list_del(struct list_head *prev, struct list_head *next)
{
	next->prev = prev;
	prev->next = next;
}
191 | |||
/**
 * Remove the element from the list it is in. Using this function will reset
 * the pointers to/from this element so it is removed from the list. It does
 * NOT free the element itself or manipulate it otherwise.
 *
 * Using list_del on a pure list head (like in the example at the top of
 * this file) will NOT remove the first element from
 * the list but rather reset the list as empty list.
 *
 * Example:
 * list_del(&foo->entry);
 *
 * @param entry The element to remove.
 */
static inline void
list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
}

/**
 * Remove the element from its list and reinitialise it as an empty
 * list, so that a subsequent list_empty(entry) returns true -- unlike
 * after plain list_del(), which leaves the entry's links dangling.
 *
 * @param entry The element to remove and reinitialise.
 */
static inline void
list_del_init(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	INIT_LIST_HEAD(entry);
}

/**
 * Remove @list from its current position and append it at the tail of
 * @head.
 */
static inline void list_move_tail(struct list_head *list,
				  struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add_tail(list, head);
}
225 | |||
/**
 * Check if the list is empty.
 *
 * Example:
 * list_empty(&bar->list_of_foos);
 *
 * @return True if the list contains no elements, False otherwise.
 */
static inline bool
list_empty(struct list_head *head)
{
	return head->next == head;
}
239 | |||
/**
 * Returns a pointer to the container of this list element.
 *
 * Example:
 * struct foo* f;
 * f = container_of(&foo->entry, struct foo, entry);
 * assert(f == foo);
 *
 * @param ptr Pointer to the struct list_head.
 * @param type Data type of the list element.
 * @param member Member name of the struct list_head field in the list element.
 * @return A pointer to the data struct containing the list head.
 */
#ifndef container_of
#define container_of(ptr, type, member) \
	(type *)((char *)(ptr) - (char *) &((type *)0)->member)
#endif

/**
 * Alias of container_of
 */
#define list_entry(ptr, type, member) \
	container_of(ptr, type, member)

/**
 * Retrieve the first list entry for the given list pointer.
 *
 * Example:
 * struct foo *first;
 * first = list_first_entry(&bar->list_of_foos, struct foo, list_of_foos);
 *
 * @param ptr The list head
 * @param type Data type of the list element to retrieve
 * @param member Member name of the struct list_head field in the list element.
 * @return A pointer to the first list element.
 */
#define list_first_entry(ptr, type, member) \
	list_entry((ptr)->next, type, member)

/**
 * Retrieve the last list entry for the given list pointer.
 *
 * Example:
 * struct foo *last;
 * last = list_last_entry(&bar->list_of_foos, struct foo, list_of_foos);
 *
 * @param ptr The list head
 * @param type Data type of the list element to retrieve
 * @param member Member name of the struct list_head field in the list element.
 * @return A pointer to the last list element.
 */
#define list_last_entry(ptr, type, member) \
	list_entry((ptr)->prev, type, member)

/* container_of variant that infers the container type from a sample
 * pointer (typeof), used by the iteration macros below. */
#define __container_of(ptr, sample, member) \
	(void *)container_of((ptr), typeof(*(sample)), member)
296 | |||
/**
 * Loop through the list given by head and set pos to struct in the list.
 *
 * Example:
 * struct foo *iterator;
 * list_for_each_entry(iterator, &bar->list_of_foos, entry) {
 *      [modify iterator]
 * }
 *
 * This macro is not safe for node deletion. Use list_for_each_entry_safe
 * instead.
 *
 * @param pos Iterator variable of the type of the list elements.
 * @param head List head
 * @param member Member name of the struct list_head in the list elements.
 *
 */
#define list_for_each_entry(pos, head, member)				\
    for (pos = __container_of((head)->next, pos, member);		\
	 &pos->member != (head);					\
	 pos = __container_of(pos->member.next, pos, member))

/**
 * Loop through the list, keeping a backup pointer to the element. This
 * macro allows for the deletion of a list element while looping through the
 * list.
 *
 * See list_for_each_entry for more details.
 */
#define list_for_each_entry_safe(pos, tmp, head, member)		\
    for (pos = __container_of((head)->next, pos, member),		\
	 tmp = __container_of(pos->member.next, pos, member);		\
	 &pos->member != (head);					\
	 pos = tmp, tmp = __container_of(pos->member.next, tmp, member))

/* As list_for_each_entry, but walk backwards via the prev links. */
#define list_for_each_entry_reverse(pos, head, member)			\
    for (pos = __container_of((head)->prev, pos, member);		\
	 &pos->member != (head);					\
	 pos = __container_of(pos->member.prev, pos, member))

/* Continue iterating forward from the element after the current pos. */
#define list_for_each_entry_continue(pos, head, member)			\
    for (pos = __container_of(pos->member.next, pos, member);		\
	 &pos->member != (head);					\
	 pos = __container_of(pos->member.next, pos, member))

/* Continue iterating backward from the element before the current pos. */
#define list_for_each_entry_continue_reverse(pos, head, member)		\
    for (pos = __container_of(pos->member.prev, pos, member);		\
	 &pos->member != (head);					\
	 pos = __container_of(pos->member.prev, pos, member))

/* Iterate forward starting from (and including) the current pos. */
#define list_for_each_entry_from(pos, head, member)			\
    for (;								\
	 &pos->member != (head);					\
	 pos = __container_of(pos->member.next, pos, member))
352 | |||
353 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/nvif/notify.c b/drivers/gpu/drm/nouveau/nvif/notify.c new file mode 100644 index 000000000000..7c06123a559c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/notify.c | |||
@@ -0,0 +1,237 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | ||
23 | */ | ||
24 | |||
25 | #include <nvif/client.h> | ||
26 | #include <nvif/driver.h> | ||
27 | #include <nvif/notify.h> | ||
28 | #include <nvif/object.h> | ||
29 | #include <nvif/ioctl.h> | ||
30 | #include <nvif/event.h> | ||
31 | |||
32 | static inline int | ||
33 | nvif_notify_put_(struct nvif_notify *notify) | ||
34 | { | ||
35 | struct nvif_object *object = notify->object; | ||
36 | struct { | ||
37 | struct nvif_ioctl_v0 ioctl; | ||
38 | struct nvif_ioctl_ntfy_put_v0 ntfy; | ||
39 | } args = { | ||
40 | .ioctl.type = NVIF_IOCTL_V0_NTFY_PUT, | ||
41 | .ntfy.index = notify->index, | ||
42 | }; | ||
43 | |||
44 | if (atomic_inc_return(¬ify->putcnt) != 1) | ||
45 | return 0; | ||
46 | |||
47 | return nvif_object_ioctl(object, &args, sizeof(args), NULL); | ||
48 | } | ||
49 | |||
50 | int | ||
51 | nvif_notify_put(struct nvif_notify *notify) | ||
52 | { | ||
53 | if (likely(notify->object) && | ||
54 | test_and_clear_bit(NVIF_NOTIFY_USER, ¬ify->flags)) { | ||
55 | int ret = nvif_notify_put_(notify); | ||
56 | if (test_bit(NVIF_NOTIFY_WORK, ¬ify->flags)) | ||
57 | flush_work(¬ify->work); | ||
58 | return ret; | ||
59 | } | ||
60 | return 0; | ||
61 | } | ||
62 | |||
63 | static inline int | ||
64 | nvif_notify_get_(struct nvif_notify *notify) | ||
65 | { | ||
66 | struct nvif_object *object = notify->object; | ||
67 | struct { | ||
68 | struct nvif_ioctl_v0 ioctl; | ||
69 | struct nvif_ioctl_ntfy_get_v0 ntfy; | ||
70 | } args = { | ||
71 | .ioctl.type = NVIF_IOCTL_V0_NTFY_GET, | ||
72 | .ntfy.index = notify->index, | ||
73 | }; | ||
74 | |||
75 | if (atomic_dec_return(¬ify->putcnt) != 0) | ||
76 | return 0; | ||
77 | |||
78 | return nvif_object_ioctl(object, &args, sizeof(args), NULL); | ||
79 | } | ||
80 | |||
81 | int | ||
82 | nvif_notify_get(struct nvif_notify *notify) | ||
83 | { | ||
84 | if (likely(notify->object) && | ||
85 | !test_and_set_bit(NVIF_NOTIFY_USER, ¬ify->flags)) | ||
86 | return nvif_notify_get_(notify); | ||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | static void | ||
91 | nvif_notify_work(struct work_struct *work) | ||
92 | { | ||
93 | struct nvif_notify *notify = container_of(work, typeof(*notify), work); | ||
94 | if (notify->func(notify) == NVIF_NOTIFY_KEEP) | ||
95 | nvif_notify_get_(notify); | ||
96 | } | ||
97 | |||
98 | int | ||
99 | nvif_notify(const void *header, u32 length, const void *data, u32 size) | ||
100 | { | ||
101 | struct nvif_notify *notify = NULL; | ||
102 | const union { | ||
103 | struct nvif_notify_rep_v0 v0; | ||
104 | } *args = header; | ||
105 | int ret = NVIF_NOTIFY_DROP; | ||
106 | |||
107 | if (length == sizeof(args->v0) && args->v0.version == 0) { | ||
108 | if (WARN_ON(args->v0.route)) | ||
109 | return NVIF_NOTIFY_DROP; | ||
110 | notify = (void *)(unsigned long)args->v0.token; | ||
111 | } | ||
112 | |||
113 | if (!WARN_ON(notify == NULL)) { | ||
114 | struct nvif_client *client = nvif_client(notify->object); | ||
115 | if (!WARN_ON(notify->size != size)) { | ||
116 | if (test_bit(NVIF_NOTIFY_WORK, ¬ify->flags)) { | ||
117 | atomic_inc(¬ify->putcnt); | ||
118 | memcpy((void *)notify->data, data, size); | ||
119 | schedule_work(¬ify->work); | ||
120 | return NVIF_NOTIFY_DROP; | ||
121 | } | ||
122 | notify->data = data; | ||
123 | ret = notify->func(notify); | ||
124 | notify->data = NULL; | ||
125 | if (ret != NVIF_NOTIFY_DROP && client->driver->keep) { | ||
126 | atomic_inc(¬ify->putcnt); | ||
127 | nvif_notify_get_(notify); | ||
128 | } | ||
129 | } | ||
130 | } | ||
131 | |||
132 | return ret; | ||
133 | } | ||
134 | |||
135 | int | ||
136 | nvif_notify_fini(struct nvif_notify *notify) | ||
137 | { | ||
138 | struct nvif_object *object = notify->object; | ||
139 | struct { | ||
140 | struct nvif_ioctl_v0 ioctl; | ||
141 | struct nvif_ioctl_ntfy_del_v0 ntfy; | ||
142 | } args = { | ||
143 | .ioctl.type = NVIF_IOCTL_V0_NTFY_DEL, | ||
144 | .ntfy.index = notify->index, | ||
145 | }; | ||
146 | int ret = nvif_notify_put(notify); | ||
147 | if (ret >= 0 && object) { | ||
148 | ret = nvif_object_ioctl(object, &args, sizeof(args), NULL); | ||
149 | if (ret == 0) { | ||
150 | nvif_object_ref(NULL, ¬ify->object); | ||
151 | kfree((void *)notify->data); | ||
152 | } | ||
153 | } | ||
154 | return ret; | ||
155 | } | ||
156 | |||
157 | int | ||
158 | nvif_notify_init(struct nvif_object *object, void (*dtor)(struct nvif_notify *), | ||
159 | int (*func)(struct nvif_notify *), bool work, u8 event, | ||
160 | void *data, u32 size, u32 reply, struct nvif_notify *notify) | ||
161 | { | ||
162 | struct { | ||
163 | struct nvif_ioctl_v0 ioctl; | ||
164 | struct nvif_ioctl_ntfy_new_v0 ntfy; | ||
165 | struct nvif_notify_req_v0 req; | ||
166 | } *args; | ||
167 | int ret = -ENOMEM; | ||
168 | |||
169 | notify->object = NULL; | ||
170 | nvif_object_ref(object, ¬ify->object); | ||
171 | notify->flags = 0; | ||
172 | atomic_set(¬ify->putcnt, 1); | ||
173 | notify->dtor = dtor; | ||
174 | notify->func = func; | ||
175 | notify->data = NULL; | ||
176 | notify->size = reply; | ||
177 | if (work) { | ||
178 | INIT_WORK(¬ify->work, nvif_notify_work); | ||
179 | set_bit(NVIF_NOTIFY_WORK, ¬ify->flags); | ||
180 | notify->data = kmalloc(notify->size, GFP_KERNEL); | ||
181 | if (!notify->data) | ||
182 | goto done; | ||
183 | } | ||
184 | |||
185 | if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) | ||
186 | goto done; | ||
187 | args->ioctl.version = 0; | ||
188 | args->ioctl.type = NVIF_IOCTL_V0_NTFY_NEW; | ||
189 | args->ntfy.version = 0; | ||
190 | args->ntfy.event = event; | ||
191 | args->req.version = 0; | ||
192 | args->req.reply = notify->size; | ||
193 | args->req.route = 0; | ||
194 | args->req.token = (unsigned long)(void *)notify; | ||
195 | |||
196 | memcpy(args->req.data, data, size); | ||
197 | ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL); | ||
198 | notify->index = args->ntfy.index; | ||
199 | kfree(args); | ||
200 | done: | ||
201 | if (ret) | ||
202 | nvif_notify_fini(notify); | ||
203 | return ret; | ||
204 | } | ||
205 | |||
206 | static void | ||
207 | nvif_notify_del(struct nvif_notify *notify) | ||
208 | { | ||
209 | nvif_notify_fini(notify); | ||
210 | kfree(notify); | ||
211 | } | ||
212 | |||
213 | void | ||
214 | nvif_notify_ref(struct nvif_notify *notify, struct nvif_notify **pnotify) | ||
215 | { | ||
216 | BUG_ON(notify != NULL); | ||
217 | if (*pnotify) | ||
218 | (*pnotify)->dtor(*pnotify); | ||
219 | *pnotify = notify; | ||
220 | } | ||
221 | |||
222 | int | ||
223 | nvif_notify_new(struct nvif_object *object, int (*func)(struct nvif_notify *), | ||
224 | bool work, u8 type, void *data, u32 size, u32 reply, | ||
225 | struct nvif_notify **pnotify) | ||
226 | { | ||
227 | struct nvif_notify *notify = kzalloc(sizeof(*notify), GFP_KERNEL); | ||
228 | if (notify) { | ||
229 | int ret = nvif_notify_init(object, nvif_notify_del, func, work, | ||
230 | type, data, size, reply, notify); | ||
231 | if (ret) | ||
232 | kfree(notify); | ||
233 | *pnotify = notify; | ||
234 | return ret; | ||
235 | } | ||
236 | return -ENOMEM; | ||
237 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nvif/notify.h b/drivers/gpu/drm/nouveau/nvif/notify.h new file mode 100644 index 000000000000..9ebfa3b45e76 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/notify.h | |||
@@ -0,0 +1,39 @@ | |||
1 | #ifndef __NVIF_NOTIFY_H__ | ||
2 | #define __NVIF_NOTIFY_H__ | ||
3 | |||
4 | struct nvif_notify { | ||
5 | struct nvif_object *object; | ||
6 | int index; | ||
7 | |||
8 | #define NVIF_NOTIFY_USER 0 | ||
9 | #define NVIF_NOTIFY_WORK 1 | ||
10 | unsigned long flags; | ||
11 | atomic_t putcnt; | ||
12 | void (*dtor)(struct nvif_notify *); | ||
13 | #define NVIF_NOTIFY_DROP 0 | ||
14 | #define NVIF_NOTIFY_KEEP 1 | ||
15 | int (*func)(struct nvif_notify *); | ||
16 | |||
17 | /* this is const for a *very* good reason - the data might be on the | ||
18 | * stack from an irq handler. if you're not nvif/notify.c then you | ||
19 | * should probably think twice before casting it away... | ||
20 | */ | ||
21 | const void *data; | ||
22 | u32 size; | ||
23 | struct work_struct work; | ||
24 | }; | ||
25 | |||
26 | int nvif_notify_init(struct nvif_object *, void (*dtor)(struct nvif_notify *), | ||
27 | int (*func)(struct nvif_notify *), bool work, u8 type, | ||
28 | void *data, u32 size, u32 reply, struct nvif_notify *); | ||
29 | int nvif_notify_fini(struct nvif_notify *); | ||
30 | int nvif_notify_get(struct nvif_notify *); | ||
31 | int nvif_notify_put(struct nvif_notify *); | ||
32 | int nvif_notify(const void *, u32, const void *, u32); | ||
33 | |||
34 | int nvif_notify_new(struct nvif_object *, int (*func)(struct nvif_notify *), | ||
35 | bool work, u8 type, void *data, u32 size, u32 reply, | ||
36 | struct nvif_notify **); | ||
37 | void nvif_notify_ref(struct nvif_notify *, struct nvif_notify **); | ||
38 | |||
39 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c new file mode 100644 index 000000000000..b0c82206ece2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/object.c | |||
@@ -0,0 +1,302 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | ||
23 | */ | ||
24 | |||
25 | #include "object.h" | ||
26 | #include "client.h" | ||
27 | #include "driver.h" | ||
28 | #include "ioctl.h" | ||
29 | |||
30 | int | ||
31 | nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack) | ||
32 | { | ||
33 | struct nvif_client *client = nvif_client(object); | ||
34 | union { | ||
35 | struct nvif_ioctl_v0 v0; | ||
36 | } *args = data; | ||
37 | |||
38 | if (size >= sizeof(*args) && args->v0.version == 0) { | ||
39 | args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY; | ||
40 | args->v0.path_nr = 0; | ||
41 | while (args->v0.path_nr < ARRAY_SIZE(args->v0.path)) { | ||
42 | args->v0.path[args->v0.path_nr++] = object->handle; | ||
43 | if (object->parent == object) | ||
44 | break; | ||
45 | object = object->parent; | ||
46 | } | ||
47 | } else | ||
48 | return -ENOSYS; | ||
49 | |||
50 | return client->driver->ioctl(client->base.priv, client->super, data, size, hack); | ||
51 | } | ||
52 | |||
53 | int | ||
54 | nvif_object_sclass(struct nvif_object *object, u32 *oclass, int count) | ||
55 | { | ||
56 | struct { | ||
57 | struct nvif_ioctl_v0 ioctl; | ||
58 | struct nvif_ioctl_sclass_v0 sclass; | ||
59 | } *args; | ||
60 | u32 size = count * sizeof(args->sclass.oclass[0]); | ||
61 | int ret; | ||
62 | |||
63 | if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) | ||
64 | return -ENOMEM; | ||
65 | args->ioctl.version = 0; | ||
66 | args->ioctl.type = NVIF_IOCTL_V0_SCLASS; | ||
67 | args->sclass.version = 0; | ||
68 | args->sclass.count = count; | ||
69 | |||
70 | memcpy(args->sclass.oclass, oclass, size); | ||
71 | ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL); | ||
72 | ret = ret ? ret : args->sclass.count; | ||
73 | memcpy(oclass, args->sclass.oclass, size); | ||
74 | kfree(args); | ||
75 | return ret; | ||
76 | } | ||
77 | |||
78 | u32 | ||
79 | nvif_object_rd(struct nvif_object *object, int size, u64 addr) | ||
80 | { | ||
81 | struct { | ||
82 | struct nvif_ioctl_v0 ioctl; | ||
83 | struct nvif_ioctl_rd_v0 rd; | ||
84 | } args = { | ||
85 | .ioctl.type = NVIF_IOCTL_V0_RD, | ||
86 | .rd.size = size, | ||
87 | .rd.addr = addr, | ||
88 | }; | ||
89 | int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL); | ||
90 | if (ret) { | ||
91 | /*XXX: warn? */ | ||
92 | return 0; | ||
93 | } | ||
94 | return args.rd.data; | ||
95 | } | ||
96 | |||
97 | void | ||
98 | nvif_object_wr(struct nvif_object *object, int size, u64 addr, u32 data) | ||
99 | { | ||
100 | struct { | ||
101 | struct nvif_ioctl_v0 ioctl; | ||
102 | struct nvif_ioctl_wr_v0 wr; | ||
103 | } args = { | ||
104 | .ioctl.type = NVIF_IOCTL_V0_WR, | ||
105 | .wr.size = size, | ||
106 | .wr.addr = addr, | ||
107 | .wr.data = data, | ||
108 | }; | ||
109 | int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL); | ||
110 | if (ret) { | ||
111 | /*XXX: warn? */ | ||
112 | } | ||
113 | } | ||
114 | |||
115 | int | ||
116 | nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size) | ||
117 | { | ||
118 | struct { | ||
119 | struct nvif_ioctl_v0 ioctl; | ||
120 | struct nvif_ioctl_mthd_v0 mthd; | ||
121 | } *args; | ||
122 | u8 stack[128]; | ||
123 | int ret; | ||
124 | |||
125 | if (sizeof(*args) + size > sizeof(stack)) { | ||
126 | if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) | ||
127 | return -ENOMEM; | ||
128 | } else { | ||
129 | args = (void *)stack; | ||
130 | } | ||
131 | args->ioctl.version = 0; | ||
132 | args->ioctl.type = NVIF_IOCTL_V0_MTHD; | ||
133 | args->mthd.version = 0; | ||
134 | args->mthd.method = mthd; | ||
135 | |||
136 | memcpy(args->mthd.data, data, size); | ||
137 | ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL); | ||
138 | memcpy(data, args->mthd.data, size); | ||
139 | if (args != (void *)stack) | ||
140 | kfree(args); | ||
141 | return ret; | ||
142 | } | ||
143 | |||
144 | void | ||
145 | nvif_object_unmap(struct nvif_object *object) | ||
146 | { | ||
147 | if (object->map.size) { | ||
148 | struct nvif_client *client = nvif_client(object); | ||
149 | struct { | ||
150 | struct nvif_ioctl_v0 ioctl; | ||
151 | struct nvif_ioctl_unmap unmap; | ||
152 | } args = { | ||
153 | .ioctl.type = NVIF_IOCTL_V0_UNMAP, | ||
154 | }; | ||
155 | |||
156 | if (object->map.ptr) { | ||
157 | client->driver->unmap(client, object->map.ptr, | ||
158 | object->map.size); | ||
159 | object->map.ptr = NULL; | ||
160 | } | ||
161 | |||
162 | nvif_object_ioctl(object, &args, sizeof(args), NULL); | ||
163 | object->map.size = 0; | ||
164 | } | ||
165 | } | ||
166 | |||
167 | int | ||
168 | nvif_object_map(struct nvif_object *object) | ||
169 | { | ||
170 | struct nvif_client *client = nvif_client(object); | ||
171 | struct { | ||
172 | struct nvif_ioctl_v0 ioctl; | ||
173 | struct nvif_ioctl_map_v0 map; | ||
174 | } args = { | ||
175 | .ioctl.type = NVIF_IOCTL_V0_MAP, | ||
176 | }; | ||
177 | int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL); | ||
178 | if (ret == 0) { | ||
179 | object->map.size = args.map.length; | ||
180 | object->map.ptr = client->driver->map(client, args.map.handle, | ||
181 | object->map.size); | ||
182 | if (ret = -ENOMEM, object->map.ptr) | ||
183 | return 0; | ||
184 | nvif_object_unmap(object); | ||
185 | } | ||
186 | return ret; | ||
187 | } | ||
188 | |||
189 | struct ctor { | ||
190 | struct nvif_ioctl_v0 ioctl; | ||
191 | struct nvif_ioctl_new_v0 new; | ||
192 | }; | ||
193 | |||
194 | void | ||
195 | nvif_object_fini(struct nvif_object *object) | ||
196 | { | ||
197 | struct ctor *ctor = container_of(object->data, typeof(*ctor), new.data); | ||
198 | if (object->parent) { | ||
199 | struct { | ||
200 | struct nvif_ioctl_v0 ioctl; | ||
201 | struct nvif_ioctl_del del; | ||
202 | } args = { | ||
203 | .ioctl.type = NVIF_IOCTL_V0_DEL, | ||
204 | }; | ||
205 | |||
206 | nvif_object_unmap(object); | ||
207 | nvif_object_ioctl(object, &args, sizeof(args), NULL); | ||
208 | if (object->data) { | ||
209 | object->size = 0; | ||
210 | object->data = NULL; | ||
211 | kfree(ctor); | ||
212 | } | ||
213 | nvif_object_ref(NULL, &object->parent); | ||
214 | } | ||
215 | } | ||
216 | |||
217 | int | ||
218 | nvif_object_init(struct nvif_object *parent, void (*dtor)(struct nvif_object *), | ||
219 | u32 handle, u32 oclass, void *data, u32 size, | ||
220 | struct nvif_object *object) | ||
221 | { | ||
222 | struct ctor *ctor; | ||
223 | int ret = 0; | ||
224 | |||
225 | object->parent = NULL; | ||
226 | object->object = object; | ||
227 | nvif_object_ref(parent, &object->parent); | ||
228 | kref_init(&object->refcount); | ||
229 | object->handle = handle; | ||
230 | object->oclass = oclass; | ||
231 | object->data = NULL; | ||
232 | object->size = 0; | ||
233 | object->dtor = dtor; | ||
234 | object->map.ptr = NULL; | ||
235 | object->map.size = 0; | ||
236 | |||
237 | if (object->parent) { | ||
238 | if (!(ctor = kmalloc(sizeof(*ctor) + size, GFP_KERNEL))) { | ||
239 | nvif_object_fini(object); | ||
240 | return -ENOMEM; | ||
241 | } | ||
242 | object->data = ctor->new.data; | ||
243 | object->size = size; | ||
244 | memcpy(object->data, data, size); | ||
245 | |||
246 | ctor->ioctl.version = 0; | ||
247 | ctor->ioctl.type = NVIF_IOCTL_V0_NEW; | ||
248 | ctor->new.version = 0; | ||
249 | ctor->new.route = NVIF_IOCTL_V0_ROUTE_NVIF; | ||
250 | ctor->new.token = (unsigned long)(void *)object; | ||
251 | ctor->new.handle = handle; | ||
252 | ctor->new.oclass = oclass; | ||
253 | |||
254 | ret = nvif_object_ioctl(parent, ctor, sizeof(*ctor) + | ||
255 | object->size, &object->priv); | ||
256 | } | ||
257 | |||
258 | if (ret) | ||
259 | nvif_object_fini(object); | ||
260 | return ret; | ||
261 | } | ||
262 | |||
263 | static void | ||
264 | nvif_object_del(struct nvif_object *object) | ||
265 | { | ||
266 | nvif_object_fini(object); | ||
267 | kfree(object); | ||
268 | } | ||
269 | |||
270 | int | ||
271 | nvif_object_new(struct nvif_object *parent, u32 handle, u32 oclass, | ||
272 | void *data, u32 size, struct nvif_object **pobject) | ||
273 | { | ||
274 | struct nvif_object *object = kzalloc(sizeof(*object), GFP_KERNEL); | ||
275 | if (object) { | ||
276 | int ret = nvif_object_init(parent, nvif_object_del, handle, | ||
277 | oclass, data, size, object); | ||
278 | if (ret) | ||
279 | kfree(object); | ||
280 | *pobject = object; | ||
281 | return ret; | ||
282 | } | ||
283 | return -ENOMEM; | ||
284 | } | ||
285 | |||
286 | static void | ||
287 | nvif_object_put(struct kref *kref) | ||
288 | { | ||
289 | struct nvif_object *object = | ||
290 | container_of(kref, typeof(*object), refcount); | ||
291 | object->dtor(object); | ||
292 | } | ||
293 | |||
294 | void | ||
295 | nvif_object_ref(struct nvif_object *object, struct nvif_object **pobject) | ||
296 | { | ||
297 | if (object) | ||
298 | kref_get(&object->refcount); | ||
299 | if (*pobject) | ||
300 | kref_put(&(*pobject)->refcount, nvif_object_put); | ||
301 | *pobject = object; | ||
302 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nvif/object.h b/drivers/gpu/drm/nouveau/nvif/object.h new file mode 100644 index 000000000000..fac3a3bbec44 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/object.h | |||
@@ -0,0 +1,75 @@ | |||
1 | #ifndef __NVIF_OBJECT_H__ | ||
2 | #define __NVIF_OBJECT_H__ | ||
3 | |||
4 | #include <nvif/os.h> | ||
5 | |||
6 | struct nvif_object { | ||
7 | struct nvif_object *parent; | ||
8 | struct nvif_object *object; /*XXX: hack for nvif_object() */ | ||
9 | struct kref refcount; | ||
10 | u32 handle; | ||
11 | u32 oclass; | ||
12 | void *data; | ||
13 | u32 size; | ||
14 | void *priv; /*XXX: hack */ | ||
15 | void (*dtor)(struct nvif_object *); | ||
16 | struct { | ||
17 | void *ptr; | ||
18 | u32 size; | ||
19 | } map; | ||
20 | }; | ||
21 | |||
22 | int nvif_object_init(struct nvif_object *, void (*dtor)(struct nvif_object *), | ||
23 | u32 handle, u32 oclass, void *, u32, | ||
24 | struct nvif_object *); | ||
25 | void nvif_object_fini(struct nvif_object *); | ||
26 | int nvif_object_new(struct nvif_object *, u32 handle, u32 oclass, | ||
27 | void *, u32, struct nvif_object **); | ||
28 | void nvif_object_ref(struct nvif_object *, struct nvif_object **); | ||
29 | int nvif_object_ioctl(struct nvif_object *, void *, u32, void **); | ||
30 | int nvif_object_sclass(struct nvif_object *, u32 *, int); | ||
31 | u32 nvif_object_rd(struct nvif_object *, int, u64); | ||
32 | void nvif_object_wr(struct nvif_object *, int, u64, u32); | ||
33 | int nvif_object_mthd(struct nvif_object *, u32, void *, u32); | ||
34 | int nvif_object_map(struct nvif_object *); | ||
35 | void nvif_object_unmap(struct nvif_object *); | ||
36 | |||
37 | #define nvif_object(a) (a)->object | ||
38 | |||
39 | #define ioread8_native ioread8 | ||
40 | #define iowrite8_native iowrite8 | ||
41 | #define nvif_rd(a,b,c) ({ \ | ||
42 | struct nvif_object *_object = nvif_object(a); \ | ||
43 | u32 _data; \ | ||
44 | if (likely(_object->map.ptr)) \ | ||
45 | _data = ioread##b##_native((u8 *)_object->map.ptr + (c)); \ | ||
46 | else \ | ||
47 | _data = nvif_object_rd(_object, (b) / 8, (c)); \ | ||
48 | _data; \ | ||
49 | }) | ||
50 | #define nvif_wr(a,b,c,d) ({ \ | ||
51 | struct nvif_object *_object = nvif_object(a); \ | ||
52 | if (likely(_object->map.ptr)) \ | ||
53 | iowrite##b##_native((d), (u8 *)_object->map.ptr + (c)); \ | ||
54 | else \ | ||
55 | nvif_object_wr(_object, (b) / 8, (c), (d)); \ | ||
56 | }) | ||
57 | #define nvif_rd08(a,b) ({ u8 _v = nvif_rd((a), 8, (b)); _v; }) | ||
58 | #define nvif_rd16(a,b) ({ u16 _v = nvif_rd((a), 16, (b)); _v; }) | ||
59 | #define nvif_rd32(a,b) ({ u32 _v = nvif_rd((a), 32, (b)); _v; }) | ||
60 | #define nvif_wr08(a,b,c) nvif_wr((a), 8, (b), (u8)(c)) | ||
61 | #define nvif_wr16(a,b,c) nvif_wr((a), 16, (b), (u16)(c)) | ||
62 | #define nvif_wr32(a,b,c) nvif_wr((a), 32, (b), (u32)(c)) | ||
63 | #define nvif_mask(a,b,c,d) ({ \ | ||
64 | u32 _v = nvif_rd32(nvif_object(a), (b)); \ | ||
65 | nvif_wr32(nvif_object(a), (b), (_v & ~(c)) | (d)); \ | ||
66 | _v; \ | ||
67 | }) | ||
68 | |||
69 | #define nvif_mthd(a,b,c,d) nvif_object_mthd(nvif_object(a), (b), (c), (d)) | ||
70 | |||
71 | /*XXX*/ | ||
72 | #include <core/object.h> | ||
73 | #define nvkm_object(a) ((struct nouveau_object *)nvif_object(a)->priv) | ||
74 | |||
75 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/nvif/os.h b/drivers/gpu/drm/nouveau/nvif/os.h new file mode 120000 index 000000000000..bd744b2cf5cf --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/os.h | |||
@@ -0,0 +1 @@ | |||
../core/os.h \ No newline at end of file | |||
diff --git a/drivers/gpu/drm/nouveau/nvif/unpack.h b/drivers/gpu/drm/nouveau/nvif/unpack.h new file mode 100644 index 000000000000..5933188b4a77 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/unpack.h | |||
@@ -0,0 +1,24 @@ | |||
1 | #ifndef __NVIF_UNPACK_H__ | ||
2 | #define __NVIF_UNPACK_H__ | ||
3 | |||
4 | #define nvif_unvers(d) ({ \ | ||
5 | ret = (size == sizeof(d)) ? 0 : -ENOSYS; \ | ||
6 | (ret == 0); \ | ||
7 | }) | ||
8 | |||
9 | #define nvif_unpack(d,vl,vh,m) ({ \ | ||
10 | if ((vl) == 0 || ret == -ENOSYS) { \ | ||
11 | int _size = sizeof(d); \ | ||
12 | if (_size <= size && (d).version >= (vl) && \ | ||
13 | (d).version <= (vh)) { \ | ||
14 | data = (u8 *)data + _size; \ | ||
15 | size = size - _size; \ | ||
16 | ret = ((m) || !size) ? 0 : -E2BIG; \ | ||
17 | } else { \ | ||
18 | ret = -ENOSYS; \ | ||
19 | } \ | ||
20 | } \ | ||
21 | (ret == 0); \ | ||
22 | }) | ||
23 | |||
24 | #endif | ||
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index ca65df144765..c96db433f8af 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | |||
@@ -848,6 +848,7 @@ static int ttm_dma_pool_get_pages(struct dma_pool *pool, | |||
848 | if (count) { | 848 | if (count) { |
849 | d_page = list_first_entry(&pool->free_list, struct dma_page, page_list); | 849 | d_page = list_first_entry(&pool->free_list, struct dma_page, page_list); |
850 | ttm->pages[index] = d_page->p; | 850 | ttm->pages[index] = d_page->p; |
851 | ttm_dma->cpu_address[index] = d_page->vaddr; | ||
851 | ttm_dma->dma_address[index] = d_page->dma; | 852 | ttm_dma->dma_address[index] = d_page->dma; |
852 | list_move_tail(&d_page->page_list, &ttm_dma->pages_list); | 853 | list_move_tail(&d_page->page_list, &ttm_dma->pages_list); |
853 | r = 0; | 854 | r = 0; |
@@ -979,6 +980,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev) | |||
979 | INIT_LIST_HEAD(&ttm_dma->pages_list); | 980 | INIT_LIST_HEAD(&ttm_dma->pages_list); |
980 | for (i = 0; i < ttm->num_pages; i++) { | 981 | for (i = 0; i < ttm->num_pages; i++) { |
981 | ttm->pages[i] = NULL; | 982 | ttm->pages[i] = NULL; |
983 | ttm_dma->cpu_address[i] = 0; | ||
982 | ttm_dma->dma_address[i] = 0; | 984 | ttm_dma->dma_address[i] = 0; |
983 | } | 985 | } |
984 | 986 | ||
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 75f319090043..bf080abc86d1 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -55,9 +55,12 @@ static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm) | |||
55 | 55 | ||
56 | static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm) | 56 | static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm) |
57 | { | 57 | { |
58 | ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*)); | 58 | ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, |
59 | ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages, | 59 | sizeof(*ttm->ttm.pages) + |
60 | sizeof(*ttm->dma_address)); | 60 | sizeof(*ttm->dma_address) + |
61 | sizeof(*ttm->cpu_address)); | ||
62 | ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages); | ||
63 | ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages); | ||
61 | } | 64 | } |
62 | 65 | ||
63 | #ifdef CONFIG_X86 | 66 | #ifdef CONFIG_X86 |
@@ -228,7 +231,7 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev, | |||
228 | 231 | ||
229 | INIT_LIST_HEAD(&ttm_dma->pages_list); | 232 | INIT_LIST_HEAD(&ttm_dma->pages_list); |
230 | ttm_dma_tt_alloc_page_directory(ttm_dma); | 233 | ttm_dma_tt_alloc_page_directory(ttm_dma); |
231 | if (!ttm->pages || !ttm_dma->dma_address) { | 234 | if (!ttm->pages) { |
232 | ttm_tt_destroy(ttm); | 235 | ttm_tt_destroy(ttm); |
233 | pr_err("Failed allocating page table\n"); | 236 | pr_err("Failed allocating page table\n"); |
234 | return -ENOMEM; | 237 | return -ENOMEM; |
@@ -243,7 +246,7 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma) | |||
243 | 246 | ||
244 | drm_free_large(ttm->pages); | 247 | drm_free_large(ttm->pages); |
245 | ttm->pages = NULL; | 248 | ttm->pages = NULL; |
246 | drm_free_large(ttm_dma->dma_address); | 249 | ttm_dma->cpu_address = NULL; |
247 | ttm_dma->dma_address = NULL; | 250 | ttm_dma->dma_address = NULL; |
248 | } | 251 | } |
249 | EXPORT_SYMBOL(ttm_dma_tt_fini); | 252 | EXPORT_SYMBOL(ttm_dma_tt_fini); |
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig index 7b7ea320a258..3e3b680dc007 100644 --- a/drivers/i2c/Kconfig +++ b/drivers/i2c/Kconfig | |||
@@ -2,7 +2,9 @@ | |||
2 | # I2C subsystem configuration | 2 | # I2C subsystem configuration |
3 | # | 3 | # |
4 | 4 | ||
5 | menuconfig I2C | 5 | menu "I2C support" |
6 | |||
7 | config I2C | ||
6 | tristate "I2C support" | 8 | tristate "I2C support" |
7 | select RT_MUTEXES | 9 | select RT_MUTEXES |
8 | ---help--- | 10 | ---help--- |
@@ -21,6 +23,18 @@ menuconfig I2C | |||
21 | This I2C support can also be built as a module. If so, the module | 23 | This I2C support can also be built as a module. If so, the module |
22 | will be called i2c-core. | 24 | will be called i2c-core. |
23 | 25 | ||
26 | config I2C_ACPI | ||
27 | bool "I2C ACPI support" | ||
28 | select I2C | ||
29 | depends on ACPI | ||
30 | default y | ||
31 | help | ||
32 | Say Y here if you want to enable ACPI I2C support. This includes support | ||
33 | for automatic enumeration of I2C slave devices and support for ACPI I2C | ||
34 | Operation Regions. Operation Regions allow firmware (BIOS) code to | ||
35 | access I2C slave devices, such as smart batteries through an I2C host | ||
36 | controller driver. | ||
37 | |||
24 | if I2C | 38 | if I2C |
25 | 39 | ||
26 | config I2C_BOARDINFO | 40 | config I2C_BOARDINFO |
@@ -124,3 +138,5 @@ config I2C_DEBUG_BUS | |||
124 | on. | 138 | on. |
125 | 139 | ||
126 | endif # I2C | 140 | endif # I2C |
141 | |||
142 | endmenu | ||
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile index 1722f50f2473..a1f590cbb435 100644 --- a/drivers/i2c/Makefile +++ b/drivers/i2c/Makefile | |||
@@ -2,8 +2,11 @@ | |||
2 | # Makefile for the i2c core. | 2 | # Makefile for the i2c core. |
3 | # | 3 | # |
4 | 4 | ||
5 | i2ccore-y := i2c-core.o | ||
6 | i2ccore-$(CONFIG_I2C_ACPI) += i2c-acpi.o | ||
7 | |||
5 | obj-$(CONFIG_I2C_BOARDINFO) += i2c-boardinfo.o | 8 | obj-$(CONFIG_I2C_BOARDINFO) += i2c-boardinfo.o |
6 | obj-$(CONFIG_I2C) += i2c-core.o | 9 | obj-$(CONFIG_I2C) += i2ccore.o |
7 | obj-$(CONFIG_I2C_SMBUS) += i2c-smbus.o | 10 | obj-$(CONFIG_I2C_SMBUS) += i2c-smbus.o |
8 | obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o | 11 | obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o |
9 | obj-$(CONFIG_I2C_MUX) += i2c-mux.o | 12 | obj-$(CONFIG_I2C_MUX) += i2c-mux.o |
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 9f7d5859cf65..2ac87fa3058d 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
@@ -109,6 +109,7 @@ config I2C_I801 | |||
109 | Avoton (SOC) | 109 | Avoton (SOC) |
110 | Wellsburg (PCH) | 110 | Wellsburg (PCH) |
111 | Coleto Creek (PCH) | 111 | Coleto Creek (PCH) |
112 | Wildcat Point (PCH) | ||
112 | Wildcat Point-LP (PCH) | 113 | Wildcat Point-LP (PCH) |
113 | BayTrail (SOC) | 114 | BayTrail (SOC) |
114 | 115 | ||
@@ -465,9 +466,9 @@ config I2C_EG20T | |||
465 | config I2C_EXYNOS5 | 466 | config I2C_EXYNOS5 |
466 | tristate "Exynos5 high-speed I2C driver" | 467 | tristate "Exynos5 high-speed I2C driver" |
467 | depends on ARCH_EXYNOS5 && OF | 468 | depends on ARCH_EXYNOS5 && OF |
469 | default y | ||
468 | help | 470 | help |
469 | Say Y here to include support for high-speed I2C controller in the | 471 | High-speed I2C controller on Exynos5 based Samsung SoCs. |
470 | Exynos5 based Samsung SoCs. | ||
471 | 472 | ||
472 | config I2C_GPIO | 473 | config I2C_GPIO |
473 | tristate "GPIO-based bitbanging I2C" | 474 | tristate "GPIO-based bitbanging I2C" |
@@ -700,16 +701,6 @@ config I2C_S3C2410 | |||
700 | Say Y here to include support for I2C controller in the | 701 | Say Y here to include support for I2C controller in the |
701 | Samsung SoCs. | 702 | Samsung SoCs. |
702 | 703 | ||
703 | config I2C_S6000 | ||
704 | tristate "S6000 I2C support" | ||
705 | depends on XTENSA_VARIANT_S6000 | ||
706 | help | ||
707 | This driver supports the on chip I2C device on the | ||
708 | S6000 xtensa processor family. | ||
709 | |||
710 | To compile this driver as a module, choose M here. The module | ||
711 | will be called i2c-s6000. | ||
712 | |||
713 | config I2C_SH7760 | 704 | config I2C_SH7760 |
714 | tristate "Renesas SH7760 I2C Controller" | 705 | tristate "Renesas SH7760 I2C Controller" |
715 | depends on CPU_SUBTYPE_SH7760 | 706 | depends on CPU_SUBTYPE_SH7760 |
@@ -1018,37 +1009,6 @@ config I2C_CROS_EC_TUNNEL | |||
1018 | connected there. This will work whatever the interface used to | 1009 | connected there. This will work whatever the interface used to |
1019 | talk to the EC (SPI, I2C or LPC). | 1010 | talk to the EC (SPI, I2C or LPC). |
1020 | 1011 | ||
1021 | config SCx200_I2C | ||
1022 | tristate "NatSemi SCx200 I2C using GPIO pins (DEPRECATED)" | ||
1023 | depends on SCx200_GPIO | ||
1024 | select I2C_ALGOBIT | ||
1025 | help | ||
1026 | Enable the use of two GPIO pins of a SCx200 processor as an I2C bus. | ||
1027 | |||
1028 | If you don't know what to do here, say N. | ||
1029 | |||
1030 | This support is also available as a module. If so, the module | ||
1031 | will be called scx200_i2c. | ||
1032 | |||
1033 | This driver is deprecated and will be dropped soon. Use i2c-gpio | ||
1034 | (or scx200_acb) instead. | ||
1035 | |||
1036 | config SCx200_I2C_SCL | ||
1037 | int "GPIO pin used for SCL" | ||
1038 | depends on SCx200_I2C | ||
1039 | default "12" | ||
1040 | help | ||
1041 | Enter the GPIO pin number used for the SCL signal. This value can | ||
1042 | also be specified with a module parameter. | ||
1043 | |||
1044 | config SCx200_I2C_SDA | ||
1045 | int "GPIO pin used for SDA" | ||
1046 | depends on SCx200_I2C | ||
1047 | default "13" | ||
1048 | help | ||
1049 | Enter the GPIO pin number used for the SSA signal. This value can | ||
1050 | also be specified with a module parameter. | ||
1051 | |||
1052 | config SCx200_ACB | 1012 | config SCx200_ACB |
1053 | tristate "Geode ACCESS.bus support" | 1013 | tristate "Geode ACCESS.bus support" |
1054 | depends on X86_32 && PCI | 1014 | depends on X86_32 && PCI |
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index dd9a7f8e873f..49bf07e5ef4d 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile | |||
@@ -68,7 +68,6 @@ obj-$(CONFIG_I2C_QUP) += i2c-qup.o | |||
68 | obj-$(CONFIG_I2C_RIIC) += i2c-riic.o | 68 | obj-$(CONFIG_I2C_RIIC) += i2c-riic.o |
69 | obj-$(CONFIG_I2C_RK3X) += i2c-rk3x.o | 69 | obj-$(CONFIG_I2C_RK3X) += i2c-rk3x.o |
70 | obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o | 70 | obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o |
71 | obj-$(CONFIG_I2C_S6000) += i2c-s6000.o | ||
72 | obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o | 71 | obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o |
73 | obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o | 72 | obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o |
74 | obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o | 73 | obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o |
@@ -101,6 +100,5 @@ obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o | |||
101 | obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o | 100 | obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o |
102 | obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o | 101 | obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o |
103 | obj-$(CONFIG_SCx200_ACB) += scx200_acb.o | 102 | obj-$(CONFIG_SCx200_ACB) += scx200_acb.o |
104 | obj-$(CONFIG_SCx200_I2C) += scx200_i2c.o | ||
105 | 103 | ||
106 | ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG | 104 | ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG |
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c index e95f9ba96790..79a68999a696 100644 --- a/drivers/i2c/busses/i2c-at91.c +++ b/drivers/i2c/busses/i2c-at91.c | |||
@@ -210,7 +210,7 @@ static void at91_twi_write_data_dma_callback(void *data) | |||
210 | struct at91_twi_dev *dev = (struct at91_twi_dev *)data; | 210 | struct at91_twi_dev *dev = (struct at91_twi_dev *)data; |
211 | 211 | ||
212 | dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg), | 212 | dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg), |
213 | dev->buf_len, DMA_MEM_TO_DEV); | 213 | dev->buf_len, DMA_TO_DEVICE); |
214 | 214 | ||
215 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); | 215 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); |
216 | } | 216 | } |
@@ -289,7 +289,7 @@ static void at91_twi_read_data_dma_callback(void *data) | |||
289 | struct at91_twi_dev *dev = (struct at91_twi_dev *)data; | 289 | struct at91_twi_dev *dev = (struct at91_twi_dev *)data; |
290 | 290 | ||
291 | dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg), | 291 | dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg), |
292 | dev->buf_len, DMA_DEV_TO_MEM); | 292 | dev->buf_len, DMA_FROM_DEVICE); |
293 | 293 | ||
294 | /* The last two bytes have to be read without using dma */ | 294 | /* The last two bytes have to be read without using dma */ |
295 | dev->buf += dev->buf_len - 2; | 295 | dev->buf += dev->buf_len - 2; |
@@ -768,7 +768,7 @@ static int at91_twi_probe(struct platform_device *pdev) | |||
768 | snprintf(dev->adapter.name, sizeof(dev->adapter.name), "AT91"); | 768 | snprintf(dev->adapter.name, sizeof(dev->adapter.name), "AT91"); |
769 | i2c_set_adapdata(&dev->adapter, dev); | 769 | i2c_set_adapdata(&dev->adapter, dev); |
770 | dev->adapter.owner = THIS_MODULE; | 770 | dev->adapter.owner = THIS_MODULE; |
771 | dev->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED; | 771 | dev->adapter.class = I2C_CLASS_DEPRECATED; |
772 | dev->adapter.algo = &at91_twi_algorithm; | 772 | dev->adapter.algo = &at91_twi_algorithm; |
773 | dev->adapter.dev.parent = dev->dev; | 773 | dev->adapter.dev.parent = dev->dev; |
774 | dev->adapter.nr = pdev->id; | 774 | dev->adapter.nr = pdev->id; |
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index 214ff9700efe..4b8ecd0b3661 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c | |||
@@ -277,7 +277,7 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) | |||
277 | adap = &i2c_dev->adapter; | 277 | adap = &i2c_dev->adapter; |
278 | i2c_set_adapdata(adap, i2c_dev); | 278 | i2c_set_adapdata(adap, i2c_dev); |
279 | adap->owner = THIS_MODULE; | 279 | adap->owner = THIS_MODULE; |
280 | adap->class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED; | 280 | adap->class = I2C_CLASS_DEPRECATED; |
281 | strlcpy(adap->name, "bcm2835 I2C adapter", sizeof(adap->name)); | 281 | strlcpy(adap->name, "bcm2835 I2C adapter", sizeof(adap->name)); |
282 | adap->algo = &bcm2835_i2c_algo; | 282 | adap->algo = &bcm2835_i2c_algo; |
283 | adap->dev.parent = &pdev->dev; | 283 | adap->dev.parent = &pdev->dev; |
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c index 3e271e7558d3..067c1615e968 100644 --- a/drivers/i2c/busses/i2c-bfin-twi.c +++ b/drivers/i2c/busses/i2c-bfin-twi.c | |||
@@ -648,7 +648,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev) | |||
648 | strlcpy(p_adap->name, pdev->name, sizeof(p_adap->name)); | 648 | strlcpy(p_adap->name, pdev->name, sizeof(p_adap->name)); |
649 | p_adap->algo = &bfin_twi_algorithm; | 649 | p_adap->algo = &bfin_twi_algorithm; |
650 | p_adap->algo_data = iface; | 650 | p_adap->algo_data = iface; |
651 | p_adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED; | 651 | p_adap->class = I2C_CLASS_DEPRECATED; |
652 | p_adap->dev.parent = &pdev->dev; | 652 | p_adap->dev.parent = &pdev->dev; |
653 | p_adap->timeout = 5 * HZ; | 653 | p_adap->timeout = 5 * HZ; |
654 | p_adap->retries = 3; | 654 | p_adap->retries = 3; |
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index 389bc68c55ad..4d9614719128 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c | |||
@@ -712,7 +712,7 @@ static int davinci_i2c_probe(struct platform_device *pdev) | |||
712 | adap = &dev->adapter; | 712 | adap = &dev->adapter; |
713 | i2c_set_adapdata(adap, dev); | 713 | i2c_set_adapdata(adap, dev); |
714 | adap->owner = THIS_MODULE; | 714 | adap->owner = THIS_MODULE; |
715 | adap->class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED; | 715 | adap->class = I2C_CLASS_DEPRECATED; |
716 | strlcpy(adap->name, "DaVinci I2C adapter", sizeof(adap->name)); | 716 | strlcpy(adap->name, "DaVinci I2C adapter", sizeof(adap->name)); |
717 | adap->algo = &i2c_davinci_algo; | 717 | adap->algo = &i2c_davinci_algo; |
718 | adap->dev.parent = &pdev->dev; | 718 | adap->dev.parent = &pdev->dev; |
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index 3356f7ab9f79..d31d313ab4f7 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c | |||
@@ -188,6 +188,7 @@ static struct dw_pci_controller dw_pci_controllers[] = { | |||
188 | .scl_sda_cfg = &hsw_config, | 188 | .scl_sda_cfg = &hsw_config, |
189 | }, | 189 | }, |
190 | }; | 190 | }; |
191 | |||
191 | static struct i2c_algorithm i2c_dw_algo = { | 192 | static struct i2c_algorithm i2c_dw_algo = { |
192 | .master_xfer = i2c_dw_xfer, | 193 | .master_xfer = i2c_dw_xfer, |
193 | .functionality = i2c_dw_func, | 194 | .functionality = i2c_dw_func, |
@@ -350,6 +351,14 @@ static const struct pci_device_id i2_designware_pci_ids[] = { | |||
350 | /* Haswell */ | 351 | /* Haswell */ |
351 | { PCI_VDEVICE(INTEL, 0x9c61), haswell }, | 352 | { PCI_VDEVICE(INTEL, 0x9c61), haswell }, |
352 | { PCI_VDEVICE(INTEL, 0x9c62), haswell }, | 353 | { PCI_VDEVICE(INTEL, 0x9c62), haswell }, |
354 | /* Braswell / Cherrytrail */ | ||
355 | { PCI_VDEVICE(INTEL, 0x22C1), baytrail,}, | ||
356 | { PCI_VDEVICE(INTEL, 0x22C2), baytrail }, | ||
357 | { PCI_VDEVICE(INTEL, 0x22C3), baytrail }, | ||
358 | { PCI_VDEVICE(INTEL, 0x22C4), baytrail }, | ||
359 | { PCI_VDEVICE(INTEL, 0x22C5), baytrail }, | ||
360 | { PCI_VDEVICE(INTEL, 0x22C6), baytrail }, | ||
361 | { PCI_VDEVICE(INTEL, 0x22C7), baytrail }, | ||
353 | { 0,} | 362 | { 0,} |
354 | }; | 363 | }; |
355 | MODULE_DEVICE_TABLE(pci, i2_designware_pci_ids); | 364 | MODULE_DEVICE_TABLE(pci, i2_designware_pci_ids); |
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 402ec3970fed..bc8773333155 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c | |||
@@ -106,6 +106,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = { | |||
106 | { "INT3432", 0 }, | 106 | { "INT3432", 0 }, |
107 | { "INT3433", 0 }, | 107 | { "INT3433", 0 }, |
108 | { "80860F41", 0 }, | 108 | { "80860F41", 0 }, |
109 | { "808622C1", 0 }, | ||
109 | { } | 110 | { } |
110 | }; | 111 | }; |
111 | MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); | 112 | MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); |
@@ -202,7 +203,7 @@ static int dw_i2c_probe(struct platform_device *pdev) | |||
202 | adap = &dev->adapter; | 203 | adap = &dev->adapter; |
203 | i2c_set_adapdata(adap, dev); | 204 | i2c_set_adapdata(adap, dev); |
204 | adap->owner = THIS_MODULE; | 205 | adap->owner = THIS_MODULE; |
205 | adap->class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED; | 206 | adap->class = I2C_CLASS_DEPRECATED; |
206 | strlcpy(adap->name, "Synopsys DesignWare I2C adapter", | 207 | strlcpy(adap->name, "Synopsys DesignWare I2C adapter", |
207 | sizeof(adap->name)); | 208 | sizeof(adap->name)); |
208 | adap->algo = &i2c_dw_algo; | 209 | adap->algo = &i2c_dw_algo; |
diff --git a/drivers/i2c/busses/i2c-efm32.c b/drivers/i2c/busses/i2c-efm32.c index f7eccd682de9..10b8323b08d4 100644 --- a/drivers/i2c/busses/i2c-efm32.c +++ b/drivers/i2c/busses/i2c-efm32.c | |||
@@ -370,7 +370,13 @@ static int efm32_i2c_probe(struct platform_device *pdev) | |||
370 | return ret; | 370 | return ret; |
371 | } | 371 | } |
372 | 372 | ||
373 | ret = of_property_read_u32(np, "efm32,location", &location); | 373 | |
374 | ret = of_property_read_u32(np, "energymicro,location", &location); | ||
375 | |||
376 | if (ret) | ||
377 | /* fall back to wrongly namespaced property */ | ||
378 | ret = of_property_read_u32(np, "efm32,location", &location); | ||
379 | |||
374 | if (!ret) { | 380 | if (!ret) { |
375 | dev_dbg(&pdev->dev, "using location %u\n", location); | 381 | dev_dbg(&pdev->dev, "using location %u\n", location); |
376 | } else { | 382 | } else { |
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c index 63d229202854..28073f1d6d47 100644 --- a/drivers/i2c/busses/i2c-exynos5.c +++ b/drivers/i2c/busses/i2c-exynos5.c | |||
@@ -405,7 +405,6 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id) | |||
405 | 405 | ||
406 | int_status = readl(i2c->regs + HSI2C_INT_STATUS); | 406 | int_status = readl(i2c->regs + HSI2C_INT_STATUS); |
407 | writel(int_status, i2c->regs + HSI2C_INT_STATUS); | 407 | writel(int_status, i2c->regs + HSI2C_INT_STATUS); |
408 | fifo_status = readl(i2c->regs + HSI2C_FIFO_STATUS); | ||
409 | 408 | ||
410 | /* handle interrupt related to the transfer status */ | 409 | /* handle interrupt related to the transfer status */ |
411 | if (int_status & HSI2C_INT_I2C) { | 410 | if (int_status & HSI2C_INT_I2C) { |
@@ -526,7 +525,7 @@ static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop) | |||
526 | if (i2c->msg->flags & I2C_M_RD) { | 525 | if (i2c->msg->flags & I2C_M_RD) { |
527 | i2c_ctl |= HSI2C_RXCHON; | 526 | i2c_ctl |= HSI2C_RXCHON; |
528 | 527 | ||
529 | i2c_auto_conf = HSI2C_READ_WRITE; | 528 | i2c_auto_conf |= HSI2C_READ_WRITE; |
530 | 529 | ||
531 | trig_lvl = (i2c->msg->len > i2c->variant->fifo_depth) ? | 530 | trig_lvl = (i2c->msg->len > i2c->variant->fifo_depth) ? |
532 | (i2c->variant->fifo_depth * 3 / 4) : i2c->msg->len; | 531 | (i2c->variant->fifo_depth * 3 / 4) : i2c->msg->len; |
@@ -549,7 +548,6 @@ static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop) | |||
549 | writel(fifo_ctl, i2c->regs + HSI2C_FIFO_CTL); | 548 | writel(fifo_ctl, i2c->regs + HSI2C_FIFO_CTL); |
550 | writel(i2c_ctl, i2c->regs + HSI2C_CTL); | 549 | writel(i2c_ctl, i2c->regs + HSI2C_CTL); |
551 | 550 | ||
552 | |||
553 | /* | 551 | /* |
554 | * Enable interrupts before starting the transfer so that we don't | 552 | * Enable interrupts before starting the transfer so that we don't |
555 | * miss any INT_I2C interrupts. | 553 | * miss any INT_I2C interrupts. |
@@ -789,8 +787,16 @@ static int exynos5_i2c_resume_noirq(struct device *dev) | |||
789 | } | 787 | } |
790 | #endif | 788 | #endif |
791 | 789 | ||
792 | static SIMPLE_DEV_PM_OPS(exynos5_i2c_dev_pm_ops, exynos5_i2c_suspend_noirq, | 790 | static const struct dev_pm_ops exynos5_i2c_dev_pm_ops = { |
793 | exynos5_i2c_resume_noirq); | 791 | #ifdef CONFIG_PM_SLEEP |
792 | .suspend_noirq = exynos5_i2c_suspend_noirq, | ||
793 | .resume_noirq = exynos5_i2c_resume_noirq, | ||
794 | .freeze_noirq = exynos5_i2c_suspend_noirq, | ||
795 | .thaw_noirq = exynos5_i2c_resume_noirq, | ||
796 | .poweroff_noirq = exynos5_i2c_suspend_noirq, | ||
797 | .restore_noirq = exynos5_i2c_resume_noirq, | ||
798 | #endif | ||
799 | }; | ||
794 | 800 | ||
795 | static struct platform_driver exynos5_i2c_driver = { | 801 | static struct platform_driver exynos5_i2c_driver = { |
796 | .probe = exynos5_i2c_probe, | 802 | .probe = exynos5_i2c_probe, |
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c index 71a45b210a24..933f1e453e41 100644 --- a/drivers/i2c/busses/i2c-gpio.c +++ b/drivers/i2c/busses/i2c-gpio.c | |||
@@ -238,12 +238,10 @@ static int i2c_gpio_probe(struct platform_device *pdev) | |||
238 | static int i2c_gpio_remove(struct platform_device *pdev) | 238 | static int i2c_gpio_remove(struct platform_device *pdev) |
239 | { | 239 | { |
240 | struct i2c_gpio_private_data *priv; | 240 | struct i2c_gpio_private_data *priv; |
241 | struct i2c_gpio_platform_data *pdata; | ||
242 | struct i2c_adapter *adap; | 241 | struct i2c_adapter *adap; |
243 | 242 | ||
244 | priv = platform_get_drvdata(pdev); | 243 | priv = platform_get_drvdata(pdev); |
245 | adap = &priv->adap; | 244 | adap = &priv->adap; |
246 | pdata = &priv->pdata; | ||
247 | 245 | ||
248 | i2c_del_adapter(adap); | 246 | i2c_del_adapter(adap); |
249 | 247 | ||
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 6777cd6f8776..2994690b26e9 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c | |||
@@ -22,57 +22,58 @@ | |||
22 | */ | 22 | */ |
23 | 23 | ||
24 | /* | 24 | /* |
25 | Supports the following Intel I/O Controller Hubs (ICH): | 25 | * Supports the following Intel I/O Controller Hubs (ICH): |
26 | 26 | * | |
27 | I/O Block I2C | 27 | * I/O Block I2C |
28 | region SMBus Block proc. block | 28 | * region SMBus Block proc. block |
29 | Chip name PCI ID size PEC buffer call read | 29 | * Chip name PCI ID size PEC buffer call read |
30 | ---------------------------------------------------------------------- | 30 | * --------------------------------------------------------------------------- |
31 | 82801AA (ICH) 0x2413 16 no no no no | 31 | * 82801AA (ICH) 0x2413 16 no no no no |
32 | 82801AB (ICH0) 0x2423 16 no no no no | 32 | * 82801AB (ICH0) 0x2423 16 no no no no |
33 | 82801BA (ICH2) 0x2443 16 no no no no | 33 | * 82801BA (ICH2) 0x2443 16 no no no no |
34 | 82801CA (ICH3) 0x2483 32 soft no no no | 34 | * 82801CA (ICH3) 0x2483 32 soft no no no |
35 | 82801DB (ICH4) 0x24c3 32 hard yes no no | 35 | * 82801DB (ICH4) 0x24c3 32 hard yes no no |
36 | 82801E (ICH5) 0x24d3 32 hard yes yes yes | 36 | * 82801E (ICH5) 0x24d3 32 hard yes yes yes |
37 | 6300ESB 0x25a4 32 hard yes yes yes | 37 | * 6300ESB 0x25a4 32 hard yes yes yes |
38 | 82801F (ICH6) 0x266a 32 hard yes yes yes | 38 | * 82801F (ICH6) 0x266a 32 hard yes yes yes |
39 | 6310ESB/6320ESB 0x269b 32 hard yes yes yes | 39 | * 6310ESB/6320ESB 0x269b 32 hard yes yes yes |
40 | 82801G (ICH7) 0x27da 32 hard yes yes yes | 40 | * 82801G (ICH7) 0x27da 32 hard yes yes yes |
41 | 82801H (ICH8) 0x283e 32 hard yes yes yes | 41 | * 82801H (ICH8) 0x283e 32 hard yes yes yes |
42 | 82801I (ICH9) 0x2930 32 hard yes yes yes | 42 | * 82801I (ICH9) 0x2930 32 hard yes yes yes |
43 | EP80579 (Tolapai) 0x5032 32 hard yes yes yes | 43 | * EP80579 (Tolapai) 0x5032 32 hard yes yes yes |
44 | ICH10 0x3a30 32 hard yes yes yes | 44 | * ICH10 0x3a30 32 hard yes yes yes |
45 | ICH10 0x3a60 32 hard yes yes yes | 45 | * ICH10 0x3a60 32 hard yes yes yes |
46 | 5/3400 Series (PCH) 0x3b30 32 hard yes yes yes | 46 | * 5/3400 Series (PCH) 0x3b30 32 hard yes yes yes |
47 | 6 Series (PCH) 0x1c22 32 hard yes yes yes | 47 | * 6 Series (PCH) 0x1c22 32 hard yes yes yes |
48 | Patsburg (PCH) 0x1d22 32 hard yes yes yes | 48 | * Patsburg (PCH) 0x1d22 32 hard yes yes yes |
49 | Patsburg (PCH) IDF 0x1d70 32 hard yes yes yes | 49 | * Patsburg (PCH) IDF 0x1d70 32 hard yes yes yes |
50 | Patsburg (PCH) IDF 0x1d71 32 hard yes yes yes | 50 | * Patsburg (PCH) IDF 0x1d71 32 hard yes yes yes |
51 | Patsburg (PCH) IDF 0x1d72 32 hard yes yes yes | 51 | * Patsburg (PCH) IDF 0x1d72 32 hard yes yes yes |
52 | DH89xxCC (PCH) 0x2330 32 hard yes yes yes | 52 | * DH89xxCC (PCH) 0x2330 32 hard yes yes yes |
53 | Panther Point (PCH) 0x1e22 32 hard yes yes yes | 53 | * Panther Point (PCH) 0x1e22 32 hard yes yes yes |
54 | Lynx Point (PCH) 0x8c22 32 hard yes yes yes | 54 | * Lynx Point (PCH) 0x8c22 32 hard yes yes yes |
55 | Lynx Point-LP (PCH) 0x9c22 32 hard yes yes yes | 55 | * Lynx Point-LP (PCH) 0x9c22 32 hard yes yes yes |
56 | Avoton (SOC) 0x1f3c 32 hard yes yes yes | 56 | * Avoton (SOC) 0x1f3c 32 hard yes yes yes |
57 | Wellsburg (PCH) 0x8d22 32 hard yes yes yes | 57 | * Wellsburg (PCH) 0x8d22 32 hard yes yes yes |
58 | Wellsburg (PCH) MS 0x8d7d 32 hard yes yes yes | 58 | * Wellsburg (PCH) MS 0x8d7d 32 hard yes yes yes |
59 | Wellsburg (PCH) MS 0x8d7e 32 hard yes yes yes | 59 | * Wellsburg (PCH) MS 0x8d7e 32 hard yes yes yes |
60 | Wellsburg (PCH) MS 0x8d7f 32 hard yes yes yes | 60 | * Wellsburg (PCH) MS 0x8d7f 32 hard yes yes yes |
61 | Coleto Creek (PCH) 0x23b0 32 hard yes yes yes | 61 | * Coleto Creek (PCH) 0x23b0 32 hard yes yes yes |
62 | Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes | 62 | * Wildcat Point (PCH) 0x8ca2 32 hard yes yes yes |
63 | BayTrail (SOC) 0x0f12 32 hard yes yes yes | 63 | * Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes |
64 | 64 | * BayTrail (SOC) 0x0f12 32 hard yes yes yes | |
65 | Features supported by this driver: | 65 | * |
66 | Software PEC no | 66 | * Features supported by this driver: |
67 | Hardware PEC yes | 67 | * Software PEC no |
68 | Block buffer yes | 68 | * Hardware PEC yes |
69 | Block process call transaction no | 69 | * Block buffer yes |
70 | I2C block read transaction yes (doesn't use the block buffer) | 70 | * Block process call transaction no |
71 | Slave mode no | 71 | * I2C block read transaction yes (doesn't use the block buffer) |
72 | Interrupt processing yes | 72 | * Slave mode no |
73 | 73 | * Interrupt processing yes | |
74 | See the file Documentation/i2c/busses/i2c-i801 for details. | 74 | * |
75 | */ | 75 | * See the file Documentation/i2c/busses/i2c-i801 for details. |
76 | */ | ||
76 | 77 | ||
77 | #include <linux/interrupt.h> | 78 | #include <linux/interrupt.h> |
78 | #include <linux/module.h> | 79 | #include <linux/module.h> |
@@ -162,24 +163,25 @@ | |||
162 | STATUS_ERROR_FLAGS) | 163 | STATUS_ERROR_FLAGS) |
163 | 164 | ||
164 | /* Older devices have their ID defined in <linux/pci_ids.h> */ | 165 | /* Older devices have their ID defined in <linux/pci_ids.h> */ |
165 | #define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12 | 166 | #define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12 |
166 | #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22 | 167 | #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22 |
167 | #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22 | 168 | #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22 |
168 | /* Patsburg also has three 'Integrated Device Function' SMBus controllers */ | 169 | /* Patsburg also has three 'Integrated Device Function' SMBus controllers */ |
169 | #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0 0x1d70 | 170 | #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0 0x1d70 |
170 | #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1 0x1d71 | 171 | #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1 0x1d71 |
171 | #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2 0x1d72 | 172 | #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2 0x1d72 |
172 | #define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS 0x1e22 | 173 | #define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS 0x1e22 |
173 | #define PCI_DEVICE_ID_INTEL_AVOTON_SMBUS 0x1f3c | 174 | #define PCI_DEVICE_ID_INTEL_AVOTON_SMBUS 0x1f3c |
174 | #define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330 | 175 | #define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330 |
175 | #define PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS 0x23b0 | 176 | #define PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS 0x23b0 |
176 | #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30 | 177 | #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30 |
177 | #define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22 | 178 | #define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22 |
178 | #define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS 0x8d22 | 179 | #define PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS 0x8ca2 |
179 | #define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0 0x8d7d | 180 | #define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS 0x8d22 |
180 | #define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1 0x8d7e | 181 | #define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0 0x8d7d |
181 | #define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2 0x8d7f | 182 | #define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1 0x8d7e |
182 | #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22 | 183 | #define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2 0x8d7f |
184 | #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22 | ||
183 | #define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS 0x9ca2 | 185 | #define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS 0x9ca2 |
184 | 186 | ||
185 | struct i801_mux_config { | 187 | struct i801_mux_config { |
@@ -823,6 +825,7 @@ static const struct pci_device_id i801_ids[] = { | |||
823 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1) }, | 825 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1) }, |
824 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) }, | 826 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) }, |
825 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS) }, | 827 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS) }, |
828 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS) }, | ||
826 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS) }, | 829 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS) }, |
827 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS) }, | 830 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS) }, |
828 | { 0, } | 831 | { 0, } |
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index aa8bc146718b..613069bc561a 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c | |||
@@ -735,10 +735,7 @@ static int i2c_imx_probe(struct platform_device *pdev) | |||
735 | clk_disable_unprepare(i2c_imx->clk); | 735 | clk_disable_unprepare(i2c_imx->clk); |
736 | 736 | ||
737 | dev_dbg(&i2c_imx->adapter.dev, "claimed irq %d\n", irq); | 737 | dev_dbg(&i2c_imx->adapter.dev, "claimed irq %d\n", irq); |
738 | dev_dbg(&i2c_imx->adapter.dev, "device resources from 0x%x to 0x%x\n", | 738 | dev_dbg(&i2c_imx->adapter.dev, "device resources: %pR\n", res); |
739 | res->start, res->end); | ||
740 | dev_dbg(&i2c_imx->adapter.dev, "allocated %d bytes at 0x%x\n", | ||
741 | resource_size(res), res->start); | ||
742 | dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n", | 739 | dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n", |
743 | i2c_imx->adapter.name); | 740 | i2c_imx->adapter.name); |
744 | dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); | 741 | dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); |
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index 6a32aa095f83..0edf630b099a 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c | |||
@@ -341,8 +341,7 @@ static u32 mpc_i2c_get_sec_cfg_8xxx(void) | |||
341 | iounmap(reg); | 341 | iounmap(reg); |
342 | } | 342 | } |
343 | } | 343 | } |
344 | if (node) | 344 | of_node_put(node); |
345 | of_node_put(node); | ||
346 | 345 | ||
347 | return val; | 346 | return val; |
348 | } | 347 | } |
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index 9f4b775e2e39..6dc5ded86f62 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c | |||
@@ -863,7 +863,7 @@ mv64xxx_i2c_probe(struct platform_device *pd) | |||
863 | drv_data->adapter.dev.parent = &pd->dev; | 863 | drv_data->adapter.dev.parent = &pd->dev; |
864 | drv_data->adapter.algo = &mv64xxx_i2c_algo; | 864 | drv_data->adapter.algo = &mv64xxx_i2c_algo; |
865 | drv_data->adapter.owner = THIS_MODULE; | 865 | drv_data->adapter.owner = THIS_MODULE; |
866 | drv_data->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED; | 866 | drv_data->adapter.class = I2C_CLASS_DEPRECATED; |
867 | drv_data->adapter.nr = pd->id; | 867 | drv_data->adapter.nr = pd->id; |
868 | drv_data->adapter.dev.of_node = pd->dev.of_node; | 868 | drv_data->adapter.dev.of_node = pd->dev.of_node; |
869 | platform_set_drvdata(pd, drv_data); | 869 | platform_set_drvdata(pd, drv_data); |
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index 0e55d85fd4ed..9ad038d223c4 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c | |||
@@ -1032,10 +1032,10 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) | |||
1032 | adap = &dev->adap; | 1032 | adap = &dev->adap; |
1033 | adap->dev.of_node = np; | 1033 | adap->dev.of_node = np; |
1034 | adap->dev.parent = &adev->dev; | 1034 | adap->dev.parent = &adev->dev; |
1035 | adap->owner = THIS_MODULE; | 1035 | adap->owner = THIS_MODULE; |
1036 | adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED; | 1036 | adap->class = I2C_CLASS_DEPRECATED; |
1037 | adap->algo = &nmk_i2c_algo; | 1037 | adap->algo = &nmk_i2c_algo; |
1038 | adap->timeout = msecs_to_jiffies(dev->timeout); | 1038 | adap->timeout = msecs_to_jiffies(dev->timeout); |
1039 | snprintf(adap->name, sizeof(adap->name), | 1039 | snprintf(adap->name, sizeof(adap->name), |
1040 | "Nomadik I2C at %pR", &adev->res); | 1040 | "Nomadik I2C at %pR", &adev->res); |
1041 | 1041 | ||
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index 0e10cc6182f0..2a4fe0b7cfb7 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c | |||
@@ -239,15 +239,15 @@ static u32 ocores_func(struct i2c_adapter *adap) | |||
239 | } | 239 | } |
240 | 240 | ||
241 | static const struct i2c_algorithm ocores_algorithm = { | 241 | static const struct i2c_algorithm ocores_algorithm = { |
242 | .master_xfer = ocores_xfer, | 242 | .master_xfer = ocores_xfer, |
243 | .functionality = ocores_func, | 243 | .functionality = ocores_func, |
244 | }; | 244 | }; |
245 | 245 | ||
246 | static struct i2c_adapter ocores_adapter = { | 246 | static struct i2c_adapter ocores_adapter = { |
247 | .owner = THIS_MODULE, | 247 | .owner = THIS_MODULE, |
248 | .name = "i2c-ocores", | 248 | .name = "i2c-ocores", |
249 | .class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED, | 249 | .class = I2C_CLASS_DEPRECATED, |
250 | .algo = &ocores_algorithm, | 250 | .algo = &ocores_algorithm, |
251 | }; | 251 | }; |
252 | 252 | ||
253 | static const struct of_device_id ocores_i2c_match[] = { | 253 | static const struct of_device_id ocores_i2c_match[] = { |
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index b182793a4051..0dffb0e62c3b 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c | |||
@@ -1236,7 +1236,7 @@ omap_i2c_probe(struct platform_device *pdev) | |||
1236 | adap = &dev->adapter; | 1236 | adap = &dev->adapter; |
1237 | i2c_set_adapdata(adap, dev); | 1237 | i2c_set_adapdata(adap, dev); |
1238 | adap->owner = THIS_MODULE; | 1238 | adap->owner = THIS_MODULE; |
1239 | adap->class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED; | 1239 | adap->class = I2C_CLASS_DEPRECATED; |
1240 | strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name)); | 1240 | strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name)); |
1241 | adap->algo = &omap_i2c_algo; | 1241 | adap->algo = &omap_i2c_algo; |
1242 | adap->dev.parent = &pdev->dev; | 1242 | adap->dev.parent = &pdev->dev; |
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index 2a5efb5b487c..3a4d64e1dfb1 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c | |||
@@ -633,13 +633,17 @@ static int qup_i2c_probe(struct platform_device *pdev) | |||
633 | * associated with each byte written/received | 633 | * associated with each byte written/received |
634 | */ | 634 | */ |
635 | size = QUP_OUTPUT_BLOCK_SIZE(io_mode); | 635 | size = QUP_OUTPUT_BLOCK_SIZE(io_mode); |
636 | if (size >= ARRAY_SIZE(blk_sizes)) | 636 | if (size >= ARRAY_SIZE(blk_sizes)) { |
637 | return -EIO; | 637 | ret = -EIO; |
638 | goto fail; | ||
639 | } | ||
638 | qup->out_blk_sz = blk_sizes[size] / 2; | 640 | qup->out_blk_sz = blk_sizes[size] / 2; |
639 | 641 | ||
640 | size = QUP_INPUT_BLOCK_SIZE(io_mode); | 642 | size = QUP_INPUT_BLOCK_SIZE(io_mode); |
641 | if (size >= ARRAY_SIZE(blk_sizes)) | 643 | if (size >= ARRAY_SIZE(blk_sizes)) { |
642 | return -EIO; | 644 | ret = -EIO; |
645 | goto fail; | ||
646 | } | ||
643 | qup->in_blk_sz = blk_sizes[size] / 2; | 647 | qup->in_blk_sz = blk_sizes[size] / 2; |
644 | 648 | ||
645 | size = QUP_OUTPUT_FIFO_SIZE(io_mode); | 649 | size = QUP_OUTPUT_FIFO_SIZE(io_mode); |
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 899405923678..f3c7139dfa25 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c | |||
@@ -541,13 +541,13 @@ static int rcar_i2c_probe(struct platform_device *pdev) | |||
541 | irq = platform_get_irq(pdev, 0); | 541 | irq = platform_get_irq(pdev, 0); |
542 | init_waitqueue_head(&priv->wait); | 542 | init_waitqueue_head(&priv->wait); |
543 | 543 | ||
544 | adap = &priv->adap; | 544 | adap = &priv->adap; |
545 | adap->nr = pdev->id; | 545 | adap->nr = pdev->id; |
546 | adap->algo = &rcar_i2c_algo; | 546 | adap->algo = &rcar_i2c_algo; |
547 | adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED; | 547 | adap->class = I2C_CLASS_DEPRECATED; |
548 | adap->retries = 3; | 548 | adap->retries = 3; |
549 | adap->dev.parent = dev; | 549 | adap->dev.parent = dev; |
550 | adap->dev.of_node = dev->of_node; | 550 | adap->dev.of_node = dev->of_node; |
551 | i2c_set_adapdata(adap, priv); | 551 | i2c_set_adapdata(adap, priv); |
552 | strlcpy(adap->name, pdev->name, sizeof(adap->name)); | 552 | strlcpy(adap->name, pdev->name, sizeof(adap->name)); |
553 | 553 | ||
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index a9791509966a..69e11853e8bf 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c | |||
@@ -399,7 +399,7 @@ static irqreturn_t rk3x_i2c_irq(int irqno, void *dev_id) | |||
399 | } | 399 | } |
400 | 400 | ||
401 | /* is there anything left to handle? */ | 401 | /* is there anything left to handle? */ |
402 | if (unlikely(ipd == 0)) | 402 | if (unlikely((ipd & REG_INT_ALL) == 0)) |
403 | goto out; | 403 | goto out; |
404 | 404 | ||
405 | switch (i2c->state) { | 405 | switch (i2c->state) { |
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index e828a1dba0e5..e086fb075f2b 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c | |||
@@ -1128,11 +1128,11 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) | |||
1128 | s3c24xx_i2c_parse_dt(pdev->dev.of_node, i2c); | 1128 | s3c24xx_i2c_parse_dt(pdev->dev.of_node, i2c); |
1129 | 1129 | ||
1130 | strlcpy(i2c->adap.name, "s3c2410-i2c", sizeof(i2c->adap.name)); | 1130 | strlcpy(i2c->adap.name, "s3c2410-i2c", sizeof(i2c->adap.name)); |
1131 | i2c->adap.owner = THIS_MODULE; | 1131 | i2c->adap.owner = THIS_MODULE; |
1132 | i2c->adap.algo = &s3c24xx_i2c_algorithm; | 1132 | i2c->adap.algo = &s3c24xx_i2c_algorithm; |
1133 | i2c->adap.retries = 2; | 1133 | i2c->adap.retries = 2; |
1134 | i2c->adap.class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED; | 1134 | i2c->adap.class = I2C_CLASS_DEPRECATED; |
1135 | i2c->tx_setup = 50; | 1135 | i2c->tx_setup = 50; |
1136 | 1136 | ||
1137 | init_waitqueue_head(&i2c->wait); | 1137 | init_waitqueue_head(&i2c->wait); |
1138 | 1138 | ||
@@ -1267,7 +1267,7 @@ static int s3c24xx_i2c_suspend_noirq(struct device *dev) | |||
1267 | return 0; | 1267 | return 0; |
1268 | } | 1268 | } |
1269 | 1269 | ||
1270 | static int s3c24xx_i2c_resume(struct device *dev) | 1270 | static int s3c24xx_i2c_resume_noirq(struct device *dev) |
1271 | { | 1271 | { |
1272 | struct platform_device *pdev = to_platform_device(dev); | 1272 | struct platform_device *pdev = to_platform_device(dev); |
1273 | struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); | 1273 | struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); |
@@ -1285,7 +1285,11 @@ static int s3c24xx_i2c_resume(struct device *dev) | |||
1285 | static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = { | 1285 | static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = { |
1286 | #ifdef CONFIG_PM_SLEEP | 1286 | #ifdef CONFIG_PM_SLEEP |
1287 | .suspend_noirq = s3c24xx_i2c_suspend_noirq, | 1287 | .suspend_noirq = s3c24xx_i2c_suspend_noirq, |
1288 | .resume = s3c24xx_i2c_resume, | 1288 | .resume_noirq = s3c24xx_i2c_resume_noirq, |
1289 | .freeze_noirq = s3c24xx_i2c_suspend_noirq, | ||
1290 | .thaw_noirq = s3c24xx_i2c_resume_noirq, | ||
1291 | .poweroff_noirq = s3c24xx_i2c_suspend_noirq, | ||
1292 | .restore_noirq = s3c24xx_i2c_resume_noirq, | ||
1289 | #endif | 1293 | #endif |
1290 | }; | 1294 | }; |
1291 | 1295 | ||
diff --git a/drivers/i2c/busses/i2c-s6000.c b/drivers/i2c/busses/i2c-s6000.c deleted file mode 100644 index dd186a037684..000000000000 --- a/drivers/i2c/busses/i2c-s6000.c +++ /dev/null | |||
@@ -1,404 +0,0 @@ | |||
1 | /* | ||
2 | * drivers/i2c/busses/i2c-s6000.c | ||
3 | * | ||
4 | * Description: Driver for S6000 Family I2C Interface | ||
5 | * Copyright (c) 2008 emlix GmbH | ||
6 | * Author: Oskar Schirmer <oskar@scara.com> | ||
7 | * | ||
8 | * Partially based on i2c-bfin-twi.c driver by <sonic.zhang@analog.com> | ||
9 | * Copyright (c) 2005-2007 Analog Devices, Inc. | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2 of the License, or | ||
14 | * (at your option) any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA | ||
24 | */ | ||
25 | |||
26 | #include <linux/clk.h> | ||
27 | #include <linux/err.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/delay.h> | ||
32 | #include <linux/i2c.h> | ||
33 | #include <linux/i2c/s6000.h> | ||
34 | #include <linux/timer.h> | ||
35 | #include <linux/spinlock.h> | ||
36 | #include <linux/completion.h> | ||
37 | #include <linux/interrupt.h> | ||
38 | #include <linux/platform_device.h> | ||
39 | #include <linux/io.h> | ||
40 | |||
41 | #include "i2c-s6000.h" | ||
42 | |||
43 | #define DRV_NAME "i2c-s6000" | ||
44 | |||
45 | #define POLL_TIMEOUT (2 * HZ) | ||
46 | |||
47 | struct s6i2c_if { | ||
48 | u8 __iomem *reg; /* memory mapped registers */ | ||
49 | int irq; | ||
50 | spinlock_t lock; | ||
51 | struct i2c_msg *msgs; /* messages currently handled */ | ||
52 | int msgs_num; /* nb of msgs to do */ | ||
53 | int msgs_push; /* nb of msgs read/written */ | ||
54 | int msgs_done; /* nb of msgs finally handled */ | ||
55 | unsigned push; /* nb of bytes read/written in msg */ | ||
56 | unsigned done; /* nb of bytes finally handled */ | ||
57 | int timeout_count; /* timeout retries left */ | ||
58 | struct timer_list timeout_timer; | ||
59 | struct i2c_adapter adap; | ||
60 | struct completion complete; | ||
61 | struct clk *clk; | ||
62 | struct resource *res; | ||
63 | }; | ||
64 | |||
65 | static inline u16 i2c_rd16(struct s6i2c_if *iface, unsigned n) | ||
66 | { | ||
67 | return readw(iface->reg + (n)); | ||
68 | } | ||
69 | |||
70 | static inline void i2c_wr16(struct s6i2c_if *iface, unsigned n, u16 v) | ||
71 | { | ||
72 | writew(v, iface->reg + (n)); | ||
73 | } | ||
74 | |||
75 | static inline u32 i2c_rd32(struct s6i2c_if *iface, unsigned n) | ||
76 | { | ||
77 | return readl(iface->reg + (n)); | ||
78 | } | ||
79 | |||
80 | static inline void i2c_wr32(struct s6i2c_if *iface, unsigned n, u32 v) | ||
81 | { | ||
82 | writel(v, iface->reg + (n)); | ||
83 | } | ||
84 | |||
85 | static struct s6i2c_if s6i2c_if; | ||
86 | |||
87 | static void s6i2c_handle_interrupt(struct s6i2c_if *iface) | ||
88 | { | ||
89 | if (i2c_rd16(iface, S6_I2C_INTRSTAT) & (1 << S6_I2C_INTR_TXABRT)) { | ||
90 | i2c_rd16(iface, S6_I2C_CLRTXABRT); | ||
91 | i2c_wr16(iface, S6_I2C_INTRMASK, 0); | ||
92 | complete(&iface->complete); | ||
93 | return; | ||
94 | } | ||
95 | if (iface->msgs_done >= iface->msgs_num) { | ||
96 | dev_err(&iface->adap.dev, "s6i2c: spurious I2C irq: %04x\n", | ||
97 | i2c_rd16(iface, S6_I2C_INTRSTAT)); | ||
98 | i2c_wr16(iface, S6_I2C_INTRMASK, 0); | ||
99 | return; | ||
100 | } | ||
101 | while ((iface->msgs_push < iface->msgs_num) | ||
102 | && (i2c_rd16(iface, S6_I2C_STATUS) & (1 << S6_I2C_STATUS_TFNF))) { | ||
103 | struct i2c_msg *m = &iface->msgs[iface->msgs_push]; | ||
104 | if (!(m->flags & I2C_M_RD)) | ||
105 | i2c_wr16(iface, S6_I2C_DATACMD, m->buf[iface->push]); | ||
106 | else | ||
107 | i2c_wr16(iface, S6_I2C_DATACMD, | ||
108 | 1 << S6_I2C_DATACMD_READ); | ||
109 | if (++iface->push >= m->len) { | ||
110 | iface->push = 0; | ||
111 | iface->msgs_push += 1; | ||
112 | } | ||
113 | } | ||
114 | do { | ||
115 | struct i2c_msg *m = &iface->msgs[iface->msgs_done]; | ||
116 | if (!(m->flags & I2C_M_RD)) { | ||
117 | if (iface->msgs_done < iface->msgs_push) | ||
118 | iface->msgs_done += 1; | ||
119 | else | ||
120 | break; | ||
121 | } else if (i2c_rd16(iface, S6_I2C_STATUS) | ||
122 | & (1 << S6_I2C_STATUS_RFNE)) { | ||
123 | m->buf[iface->done] = i2c_rd16(iface, S6_I2C_DATACMD); | ||
124 | if (++iface->done >= m->len) { | ||
125 | iface->done = 0; | ||
126 | iface->msgs_done += 1; | ||
127 | } | ||
128 | } else{ | ||
129 | break; | ||
130 | } | ||
131 | } while (iface->msgs_done < iface->msgs_num); | ||
132 | if (iface->msgs_done >= iface->msgs_num) { | ||
133 | i2c_wr16(iface, S6_I2C_INTRMASK, 1 << S6_I2C_INTR_TXABRT); | ||
134 | complete(&iface->complete); | ||
135 | } else if (iface->msgs_push >= iface->msgs_num) { | ||
136 | i2c_wr16(iface, S6_I2C_INTRMASK, (1 << S6_I2C_INTR_TXABRT) | | ||
137 | (1 << S6_I2C_INTR_RXFULL)); | ||
138 | } else { | ||
139 | i2c_wr16(iface, S6_I2C_INTRMASK, (1 << S6_I2C_INTR_TXABRT) | | ||
140 | (1 << S6_I2C_INTR_TXEMPTY) | | ||
141 | (1 << S6_I2C_INTR_RXFULL)); | ||
142 | } | ||
143 | } | ||
144 | |||
145 | static irqreturn_t s6i2c_interrupt_entry(int irq, void *dev_id) | ||
146 | { | ||
147 | struct s6i2c_if *iface = dev_id; | ||
148 | if (!(i2c_rd16(iface, S6_I2C_STATUS) & ((1 << S6_I2C_INTR_RXUNDER) | ||
149 | | (1 << S6_I2C_INTR_RXOVER) | ||
150 | | (1 << S6_I2C_INTR_RXFULL) | ||
151 | | (1 << S6_I2C_INTR_TXOVER) | ||
152 | | (1 << S6_I2C_INTR_TXEMPTY) | ||
153 | | (1 << S6_I2C_INTR_RDREQ) | ||
154 | | (1 << S6_I2C_INTR_TXABRT) | ||
155 | | (1 << S6_I2C_INTR_RXDONE) | ||
156 | | (1 << S6_I2C_INTR_ACTIVITY) | ||
157 | | (1 << S6_I2C_INTR_STOPDET) | ||
158 | | (1 << S6_I2C_INTR_STARTDET) | ||
159 | | (1 << S6_I2C_INTR_GENCALL)))) | ||
160 | return IRQ_NONE; | ||
161 | |||
162 | spin_lock(&iface->lock); | ||
163 | del_timer(&iface->timeout_timer); | ||
164 | s6i2c_handle_interrupt(iface); | ||
165 | spin_unlock(&iface->lock); | ||
166 | return IRQ_HANDLED; | ||
167 | } | ||
168 | |||
169 | static void s6i2c_timeout(unsigned long data) | ||
170 | { | ||
171 | struct s6i2c_if *iface = (struct s6i2c_if *)data; | ||
172 | unsigned long flags; | ||
173 | |||
174 | spin_lock_irqsave(&iface->lock, flags); | ||
175 | s6i2c_handle_interrupt(iface); | ||
176 | if (--iface->timeout_count > 0) { | ||
177 | iface->timeout_timer.expires = jiffies + POLL_TIMEOUT; | ||
178 | add_timer(&iface->timeout_timer); | ||
179 | } else { | ||
180 | complete(&iface->complete); | ||
181 | i2c_wr16(iface, S6_I2C_INTRMASK, 0); | ||
182 | } | ||
183 | spin_unlock_irqrestore(&iface->lock, flags); | ||
184 | } | ||
185 | |||
186 | static int s6i2c_master_xfer(struct i2c_adapter *adap, | ||
187 | struct i2c_msg *msgs, int num) | ||
188 | { | ||
189 | struct s6i2c_if *iface = adap->algo_data; | ||
190 | int i; | ||
191 | if (num == 0) | ||
192 | return 0; | ||
193 | if (i2c_rd16(iface, S6_I2C_STATUS) & (1 << S6_I2C_STATUS_ACTIVITY)) | ||
194 | yield(); | ||
195 | i2c_wr16(iface, S6_I2C_INTRMASK, 0); | ||
196 | i2c_rd16(iface, S6_I2C_CLRINTR); | ||
197 | for (i = 0; i < num; i++) { | ||
198 | if (msgs[i].flags & I2C_M_TEN) { | ||
199 | dev_err(&adap->dev, | ||
200 | "s6i2c: 10 bits addr not supported\n"); | ||
201 | return -EINVAL; | ||
202 | } | ||
203 | if (msgs[i].len == 0) { | ||
204 | dev_err(&adap->dev, | ||
205 | "s6i2c: zero length message not supported\n"); | ||
206 | return -EINVAL; | ||
207 | } | ||
208 | if (msgs[i].addr != msgs[0].addr) { | ||
209 | dev_err(&adap->dev, | ||
210 | "s6i2c: multiple xfer cannot change target\n"); | ||
211 | return -EINVAL; | ||
212 | } | ||
213 | } | ||
214 | |||
215 | iface->msgs = msgs; | ||
216 | iface->msgs_num = num; | ||
217 | iface->msgs_push = 0; | ||
218 | iface->msgs_done = 0; | ||
219 | iface->push = 0; | ||
220 | iface->done = 0; | ||
221 | iface->timeout_count = 10; | ||
222 | i2c_wr16(iface, S6_I2C_TAR, msgs[0].addr); | ||
223 | i2c_wr16(iface, S6_I2C_ENABLE, 1); | ||
224 | i2c_wr16(iface, S6_I2C_INTRMASK, (1 << S6_I2C_INTR_TXEMPTY) | | ||
225 | (1 << S6_I2C_INTR_TXABRT)); | ||
226 | |||
227 | iface->timeout_timer.expires = jiffies + POLL_TIMEOUT; | ||
228 | add_timer(&iface->timeout_timer); | ||
229 | wait_for_completion(&iface->complete); | ||
230 | del_timer_sync(&iface->timeout_timer); | ||
231 | while (i2c_rd32(iface, S6_I2C_TXFLR) > 0) | ||
232 | schedule(); | ||
233 | while (i2c_rd16(iface, S6_I2C_STATUS) & (1 << S6_I2C_STATUS_ACTIVITY)) | ||
234 | schedule(); | ||
235 | |||
236 | i2c_wr16(iface, S6_I2C_INTRMASK, 0); | ||
237 | i2c_wr16(iface, S6_I2C_ENABLE, 0); | ||
238 | return iface->msgs_done; | ||
239 | } | ||
240 | |||
241 | static u32 s6i2c_functionality(struct i2c_adapter *adap) | ||
242 | { | ||
243 | return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; | ||
244 | } | ||
245 | |||
246 | static struct i2c_algorithm s6i2c_algorithm = { | ||
247 | .master_xfer = s6i2c_master_xfer, | ||
248 | .functionality = s6i2c_functionality, | ||
249 | }; | ||
250 | |||
251 | static u16 nanoseconds_on_clk(struct s6i2c_if *iface, u32 ns) | ||
252 | { | ||
253 | u32 dividend = ((clk_get_rate(iface->clk) / 1000) * ns) / 1000000; | ||
254 | if (dividend > 0xffff) | ||
255 | return 0xffff; | ||
256 | return dividend; | ||
257 | } | ||
258 | |||
259 | static int s6i2c_probe(struct platform_device *dev) | ||
260 | { | ||
261 | struct s6i2c_if *iface = &s6i2c_if; | ||
262 | struct i2c_adapter *p_adap; | ||
263 | const char *clock; | ||
264 | int bus_num, rc; | ||
265 | spin_lock_init(&iface->lock); | ||
266 | init_completion(&iface->complete); | ||
267 | iface->irq = platform_get_irq(dev, 0); | ||
268 | if (iface->irq < 0) { | ||
269 | rc = iface->irq; | ||
270 | goto err_out; | ||
271 | } | ||
272 | iface->res = platform_get_resource(dev, IORESOURCE_MEM, 0); | ||
273 | if (!iface->res) { | ||
274 | rc = -ENXIO; | ||
275 | goto err_out; | ||
276 | } | ||
277 | iface->res = request_mem_region(iface->res->start, | ||
278 | resource_size(iface->res), | ||
279 | dev->dev.bus_id); | ||
280 | if (!iface->res) { | ||
281 | rc = -EBUSY; | ||
282 | goto err_out; | ||
283 | } | ||
284 | iface->reg = ioremap_nocache(iface->res->start, | ||
285 | resource_size(iface->res)); | ||
286 | if (!iface->reg) { | ||
287 | rc = -ENOMEM; | ||
288 | goto err_reg; | ||
289 | } | ||
290 | |||
291 | clock = 0; | ||
292 | bus_num = -1; | ||
293 | if (dev_get_platdata(&dev->dev)) { | ||
294 | struct s6_i2c_platform_data *pdata = | ||
295 | dev_get_platdata(&dev->dev); | ||
296 | bus_num = pdata->bus_num; | ||
297 | clock = pdata->clock; | ||
298 | } | ||
299 | iface->clk = clk_get(&dev->dev, clock); | ||
300 | if (IS_ERR(iface->clk)) { | ||
301 | rc = PTR_ERR(iface->clk); | ||
302 | goto err_map; | ||
303 | } | ||
304 | rc = clk_enable(iface->clk); | ||
305 | if (rc < 0) | ||
306 | goto err_clk_put; | ||
307 | init_timer(&iface->timeout_timer); | ||
308 | iface->timeout_timer.function = s6i2c_timeout; | ||
309 | iface->timeout_timer.data = (unsigned long)iface; | ||
310 | |||
311 | p_adap = &iface->adap; | ||
312 | strlcpy(p_adap->name, dev->name, sizeof(p_adap->name)); | ||
313 | p_adap->algo = &s6i2c_algorithm; | ||
314 | p_adap->algo_data = iface; | ||
315 | p_adap->nr = bus_num; | ||
316 | p_adap->class = 0; | ||
317 | p_adap->dev.parent = &dev->dev; | ||
318 | i2c_wr16(iface, S6_I2C_INTRMASK, 0); | ||
319 | rc = request_irq(iface->irq, s6i2c_interrupt_entry, | ||
320 | IRQF_SHARED, dev->name, iface); | ||
321 | if (rc) { | ||
322 | dev_err(&p_adap->dev, "s6i2c: can't get IRQ %d\n", iface->irq); | ||
323 | goto err_clk_dis; | ||
324 | } | ||
325 | |||
326 | i2c_wr16(iface, S6_I2C_ENABLE, 0); | ||
327 | udelay(1); | ||
328 | i2c_wr32(iface, S6_I2C_SRESET, 1 << S6_I2C_SRESET_IC_SRST); | ||
329 | i2c_wr16(iface, S6_I2C_CLRTXABRT, 1); | ||
330 | i2c_wr16(iface, S6_I2C_CON, | ||
331 | (1 << S6_I2C_CON_MASTER) | | ||
332 | (S6_I2C_CON_SPEED_NORMAL << S6_I2C_CON_SPEED) | | ||
333 | (0 << S6_I2C_CON_10BITSLAVE) | | ||
334 | (0 << S6_I2C_CON_10BITMASTER) | | ||
335 | (1 << S6_I2C_CON_RESTARTENA) | | ||
336 | (1 << S6_I2C_CON_SLAVEDISABLE)); | ||
337 | i2c_wr16(iface, S6_I2C_SSHCNT, nanoseconds_on_clk(iface, 4000)); | ||
338 | i2c_wr16(iface, S6_I2C_SSLCNT, nanoseconds_on_clk(iface, 4700)); | ||
339 | i2c_wr16(iface, S6_I2C_FSHCNT, nanoseconds_on_clk(iface, 600)); | ||
340 | i2c_wr16(iface, S6_I2C_FSLCNT, nanoseconds_on_clk(iface, 1300)); | ||
341 | i2c_wr16(iface, S6_I2C_RXTL, 0); | ||
342 | i2c_wr16(iface, S6_I2C_TXTL, 0); | ||
343 | |||
344 | platform_set_drvdata(dev, iface); | ||
345 | rc = i2c_add_numbered_adapter(p_adap); | ||
346 | if (rc) | ||
347 | goto err_irq_free; | ||
348 | return 0; | ||
349 | |||
350 | err_irq_free: | ||
351 | free_irq(iface->irq, iface); | ||
352 | err_clk_dis: | ||
353 | clk_disable(iface->clk); | ||
354 | err_clk_put: | ||
355 | clk_put(iface->clk); | ||
356 | err_map: | ||
357 | iounmap(iface->reg); | ||
358 | err_reg: | ||
359 | release_mem_region(iface->res->start, | ||
360 | resource_size(iface->res)); | ||
361 | err_out: | ||
362 | return rc; | ||
363 | } | ||
364 | |||
365 | static int s6i2c_remove(struct platform_device *pdev) | ||
366 | { | ||
367 | struct s6i2c_if *iface = platform_get_drvdata(pdev); | ||
368 | i2c_wr16(iface, S6_I2C_ENABLE, 0); | ||
369 | i2c_del_adapter(&iface->adap); | ||
370 | free_irq(iface->irq, iface); | ||
371 | clk_disable(iface->clk); | ||
372 | clk_put(iface->clk); | ||
373 | iounmap(iface->reg); | ||
374 | release_mem_region(iface->res->start, | ||
375 | resource_size(iface->res)); | ||
376 | return 0; | ||
377 | } | ||
378 | |||
379 | static struct platform_driver s6i2c_driver = { | ||
380 | .probe = s6i2c_probe, | ||
381 | .remove = s6i2c_remove, | ||
382 | .driver = { | ||
383 | .name = DRV_NAME, | ||
384 | .owner = THIS_MODULE, | ||
385 | }, | ||
386 | }; | ||
387 | |||
388 | static int __init s6i2c_init(void) | ||
389 | { | ||
390 | pr_info("I2C: S6000 I2C driver\n"); | ||
391 | return platform_driver_register(&s6i2c_driver); | ||
392 | } | ||
393 | |||
394 | static void __exit s6i2c_exit(void) | ||
395 | { | ||
396 | platform_driver_unregister(&s6i2c_driver); | ||
397 | } | ||
398 | |||
399 | MODULE_DESCRIPTION("I2C-Bus adapter routines for S6000 I2C"); | ||
400 | MODULE_LICENSE("GPL"); | ||
401 | MODULE_ALIAS("platform:" DRV_NAME); | ||
402 | |||
403 | subsys_initcall(s6i2c_init); | ||
404 | module_exit(s6i2c_exit); | ||
diff --git a/drivers/i2c/busses/i2c-s6000.h b/drivers/i2c/busses/i2c-s6000.h deleted file mode 100644 index 4936f9f2256f..000000000000 --- a/drivers/i2c/busses/i2c-s6000.h +++ /dev/null | |||
@@ -1,79 +0,0 @@ | |||
1 | /* | ||
2 | * drivers/i2c/busses/i2c-s6000.h | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 2008 Emlix GmbH <info@emlix.com> | ||
9 | * Author: Oskar Schirmer <oskar@scara.com> | ||
10 | */ | ||
11 | |||
12 | #ifndef __DRIVERS_I2C_BUSSES_I2C_S6000_H | ||
13 | #define __DRIVERS_I2C_BUSSES_I2C_S6000_H | ||
14 | |||
15 | #define S6_I2C_CON 0x000 | ||
16 | #define S6_I2C_CON_MASTER 0 | ||
17 | #define S6_I2C_CON_SPEED 1 | ||
18 | #define S6_I2C_CON_SPEED_NORMAL 1 | ||
19 | #define S6_I2C_CON_SPEED_FAST 2 | ||
20 | #define S6_I2C_CON_SPEED_MASK 3 | ||
21 | #define S6_I2C_CON_10BITSLAVE 3 | ||
22 | #define S6_I2C_CON_10BITMASTER 4 | ||
23 | #define S6_I2C_CON_RESTARTENA 5 | ||
24 | #define S6_I2C_CON_SLAVEDISABLE 6 | ||
25 | #define S6_I2C_TAR 0x004 | ||
26 | #define S6_I2C_TAR_GCORSTART 10 | ||
27 | #define S6_I2C_TAR_SPECIAL 11 | ||
28 | #define S6_I2C_SAR 0x008 | ||
29 | #define S6_I2C_HSMADDR 0x00C | ||
30 | #define S6_I2C_DATACMD 0x010 | ||
31 | #define S6_I2C_DATACMD_READ 8 | ||
32 | #define S6_I2C_SSHCNT 0x014 | ||
33 | #define S6_I2C_SSLCNT 0x018 | ||
34 | #define S6_I2C_FSHCNT 0x01C | ||
35 | #define S6_I2C_FSLCNT 0x020 | ||
36 | #define S6_I2C_INTRSTAT 0x02C | ||
37 | #define S6_I2C_INTRMASK 0x030 | ||
38 | #define S6_I2C_RAWINTR 0x034 | ||
39 | #define S6_I2C_INTR_RXUNDER 0 | ||
40 | #define S6_I2C_INTR_RXOVER 1 | ||
41 | #define S6_I2C_INTR_RXFULL 2 | ||
42 | #define S6_I2C_INTR_TXOVER 3 | ||
43 | #define S6_I2C_INTR_TXEMPTY 4 | ||
44 | #define S6_I2C_INTR_RDREQ 5 | ||
45 | #define S6_I2C_INTR_TXABRT 6 | ||
46 | #define S6_I2C_INTR_RXDONE 7 | ||
47 | #define S6_I2C_INTR_ACTIVITY 8 | ||
48 | #define S6_I2C_INTR_STOPDET 9 | ||
49 | #define S6_I2C_INTR_STARTDET 10 | ||
50 | #define S6_I2C_INTR_GENCALL 11 | ||
51 | #define S6_I2C_RXTL 0x038 | ||
52 | #define S6_I2C_TXTL 0x03C | ||
53 | #define S6_I2C_CLRINTR 0x040 | ||
54 | #define S6_I2C_CLRRXUNDER 0x044 | ||
55 | #define S6_I2C_CLRRXOVER 0x048 | ||
56 | #define S6_I2C_CLRTXOVER 0x04C | ||
57 | #define S6_I2C_CLRRDREQ 0x050 | ||
58 | #define S6_I2C_CLRTXABRT 0x054 | ||
59 | #define S6_I2C_CLRRXDONE 0x058 | ||
60 | #define S6_I2C_CLRACTIVITY 0x05C | ||
61 | #define S6_I2C_CLRSTOPDET 0x060 | ||
62 | #define S6_I2C_CLRSTARTDET 0x064 | ||
63 | #define S6_I2C_CLRGENCALL 0x068 | ||
64 | #define S6_I2C_ENABLE 0x06C | ||
65 | #define S6_I2C_STATUS 0x070 | ||
66 | #define S6_I2C_STATUS_ACTIVITY 0 | ||
67 | #define S6_I2C_STATUS_TFNF 1 | ||
68 | #define S6_I2C_STATUS_TFE 2 | ||
69 | #define S6_I2C_STATUS_RFNE 3 | ||
70 | #define S6_I2C_STATUS_RFF 4 | ||
71 | #define S6_I2C_TXFLR 0x074 | ||
72 | #define S6_I2C_RXFLR 0x078 | ||
73 | #define S6_I2C_SRESET 0x07C | ||
74 | #define S6_I2C_SRESET_IC_SRST 0 | ||
75 | #define S6_I2C_SRESET_IC_MASTER_SRST 1 | ||
76 | #define S6_I2C_SRESET_IC_SLAVE_SRST 2 | ||
77 | #define S6_I2C_TXABRTSOURCE 0x080 | ||
78 | |||
79 | #endif | ||
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c index a3216defc1d3..b1336d5f0531 100644 --- a/drivers/i2c/busses/i2c-sirf.c +++ b/drivers/i2c/busses/i2c-sirf.c | |||
@@ -311,7 +311,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev) | |||
311 | goto out; | 311 | goto out; |
312 | } | 312 | } |
313 | adap = &siic->adapter; | 313 | adap = &siic->adapter; |
314 | adap->class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED; | 314 | adap->class = I2C_CLASS_DEPRECATED; |
315 | 315 | ||
316 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 316 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
317 | siic->base = devm_ioremap_resource(&pdev->dev, mem_res); | 317 | siic->base = devm_ioremap_resource(&pdev->dev, mem_res); |
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c index 95b947670386..2e4eccd6599a 100644 --- a/drivers/i2c/busses/i2c-st.c +++ b/drivers/i2c/busses/i2c-st.c | |||
@@ -206,25 +206,31 @@ static inline void st_i2c_clr_bits(void __iomem *reg, u32 mask) | |||
206 | writel_relaxed(readl_relaxed(reg) & ~mask, reg); | 206 | writel_relaxed(readl_relaxed(reg) & ~mask, reg); |
207 | } | 207 | } |
208 | 208 | ||
209 | /* From I2C Specifications v0.5 */ | 209 | /* |
210 | * From I2C Specifications v0.5. | ||
211 | * | ||
212 | * All the values below have +10% margin added to be | ||
213 | * compatible with some out-of-spec devices, | ||
214 | * like HDMI link of the Toshiba 19AV600 TV. | ||
215 | */ | ||
210 | static struct st_i2c_timings i2c_timings[] = { | 216 | static struct st_i2c_timings i2c_timings[] = { |
211 | [I2C_MODE_STANDARD] = { | 217 | [I2C_MODE_STANDARD] = { |
212 | .rate = 100000, | 218 | .rate = 100000, |
213 | .rep_start_hold = 4000, | 219 | .rep_start_hold = 4400, |
214 | .rep_start_setup = 4700, | 220 | .rep_start_setup = 5170, |
215 | .start_hold = 4000, | 221 | .start_hold = 4400, |
216 | .data_setup_time = 250, | 222 | .data_setup_time = 275, |
217 | .stop_setup_time = 4000, | 223 | .stop_setup_time = 4400, |
218 | .bus_free_time = 4700, | 224 | .bus_free_time = 5170, |
219 | }, | 225 | }, |
220 | [I2C_MODE_FAST] = { | 226 | [I2C_MODE_FAST] = { |
221 | .rate = 400000, | 227 | .rate = 400000, |
222 | .rep_start_hold = 600, | 228 | .rep_start_hold = 660, |
223 | .rep_start_setup = 600, | 229 | .rep_start_setup = 660, |
224 | .start_hold = 600, | 230 | .start_hold = 660, |
225 | .data_setup_time = 100, | 231 | .data_setup_time = 110, |
226 | .stop_setup_time = 600, | 232 | .stop_setup_time = 660, |
227 | .bus_free_time = 1300, | 233 | .bus_free_time = 1430, |
228 | }, | 234 | }, |
229 | }; | 235 | }; |
230 | 236 | ||
@@ -815,7 +821,7 @@ static int st_i2c_probe(struct platform_device *pdev) | |||
815 | 821 | ||
816 | adap = &i2c_dev->adap; | 822 | adap = &i2c_dev->adap; |
817 | i2c_set_adapdata(adap, i2c_dev); | 823 | i2c_set_adapdata(adap, i2c_dev); |
818 | snprintf(adap->name, sizeof(adap->name), "ST I2C(0x%x)", res->start); | 824 | snprintf(adap->name, sizeof(adap->name), "ST I2C(0x%pa)", &res->start); |
819 | adap->owner = THIS_MODULE; | 825 | adap->owner = THIS_MODULE; |
820 | adap->timeout = 2 * HZ; | 826 | adap->timeout = 2 * HZ; |
821 | adap->retries = 0; | 827 | adap->retries = 0; |
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c index fefb1c19ec1d..6a44f37798c8 100644 --- a/drivers/i2c/busses/i2c-stu300.c +++ b/drivers/i2c/busses/i2c-stu300.c | |||
@@ -909,7 +909,7 @@ static int stu300_probe(struct platform_device *pdev) | |||
909 | adap = &dev->adapter; | 909 | adap = &dev->adapter; |
910 | adap->owner = THIS_MODULE; | 910 | adap->owner = THIS_MODULE; |
911 | /* DDC class but actually often used for more generic I2C */ | 911 | /* DDC class but actually often used for more generic I2C */ |
912 | adap->class = I2C_CLASS_DDC | I2C_CLASS_DEPRECATED; | 912 | adap->class = I2C_CLASS_DEPRECATED; |
913 | strlcpy(adap->name, "ST Microelectronics DDC I2C adapter", | 913 | strlcpy(adap->name, "ST Microelectronics DDC I2C adapter", |
914 | sizeof(adap->name)); | 914 | sizeof(adap->name)); |
915 | adap->nr = bus_nr; | 915 | adap->nr = bus_nr; |
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c index 057602683553..10855a0b7e7f 100644 --- a/drivers/i2c/busses/i2c-taos-evm.c +++ b/drivers/i2c/busses/i2c-taos-evm.c | |||
@@ -311,19 +311,8 @@ static struct serio_driver taos_drv = { | |||
311 | .interrupt = taos_interrupt, | 311 | .interrupt = taos_interrupt, |
312 | }; | 312 | }; |
313 | 313 | ||
314 | static int __init taos_init(void) | 314 | module_serio_driver(taos_drv); |
315 | { | ||
316 | return serio_register_driver(&taos_drv); | ||
317 | } | ||
318 | |||
319 | static void __exit taos_exit(void) | ||
320 | { | ||
321 | serio_unregister_driver(&taos_drv); | ||
322 | } | ||
323 | 315 | ||
324 | MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>"); | 316 | MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>"); |
325 | MODULE_DESCRIPTION("TAOS evaluation module driver"); | 317 | MODULE_DESCRIPTION("TAOS evaluation module driver"); |
326 | MODULE_LICENSE("GPL"); | 318 | MODULE_LICENSE("GPL"); |
327 | |||
328 | module_init(taos_init); | ||
329 | module_exit(taos_exit); | ||
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index f1bb2fc06791..87d0371cebb7 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c | |||
@@ -792,7 +792,7 @@ static int tegra_i2c_probe(struct platform_device *pdev) | |||
792 | 792 | ||
793 | i2c_set_adapdata(&i2c_dev->adapter, i2c_dev); | 793 | i2c_set_adapdata(&i2c_dev->adapter, i2c_dev); |
794 | i2c_dev->adapter.owner = THIS_MODULE; | 794 | i2c_dev->adapter.owner = THIS_MODULE; |
795 | i2c_dev->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED; | 795 | i2c_dev->adapter.class = I2C_CLASS_DEPRECATED; |
796 | strlcpy(i2c_dev->adapter.name, "Tegra I2C adapter", | 796 | strlcpy(i2c_dev->adapter.name, "Tegra I2C adapter", |
797 | sizeof(i2c_dev->adapter.name)); | 797 | sizeof(i2c_dev->adapter.name)); |
798 | i2c_dev->adapter.algo = &tegra_i2c_algo; | 798 | i2c_dev->adapter.algo = &tegra_i2c_algo; |
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index 7731f1795869..ade9223912d3 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c | |||
@@ -677,15 +677,15 @@ static u32 xiic_func(struct i2c_adapter *adap) | |||
677 | } | 677 | } |
678 | 678 | ||
679 | static const struct i2c_algorithm xiic_algorithm = { | 679 | static const struct i2c_algorithm xiic_algorithm = { |
680 | .master_xfer = xiic_xfer, | 680 | .master_xfer = xiic_xfer, |
681 | .functionality = xiic_func, | 681 | .functionality = xiic_func, |
682 | }; | 682 | }; |
683 | 683 | ||
684 | static struct i2c_adapter xiic_adapter = { | 684 | static struct i2c_adapter xiic_adapter = { |
685 | .owner = THIS_MODULE, | 685 | .owner = THIS_MODULE, |
686 | .name = DRIVER_NAME, | 686 | .name = DRIVER_NAME, |
687 | .class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED, | 687 | .class = I2C_CLASS_DEPRECATED, |
688 | .algo = &xiic_algorithm, | 688 | .algo = &xiic_algorithm, |
689 | }; | 689 | }; |
690 | 690 | ||
691 | 691 | ||
diff --git a/drivers/i2c/busses/scx200_i2c.c b/drivers/i2c/busses/scx200_i2c.c deleted file mode 100644 index 8eadf0f47ad7..000000000000 --- a/drivers/i2c/busses/scx200_i2c.c +++ /dev/null | |||
@@ -1,129 +0,0 @@ | |||
1 | /* linux/drivers/i2c/busses/scx200_i2c.c | ||
2 | |||
3 | Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> | ||
4 | |||
5 | National Semiconductor SCx200 I2C bus on GPIO pins | ||
6 | |||
7 | Based on i2c-velleman.c Copyright (C) 1995-96, 2000 Simon G. Vogl | ||
8 | |||
9 | This program is free software; you can redistribute it and/or modify | ||
10 | it under the terms of the GNU General Public License as published by | ||
11 | the Free Software Foundation; either version 2 of the License, or | ||
12 | (at your option) any later version. | ||
13 | |||
14 | This program is distributed in the hope that it will be useful, | ||
15 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | GNU General Public License for more details. | ||
18 | |||
19 | You should have received a copy of the GNU General Public License | ||
20 | along with this program; if not, write to the Free Software | ||
21 | Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
22 | */ | ||
23 | |||
24 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
25 | |||
26 | #include <linux/module.h> | ||
27 | #include <linux/errno.h> | ||
28 | #include <linux/kernel.h> | ||
29 | #include <linux/i2c.h> | ||
30 | #include <linux/i2c-algo-bit.h> | ||
31 | #include <linux/io.h> | ||
32 | |||
33 | #include <linux/scx200_gpio.h> | ||
34 | |||
35 | MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>"); | ||
36 | MODULE_DESCRIPTION("NatSemi SCx200 I2C Driver"); | ||
37 | MODULE_LICENSE("GPL"); | ||
38 | |||
39 | static int scl = CONFIG_SCx200_I2C_SCL; | ||
40 | static int sda = CONFIG_SCx200_I2C_SDA; | ||
41 | |||
42 | module_param(scl, int, 0); | ||
43 | MODULE_PARM_DESC(scl, "GPIO line for SCL"); | ||
44 | module_param(sda, int, 0); | ||
45 | MODULE_PARM_DESC(sda, "GPIO line for SDA"); | ||
46 | |||
47 | static void scx200_i2c_setscl(void *data, int state) | ||
48 | { | ||
49 | scx200_gpio_set(scl, state); | ||
50 | } | ||
51 | |||
52 | static void scx200_i2c_setsda(void *data, int state) | ||
53 | { | ||
54 | scx200_gpio_set(sda, state); | ||
55 | } | ||
56 | |||
57 | static int scx200_i2c_getscl(void *data) | ||
58 | { | ||
59 | return scx200_gpio_get(scl); | ||
60 | } | ||
61 | |||
62 | static int scx200_i2c_getsda(void *data) | ||
63 | { | ||
64 | return scx200_gpio_get(sda); | ||
65 | } | ||
66 | |||
67 | /* ------------------------------------------------------------------------ | ||
68 | * Encapsulate the above functions in the correct operations structure. | ||
69 | * This is only done when more than one hardware adapter is supported. | ||
70 | */ | ||
71 | |||
72 | static struct i2c_algo_bit_data scx200_i2c_data = { | ||
73 | .setsda = scx200_i2c_setsda, | ||
74 | .setscl = scx200_i2c_setscl, | ||
75 | .getsda = scx200_i2c_getsda, | ||
76 | .getscl = scx200_i2c_getscl, | ||
77 | .udelay = 10, | ||
78 | .timeout = HZ, | ||
79 | }; | ||
80 | |||
81 | static struct i2c_adapter scx200_i2c_ops = { | ||
82 | .owner = THIS_MODULE, | ||
83 | .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, | ||
84 | .algo_data = &scx200_i2c_data, | ||
85 | .name = "NatSemi SCx200 I2C", | ||
86 | }; | ||
87 | |||
88 | static int scx200_i2c_init(void) | ||
89 | { | ||
90 | pr_debug("NatSemi SCx200 I2C Driver\n"); | ||
91 | |||
92 | if (!scx200_gpio_present()) { | ||
93 | pr_err("no SCx200 gpio pins available\n"); | ||
94 | return -ENODEV; | ||
95 | } | ||
96 | |||
97 | pr_debug("SCL=GPIO%02u, SDA=GPIO%02u\n", scl, sda); | ||
98 | |||
99 | if (scl == -1 || sda == -1 || scl == sda) { | ||
100 | pr_err("scl and sda must be specified\n"); | ||
101 | return -EINVAL; | ||
102 | } | ||
103 | |||
104 | /* Configure GPIOs as open collector outputs */ | ||
105 | scx200_gpio_configure(scl, ~2, 5); | ||
106 | scx200_gpio_configure(sda, ~2, 5); | ||
107 | |||
108 | if (i2c_bit_add_bus(&scx200_i2c_ops) < 0) { | ||
109 | pr_err("adapter %s registration failed\n", scx200_i2c_ops.name); | ||
110 | return -ENODEV; | ||
111 | } | ||
112 | |||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | static void scx200_i2c_cleanup(void) | ||
117 | { | ||
118 | i2c_del_adapter(&scx200_i2c_ops); | ||
119 | } | ||
120 | |||
121 | module_init(scx200_i2c_init); | ||
122 | module_exit(scx200_i2c_cleanup); | ||
123 | |||
124 | /* | ||
125 | Local variables: | ||
126 | compile-command: "make -k -C ../.. SUBDIRS=drivers/i2c modules" | ||
127 | c-basic-offset: 8 | ||
128 | End: | ||
129 | */ | ||
diff --git a/drivers/i2c/i2c-acpi.c b/drivers/i2c/i2c-acpi.c new file mode 100644 index 000000000000..e8b61967334b --- /dev/null +++ b/drivers/i2c/i2c-acpi.c | |||
@@ -0,0 +1,362 @@ | |||
1 | /* | ||
2 | * I2C ACPI code | ||
3 | * | ||
4 | * Copyright (C) 2014 Intel Corp | ||
5 | * | ||
6 | * Author: Lan Tianyu <tianyu.lan@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
14 | * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
15 | * for more details. | ||
16 | */ | ||
17 | #define pr_fmt(fmt) "I2C/ACPI : " fmt | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/err.h> | ||
22 | #include <linux/i2c.h> | ||
23 | #include <linux/acpi.h> | ||
24 | |||
25 | struct acpi_i2c_handler_data { | ||
26 | struct acpi_connection_info info; | ||
27 | struct i2c_adapter *adapter; | ||
28 | }; | ||
29 | |||
30 | struct gsb_buffer { | ||
31 | u8 status; | ||
32 | u8 len; | ||
33 | union { | ||
34 | u16 wdata; | ||
35 | u8 bdata; | ||
36 | u8 data[0]; | ||
37 | }; | ||
38 | } __packed; | ||
39 | |||
40 | static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data) | ||
41 | { | ||
42 | struct i2c_board_info *info = data; | ||
43 | |||
44 | if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { | ||
45 | struct acpi_resource_i2c_serialbus *sb; | ||
46 | |||
47 | sb = &ares->data.i2c_serial_bus; | ||
48 | if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) { | ||
49 | info->addr = sb->slave_address; | ||
50 | if (sb->access_mode == ACPI_I2C_10BIT_MODE) | ||
51 | info->flags |= I2C_CLIENT_TEN; | ||
52 | } | ||
53 | } else if (info->irq < 0) { | ||
54 | struct resource r; | ||
55 | |||
56 | if (acpi_dev_resource_interrupt(ares, 0, &r)) | ||
57 | info->irq = r.start; | ||
58 | } | ||
59 | |||
60 | /* Tell the ACPI core to skip this resource */ | ||
61 | return 1; | ||
62 | } | ||
63 | |||
64 | static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level, | ||
65 | void *data, void **return_value) | ||
66 | { | ||
67 | struct i2c_adapter *adapter = data; | ||
68 | struct list_head resource_list; | ||
69 | struct i2c_board_info info; | ||
70 | struct acpi_device *adev; | ||
71 | int ret; | ||
72 | |||
73 | if (acpi_bus_get_device(handle, &adev)) | ||
74 | return AE_OK; | ||
75 | if (acpi_bus_get_status(adev) || !adev->status.present) | ||
76 | return AE_OK; | ||
77 | |||
78 | memset(&info, 0, sizeof(info)); | ||
79 | info.acpi_node.companion = adev; | ||
80 | info.irq = -1; | ||
81 | |||
82 | INIT_LIST_HEAD(&resource_list); | ||
83 | ret = acpi_dev_get_resources(adev, &resource_list, | ||
84 | acpi_i2c_add_resource, &info); | ||
85 | acpi_dev_free_resource_list(&resource_list); | ||
86 | |||
87 | if (ret < 0 || !info.addr) | ||
88 | return AE_OK; | ||
89 | |||
90 | adev->power.flags.ignore_parent = true; | ||
91 | strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type)); | ||
92 | if (!i2c_new_device(adapter, &info)) { | ||
93 | adev->power.flags.ignore_parent = false; | ||
94 | dev_err(&adapter->dev, | ||
95 | "failed to add I2C device %s from ACPI\n", | ||
96 | dev_name(&adev->dev)); | ||
97 | } | ||
98 | |||
99 | return AE_OK; | ||
100 | } | ||
101 | |||
102 | /** | ||
103 | * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter | ||
104 | * @adap: pointer to adapter | ||
105 | * | ||
106 | * Enumerate all I2C slave devices behind this adapter by walking the ACPI | ||
107 | * namespace. When a device is found it will be added to the Linux device | ||
108 | * model and bound to the corresponding ACPI handle. | ||
109 | */ | ||
110 | void acpi_i2c_register_devices(struct i2c_adapter *adap) | ||
111 | { | ||
112 | acpi_handle handle; | ||
113 | acpi_status status; | ||
114 | |||
115 | if (!adap->dev.parent) | ||
116 | return; | ||
117 | |||
118 | handle = ACPI_HANDLE(adap->dev.parent); | ||
119 | if (!handle) | ||
120 | return; | ||
121 | |||
122 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, | ||
123 | acpi_i2c_add_device, NULL, | ||
124 | adap, NULL); | ||
125 | if (ACPI_FAILURE(status)) | ||
126 | dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); | ||
127 | } | ||
128 | |||
129 | static int acpi_gsb_i2c_read_bytes(struct i2c_client *client, | ||
130 | u8 cmd, u8 *data, u8 data_len) | ||
131 | { | ||
132 | |||
133 | struct i2c_msg msgs[2]; | ||
134 | int ret; | ||
135 | u8 *buffer; | ||
136 | |||
137 | buffer = kzalloc(data_len, GFP_KERNEL); | ||
138 | if (!buffer) | ||
139 | return AE_NO_MEMORY; | ||
140 | |||
141 | msgs[0].addr = client->addr; | ||
142 | msgs[0].flags = client->flags; | ||
143 | msgs[0].len = 1; | ||
144 | msgs[0].buf = &cmd; | ||
145 | |||
146 | msgs[1].addr = client->addr; | ||
147 | msgs[1].flags = client->flags | I2C_M_RD; | ||
148 | msgs[1].len = data_len; | ||
149 | msgs[1].buf = buffer; | ||
150 | |||
151 | ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); | ||
152 | if (ret < 0) | ||
153 | dev_err(&client->adapter->dev, "i2c read failed\n"); | ||
154 | else | ||
155 | memcpy(data, buffer, data_len); | ||
156 | |||
157 | kfree(buffer); | ||
158 | return ret; | ||
159 | } | ||
160 | |||
161 | static int acpi_gsb_i2c_write_bytes(struct i2c_client *client, | ||
162 | u8 cmd, u8 *data, u8 data_len) | ||
163 | { | ||
164 | |||
165 | struct i2c_msg msgs[1]; | ||
166 | u8 *buffer; | ||
167 | int ret = AE_OK; | ||
168 | |||
169 | buffer = kzalloc(data_len + 1, GFP_KERNEL); | ||
170 | if (!buffer) | ||
171 | return AE_NO_MEMORY; | ||
172 | |||
173 | buffer[0] = cmd; | ||
174 | memcpy(buffer + 1, data, data_len); | ||
175 | |||
176 | msgs[0].addr = client->addr; | ||
177 | msgs[0].flags = client->flags; | ||
178 | msgs[0].len = data_len + 1; | ||
179 | msgs[0].buf = buffer; | ||
180 | |||
181 | ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); | ||
182 | if (ret < 0) | ||
183 | dev_err(&client->adapter->dev, "i2c write failed\n"); | ||
184 | |||
185 | kfree(buffer); | ||
186 | return ret; | ||
187 | } | ||
188 | |||
189 | static acpi_status | ||
190 | acpi_i2c_space_handler(u32 function, acpi_physical_address command, | ||
191 | u32 bits, u64 *value64, | ||
192 | void *handler_context, void *region_context) | ||
193 | { | ||
194 | struct gsb_buffer *gsb = (struct gsb_buffer *)value64; | ||
195 | struct acpi_i2c_handler_data *data = handler_context; | ||
196 | struct acpi_connection_info *info = &data->info; | ||
197 | struct acpi_resource_i2c_serialbus *sb; | ||
198 | struct i2c_adapter *adapter = data->adapter; | ||
199 | struct i2c_client client; | ||
200 | struct acpi_resource *ares; | ||
201 | u32 accessor_type = function >> 16; | ||
202 | u8 action = function & ACPI_IO_MASK; | ||
203 | acpi_status ret = AE_OK; | ||
204 | int status; | ||
205 | |||
206 | ret = acpi_buffer_to_resource(info->connection, info->length, &ares); | ||
207 | if (ACPI_FAILURE(ret)) | ||
208 | return ret; | ||
209 | |||
210 | if (!value64 || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) { | ||
211 | ret = AE_BAD_PARAMETER; | ||
212 | goto err; | ||
213 | } | ||
214 | |||
215 | sb = &ares->data.i2c_serial_bus; | ||
216 | if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) { | ||
217 | ret = AE_BAD_PARAMETER; | ||
218 | goto err; | ||
219 | } | ||
220 | |||
221 | memset(&client, 0, sizeof(client)); | ||
222 | client.adapter = adapter; | ||
223 | client.addr = sb->slave_address; | ||
224 | client.flags = 0; | ||
225 | |||
226 | if (sb->access_mode == ACPI_I2C_10BIT_MODE) | ||
227 | client.flags |= I2C_CLIENT_TEN; | ||
228 | |||
229 | switch (accessor_type) { | ||
230 | case ACPI_GSB_ACCESS_ATTRIB_SEND_RCV: | ||
231 | if (action == ACPI_READ) { | ||
232 | status = i2c_smbus_read_byte(&client); | ||
233 | if (status >= 0) { | ||
234 | gsb->bdata = status; | ||
235 | status = 0; | ||
236 | } | ||
237 | } else { | ||
238 | status = i2c_smbus_write_byte(&client, gsb->bdata); | ||
239 | } | ||
240 | break; | ||
241 | |||
242 | case ACPI_GSB_ACCESS_ATTRIB_BYTE: | ||
243 | if (action == ACPI_READ) { | ||
244 | status = i2c_smbus_read_byte_data(&client, command); | ||
245 | if (status >= 0) { | ||
246 | gsb->bdata = status; | ||
247 | status = 0; | ||
248 | } | ||
249 | } else { | ||
250 | status = i2c_smbus_write_byte_data(&client, command, | ||
251 | gsb->bdata); | ||
252 | } | ||
253 | break; | ||
254 | |||
255 | case ACPI_GSB_ACCESS_ATTRIB_WORD: | ||
256 | if (action == ACPI_READ) { | ||
257 | status = i2c_smbus_read_word_data(&client, command); | ||
258 | if (status >= 0) { | ||
259 | gsb->wdata = status; | ||
260 | status = 0; | ||
261 | } | ||
262 | } else { | ||
263 | status = i2c_smbus_write_word_data(&client, command, | ||
264 | gsb->wdata); | ||
265 | } | ||
266 | break; | ||
267 | |||
268 | case ACPI_GSB_ACCESS_ATTRIB_BLOCK: | ||
269 | if (action == ACPI_READ) { | ||
270 | status = i2c_smbus_read_block_data(&client, command, | ||
271 | gsb->data); | ||
272 | if (status >= 0) { | ||
273 | gsb->len = status; | ||
274 | status = 0; | ||
275 | } | ||
276 | } else { | ||
277 | status = i2c_smbus_write_block_data(&client, command, | ||
278 | gsb->len, gsb->data); | ||
279 | } | ||
280 | break; | ||
281 | |||
282 | case ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE: | ||
283 | if (action == ACPI_READ) { | ||
284 | status = acpi_gsb_i2c_read_bytes(&client, command, | ||
285 | gsb->data, info->access_length); | ||
286 | if (status > 0) | ||
287 | status = 0; | ||
288 | } else { | ||
289 | status = acpi_gsb_i2c_write_bytes(&client, command, | ||
290 | gsb->data, info->access_length); | ||
291 | } | ||
292 | break; | ||
293 | |||
294 | default: | ||
295 | pr_info("protocol(0x%02x) is not supported.\n", accessor_type); | ||
296 | ret = AE_BAD_PARAMETER; | ||
297 | goto err; | ||
298 | } | ||
299 | |||
300 | gsb->status = status; | ||
301 | |||
302 | err: | ||
303 | ACPI_FREE(ares); | ||
304 | return ret; | ||
305 | } | ||
306 | |||
307 | |||
308 | int acpi_i2c_install_space_handler(struct i2c_adapter *adapter) | ||
309 | { | ||
310 | acpi_handle handle = ACPI_HANDLE(adapter->dev.parent); | ||
311 | struct acpi_i2c_handler_data *data; | ||
312 | acpi_status status; | ||
313 | |||
314 | if (!handle) | ||
315 | return -ENODEV; | ||
316 | |||
317 | data = kzalloc(sizeof(struct acpi_i2c_handler_data), | ||
318 | GFP_KERNEL); | ||
319 | if (!data) | ||
320 | return -ENOMEM; | ||
321 | |||
322 | data->adapter = adapter; | ||
323 | status = acpi_bus_attach_private_data(handle, (void *)data); | ||
324 | if (ACPI_FAILURE(status)) { | ||
325 | kfree(data); | ||
326 | return -ENOMEM; | ||
327 | } | ||
328 | |||
329 | status = acpi_install_address_space_handler(handle, | ||
330 | ACPI_ADR_SPACE_GSBUS, | ||
331 | &acpi_i2c_space_handler, | ||
332 | NULL, | ||
333 | data); | ||
334 | if (ACPI_FAILURE(status)) { | ||
335 | dev_err(&adapter->dev, "Error installing i2c space handler\n"); | ||
336 | acpi_bus_detach_private_data(handle); | ||
337 | kfree(data); | ||
338 | return -ENOMEM; | ||
339 | } | ||
340 | |||
341 | return 0; | ||
342 | } | ||
343 | |||
344 | void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter) | ||
345 | { | ||
346 | acpi_handle handle = ACPI_HANDLE(adapter->dev.parent); | ||
347 | struct acpi_i2c_handler_data *data; | ||
348 | acpi_status status; | ||
349 | |||
350 | if (!handle) | ||
351 | return; | ||
352 | |||
353 | acpi_remove_address_space_handler(handle, | ||
354 | ACPI_ADR_SPACE_GSBUS, | ||
355 | &acpi_i2c_space_handler); | ||
356 | |||
357 | status = acpi_bus_get_private_data(handle, (void **)&data); | ||
358 | if (ACPI_SUCCESS(status)) | ||
359 | kfree(data); | ||
360 | |||
361 | acpi_bus_detach_private_data(handle); | ||
362 | } | ||
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 66aa83b99383..632057a44615 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
@@ -1097,101 +1097,6 @@ EXPORT_SYMBOL(of_find_i2c_adapter_by_node); | |||
1097 | static void of_i2c_register_devices(struct i2c_adapter *adap) { } | 1097 | static void of_i2c_register_devices(struct i2c_adapter *adap) { } |
1098 | #endif /* CONFIG_OF */ | 1098 | #endif /* CONFIG_OF */ |
1099 | 1099 | ||
1100 | /* ACPI support code */ | ||
1101 | |||
1102 | #if IS_ENABLED(CONFIG_ACPI) | ||
1103 | static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data) | ||
1104 | { | ||
1105 | struct i2c_board_info *info = data; | ||
1106 | |||
1107 | if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { | ||
1108 | struct acpi_resource_i2c_serialbus *sb; | ||
1109 | |||
1110 | sb = &ares->data.i2c_serial_bus; | ||
1111 | if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) { | ||
1112 | info->addr = sb->slave_address; | ||
1113 | if (sb->access_mode == ACPI_I2C_10BIT_MODE) | ||
1114 | info->flags |= I2C_CLIENT_TEN; | ||
1115 | } | ||
1116 | } else if (info->irq < 0) { | ||
1117 | struct resource r; | ||
1118 | |||
1119 | if (acpi_dev_resource_interrupt(ares, 0, &r)) | ||
1120 | info->irq = r.start; | ||
1121 | } | ||
1122 | |||
1123 | /* Tell the ACPI core to skip this resource */ | ||
1124 | return 1; | ||
1125 | } | ||
1126 | |||
1127 | static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level, | ||
1128 | void *data, void **return_value) | ||
1129 | { | ||
1130 | struct i2c_adapter *adapter = data; | ||
1131 | struct list_head resource_list; | ||
1132 | struct i2c_board_info info; | ||
1133 | struct acpi_device *adev; | ||
1134 | int ret; | ||
1135 | |||
1136 | if (acpi_bus_get_device(handle, &adev)) | ||
1137 | return AE_OK; | ||
1138 | if (acpi_bus_get_status(adev) || !adev->status.present) | ||
1139 | return AE_OK; | ||
1140 | |||
1141 | memset(&info, 0, sizeof(info)); | ||
1142 | info.acpi_node.companion = adev; | ||
1143 | info.irq = -1; | ||
1144 | |||
1145 | INIT_LIST_HEAD(&resource_list); | ||
1146 | ret = acpi_dev_get_resources(adev, &resource_list, | ||
1147 | acpi_i2c_add_resource, &info); | ||
1148 | acpi_dev_free_resource_list(&resource_list); | ||
1149 | |||
1150 | if (ret < 0 || !info.addr) | ||
1151 | return AE_OK; | ||
1152 | |||
1153 | adev->power.flags.ignore_parent = true; | ||
1154 | strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type)); | ||
1155 | if (!i2c_new_device(adapter, &info)) { | ||
1156 | adev->power.flags.ignore_parent = false; | ||
1157 | dev_err(&adapter->dev, | ||
1158 | "failed to add I2C device %s from ACPI\n", | ||
1159 | dev_name(&adev->dev)); | ||
1160 | } | ||
1161 | |||
1162 | return AE_OK; | ||
1163 | } | ||
1164 | |||
1165 | /** | ||
1166 | * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter | ||
1167 | * @adap: pointer to adapter | ||
1168 | * | ||
1169 | * Enumerate all I2C slave devices behind this adapter by walking the ACPI | ||
1170 | * namespace. When a device is found it will be added to the Linux device | ||
1171 | * model and bound to the corresponding ACPI handle. | ||
1172 | */ | ||
1173 | static void acpi_i2c_register_devices(struct i2c_adapter *adap) | ||
1174 | { | ||
1175 | acpi_handle handle; | ||
1176 | acpi_status status; | ||
1177 | |||
1178 | if (!adap->dev.parent) | ||
1179 | return; | ||
1180 | |||
1181 | handle = ACPI_HANDLE(adap->dev.parent); | ||
1182 | if (!handle) | ||
1183 | return; | ||
1184 | |||
1185 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, | ||
1186 | acpi_i2c_add_device, NULL, | ||
1187 | adap, NULL); | ||
1188 | if (ACPI_FAILURE(status)) | ||
1189 | dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); | ||
1190 | } | ||
1191 | #else | ||
1192 | static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) {} | ||
1193 | #endif /* CONFIG_ACPI */ | ||
1194 | |||
1195 | static int i2c_do_add_adapter(struct i2c_driver *driver, | 1100 | static int i2c_do_add_adapter(struct i2c_driver *driver, |
1196 | struct i2c_adapter *adap) | 1101 | struct i2c_adapter *adap) |
1197 | { | 1102 | { |
@@ -1298,6 +1203,7 @@ exit_recovery: | |||
1298 | /* create pre-declared device nodes */ | 1203 | /* create pre-declared device nodes */ |
1299 | of_i2c_register_devices(adap); | 1204 | of_i2c_register_devices(adap); |
1300 | acpi_i2c_register_devices(adap); | 1205 | acpi_i2c_register_devices(adap); |
1206 | acpi_i2c_install_space_handler(adap); | ||
1301 | 1207 | ||
1302 | if (adap->nr < __i2c_first_dynamic_bus_num) | 1208 | if (adap->nr < __i2c_first_dynamic_bus_num) |
1303 | i2c_scan_static_board_info(adap); | 1209 | i2c_scan_static_board_info(adap); |
@@ -1471,6 +1377,7 @@ void i2c_del_adapter(struct i2c_adapter *adap) | |||
1471 | return; | 1377 | return; |
1472 | } | 1378 | } |
1473 | 1379 | ||
1380 | acpi_i2c_remove_space_handler(adap); | ||
1474 | /* Tell drivers about this removal */ | 1381 | /* Tell drivers about this removal */ |
1475 | mutex_lock(&core_lock); | 1382 | mutex_lock(&core_lock); |
1476 | bus_for_each_drv(&i2c_bus_type, NULL, adap, | 1383 | bus_for_each_drv(&i2c_bus_type, NULL, adap, |
@@ -2013,6 +1920,16 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver) | |||
2013 | if (!driver->detect || !address_list) | 1920 | if (!driver->detect || !address_list) |
2014 | return 0; | 1921 | return 0; |
2015 | 1922 | ||
1923 | /* Warn that the adapter lost class based instantiation */ | ||
1924 | if (adapter->class == I2C_CLASS_DEPRECATED) { | ||
1925 | dev_dbg(&adapter->dev, | ||
1926 | "This adapter dropped support for I2C classes and " | ||
1927 | "won't auto-detect %s devices anymore. If you need it, check " | ||
1928 | "'Documentation/i2c/instantiating-devices' for alternatives.\n", | ||
1929 | driver->driver.name); | ||
1930 | return 0; | ||
1931 | } | ||
1932 | |||
2016 | /* Stop here if the classes do not match */ | 1933 | /* Stop here if the classes do not match */ |
2017 | if (!(adapter->class & driver->class)) | 1934 | if (!(adapter->class & driver->class)) |
2018 | return 0; | 1935 | return 0; |
diff --git a/drivers/i2c/i2c-stub.c b/drivers/i2c/i2c-stub.c index 77e4849d2f2a..d241aa295d96 100644 --- a/drivers/i2c/i2c-stub.c +++ b/drivers/i2c/i2c-stub.c | |||
@@ -2,7 +2,7 @@ | |||
2 | i2c-stub.c - I2C/SMBus chip emulator | 2 | i2c-stub.c - I2C/SMBus chip emulator |
3 | 3 | ||
4 | Copyright (c) 2004 Mark M. Hoffman <mhoffman@lightlink.com> | 4 | Copyright (c) 2004 Mark M. Hoffman <mhoffman@lightlink.com> |
5 | Copyright (C) 2007, 2012 Jean Delvare <jdelvare@suse.de> | 5 | Copyright (C) 2007-2014 Jean Delvare <jdelvare@suse.de> |
6 | 6 | ||
7 | This program is free software; you can redistribute it and/or modify | 7 | This program is free software; you can redistribute it and/or modify |
8 | it under the terms of the GNU General Public License as published by | 8 | it under the terms of the GNU General Public License as published by |
@@ -27,28 +27,109 @@ | |||
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
29 | #include <linux/i2c.h> | 29 | #include <linux/i2c.h> |
30 | #include <linux/list.h> | ||
30 | 31 | ||
31 | #define MAX_CHIPS 10 | 32 | #define MAX_CHIPS 10 |
32 | #define STUB_FUNC (I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | \ | 33 | |
33 | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | \ | 34 | /* |
34 | I2C_FUNC_SMBUS_I2C_BLOCK) | 35 | * Support for I2C_FUNC_SMBUS_BLOCK_DATA is disabled by default and must |
36 | * be enabled explicitly by setting the I2C_FUNC_SMBUS_BLOCK_DATA bits | ||
37 | * in the 'functionality' module parameter. | ||
38 | */ | ||
39 | #define STUB_FUNC_DEFAULT \ | ||
40 | (I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | \ | ||
41 | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | \ | ||
42 | I2C_FUNC_SMBUS_I2C_BLOCK) | ||
43 | |||
44 | #define STUB_FUNC_ALL \ | ||
45 | (STUB_FUNC_DEFAULT | I2C_FUNC_SMBUS_BLOCK_DATA) | ||
35 | 46 | ||
36 | static unsigned short chip_addr[MAX_CHIPS]; | 47 | static unsigned short chip_addr[MAX_CHIPS]; |
37 | module_param_array(chip_addr, ushort, NULL, S_IRUGO); | 48 | module_param_array(chip_addr, ushort, NULL, S_IRUGO); |
38 | MODULE_PARM_DESC(chip_addr, | 49 | MODULE_PARM_DESC(chip_addr, |
39 | "Chip addresses (up to 10, between 0x03 and 0x77)"); | 50 | "Chip addresses (up to 10, between 0x03 and 0x77)"); |
40 | 51 | ||
41 | static unsigned long functionality = STUB_FUNC; | 52 | static unsigned long functionality = STUB_FUNC_DEFAULT; |
42 | module_param(functionality, ulong, S_IRUGO | S_IWUSR); | 53 | module_param(functionality, ulong, S_IRUGO | S_IWUSR); |
43 | MODULE_PARM_DESC(functionality, "Override functionality bitfield"); | 54 | MODULE_PARM_DESC(functionality, "Override functionality bitfield"); |
44 | 55 | ||
56 | /* Some chips have banked register ranges */ | ||
57 | |||
58 | static u8 bank_reg[MAX_CHIPS]; | ||
59 | module_param_array(bank_reg, byte, NULL, S_IRUGO); | ||
60 | MODULE_PARM_DESC(bank_reg, "Bank register"); | ||
61 | |||
62 | static u8 bank_mask[MAX_CHIPS]; | ||
63 | module_param_array(bank_mask, byte, NULL, S_IRUGO); | ||
64 | MODULE_PARM_DESC(bank_mask, "Bank value mask"); | ||
65 | |||
66 | static u8 bank_start[MAX_CHIPS]; | ||
67 | module_param_array(bank_start, byte, NULL, S_IRUGO); | ||
68 | MODULE_PARM_DESC(bank_start, "First banked register"); | ||
69 | |||
70 | static u8 bank_end[MAX_CHIPS]; | ||
71 | module_param_array(bank_end, byte, NULL, S_IRUGO); | ||
72 | MODULE_PARM_DESC(bank_end, "Last banked register"); | ||
73 | |||
74 | struct smbus_block_data { | ||
75 | struct list_head node; | ||
76 | u8 command; | ||
77 | u8 len; | ||
78 | u8 block[I2C_SMBUS_BLOCK_MAX]; | ||
79 | }; | ||
80 | |||
45 | struct stub_chip { | 81 | struct stub_chip { |
46 | u8 pointer; | 82 | u8 pointer; |
47 | u16 words[256]; /* Byte operations use the LSB as per SMBus | 83 | u16 words[256]; /* Byte operations use the LSB as per SMBus |
48 | specification */ | 84 | specification */ |
85 | struct list_head smbus_blocks; | ||
86 | |||
87 | /* For chips with banks, extra registers are allocated dynamically */ | ||
88 | u8 bank_reg; | ||
89 | u8 bank_shift; | ||
90 | u8 bank_mask; | ||
91 | u8 bank_sel; /* Currently selected bank */ | ||
92 | u8 bank_start; | ||
93 | u8 bank_end; | ||
94 | u16 bank_size; | ||
95 | u16 *bank_words; /* Room for bank_mask * bank_size registers */ | ||
49 | }; | 96 | }; |
50 | 97 | ||
51 | static struct stub_chip *stub_chips; | 98 | static struct stub_chip *stub_chips; |
99 | static int stub_chips_nr; | ||
100 | |||
101 | static struct smbus_block_data *stub_find_block(struct device *dev, | ||
102 | struct stub_chip *chip, | ||
103 | u8 command, bool create) | ||
104 | { | ||
105 | struct smbus_block_data *b, *rb = NULL; | ||
106 | |||
107 | list_for_each_entry(b, &chip->smbus_blocks, node) { | ||
108 | if (b->command == command) { | ||
109 | rb = b; | ||
110 | break; | ||
111 | } | ||
112 | } | ||
113 | if (rb == NULL && create) { | ||
114 | rb = devm_kzalloc(dev, sizeof(*rb), GFP_KERNEL); | ||
115 | if (rb == NULL) | ||
116 | return rb; | ||
117 | rb->command = command; | ||
118 | list_add(&rb->node, &chip->smbus_blocks); | ||
119 | } | ||
120 | return rb; | ||
121 | } | ||
122 | |||
123 | static u16 *stub_get_wordp(struct stub_chip *chip, u8 offset) | ||
124 | { | ||
125 | if (chip->bank_sel && | ||
126 | offset >= chip->bank_start && offset <= chip->bank_end) | ||
127 | return chip->bank_words + | ||
128 | (chip->bank_sel - 1) * chip->bank_size + | ||
129 | offset - chip->bank_start; | ||
130 | else | ||
131 | return chip->words + offset; | ||
132 | } | ||
52 | 133 | ||
53 | /* Return negative errno on error. */ | 134 | /* Return negative errno on error. */ |
54 | static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, | 135 | static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, |
@@ -57,9 +138,11 @@ static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, | |||
57 | s32 ret; | 138 | s32 ret; |
58 | int i, len; | 139 | int i, len; |
59 | struct stub_chip *chip = NULL; | 140 | struct stub_chip *chip = NULL; |
141 | struct smbus_block_data *b; | ||
142 | u16 *wordp; | ||
60 | 143 | ||
61 | /* Search for the right chip */ | 144 | /* Search for the right chip */ |
62 | for (i = 0; i < MAX_CHIPS && chip_addr[i]; i++) { | 145 | for (i = 0; i < stub_chips_nr; i++) { |
63 | if (addr == chip_addr[i]) { | 146 | if (addr == chip_addr[i]) { |
64 | chip = stub_chips + i; | 147 | chip = stub_chips + i; |
65 | break; | 148 | break; |
@@ -82,7 +165,8 @@ static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, | |||
82 | "smbus byte - addr 0x%02x, wrote 0x%02x.\n", | 165 | "smbus byte - addr 0x%02x, wrote 0x%02x.\n", |
83 | addr, command); | 166 | addr, command); |
84 | } else { | 167 | } else { |
85 | data->byte = chip->words[chip->pointer++] & 0xff; | 168 | wordp = stub_get_wordp(chip, chip->pointer++); |
169 | data->byte = *wordp & 0xff; | ||
86 | dev_dbg(&adap->dev, | 170 | dev_dbg(&adap->dev, |
87 | "smbus byte - addr 0x%02x, read 0x%02x.\n", | 171 | "smbus byte - addr 0x%02x, read 0x%02x.\n", |
88 | addr, data->byte); | 172 | addr, data->byte); |
@@ -92,14 +176,25 @@ static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, | |||
92 | break; | 176 | break; |
93 | 177 | ||
94 | case I2C_SMBUS_BYTE_DATA: | 178 | case I2C_SMBUS_BYTE_DATA: |
179 | wordp = stub_get_wordp(chip, command); | ||
95 | if (read_write == I2C_SMBUS_WRITE) { | 180 | if (read_write == I2C_SMBUS_WRITE) { |
96 | chip->words[command] &= 0xff00; | 181 | *wordp &= 0xff00; |
97 | chip->words[command] |= data->byte; | 182 | *wordp |= data->byte; |
98 | dev_dbg(&adap->dev, | 183 | dev_dbg(&adap->dev, |
99 | "smbus byte data - addr 0x%02x, wrote 0x%02x at 0x%02x.\n", | 184 | "smbus byte data - addr 0x%02x, wrote 0x%02x at 0x%02x.\n", |
100 | addr, data->byte, command); | 185 | addr, data->byte, command); |
186 | |||
187 | /* Set the bank as needed */ | ||
188 | if (chip->bank_words && command == chip->bank_reg) { | ||
189 | chip->bank_sel = | ||
190 | (data->byte >> chip->bank_shift) | ||
191 | & chip->bank_mask; | ||
192 | dev_dbg(&adap->dev, | ||
193 | "switching to bank %u.\n", | ||
194 | chip->bank_sel); | ||
195 | } | ||
101 | } else { | 196 | } else { |
102 | data->byte = chip->words[command] & 0xff; | 197 | data->byte = *wordp & 0xff; |
103 | dev_dbg(&adap->dev, | 198 | dev_dbg(&adap->dev, |
104 | "smbus byte data - addr 0x%02x, read 0x%02x at 0x%02x.\n", | 199 | "smbus byte data - addr 0x%02x, read 0x%02x at 0x%02x.\n", |
105 | addr, data->byte, command); | 200 | addr, data->byte, command); |
@@ -110,13 +205,14 @@ static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, | |||
110 | break; | 205 | break; |
111 | 206 | ||
112 | case I2C_SMBUS_WORD_DATA: | 207 | case I2C_SMBUS_WORD_DATA: |
208 | wordp = stub_get_wordp(chip, command); | ||
113 | if (read_write == I2C_SMBUS_WRITE) { | 209 | if (read_write == I2C_SMBUS_WRITE) { |
114 | chip->words[command] = data->word; | 210 | *wordp = data->word; |
115 | dev_dbg(&adap->dev, | 211 | dev_dbg(&adap->dev, |
116 | "smbus word data - addr 0x%02x, wrote 0x%04x at 0x%02x.\n", | 212 | "smbus word data - addr 0x%02x, wrote 0x%04x at 0x%02x.\n", |
117 | addr, data->word, command); | 213 | addr, data->word, command); |
118 | } else { | 214 | } else { |
119 | data->word = chip->words[command]; | 215 | data->word = *wordp; |
120 | dev_dbg(&adap->dev, | 216 | dev_dbg(&adap->dev, |
121 | "smbus word data - addr 0x%02x, read 0x%04x at 0x%02x.\n", | 217 | "smbus word data - addr 0x%02x, read 0x%04x at 0x%02x.\n", |
122 | addr, data->word, command); | 218 | addr, data->word, command); |
@@ -126,6 +222,12 @@ static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, | |||
126 | break; | 222 | break; |
127 | 223 | ||
128 | case I2C_SMBUS_I2C_BLOCK_DATA: | 224 | case I2C_SMBUS_I2C_BLOCK_DATA: |
225 | /* | ||
226 | * We ignore banks here, because banked chips don't use I2C | ||
227 | * block transfers | ||
228 | */ | ||
229 | if (data->block[0] > 256 - command) /* Avoid overrun */ | ||
230 | data->block[0] = 256 - command; | ||
129 | len = data->block[0]; | 231 | len = data->block[0]; |
130 | if (read_write == I2C_SMBUS_WRITE) { | 232 | if (read_write == I2C_SMBUS_WRITE) { |
131 | for (i = 0; i < len; i++) { | 233 | for (i = 0; i < len; i++) { |
@@ -148,6 +250,55 @@ static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, | |||
148 | ret = 0; | 250 | ret = 0; |
149 | break; | 251 | break; |
150 | 252 | ||
253 | case I2C_SMBUS_BLOCK_DATA: | ||
254 | /* | ||
255 | * We ignore banks here, because chips typically don't use both | ||
256 | * banks and SMBus block transfers | ||
257 | */ | ||
258 | b = stub_find_block(&adap->dev, chip, command, false); | ||
259 | if (read_write == I2C_SMBUS_WRITE) { | ||
260 | len = data->block[0]; | ||
261 | if (len == 0 || len > I2C_SMBUS_BLOCK_MAX) { | ||
262 | ret = -EINVAL; | ||
263 | break; | ||
264 | } | ||
265 | if (b == NULL) { | ||
266 | b = stub_find_block(&adap->dev, chip, command, | ||
267 | true); | ||
268 | if (b == NULL) { | ||
269 | ret = -ENOMEM; | ||
270 | break; | ||
271 | } | ||
272 | } | ||
273 | /* Largest write sets read block length */ | ||
274 | if (len > b->len) | ||
275 | b->len = len; | ||
276 | for (i = 0; i < len; i++) | ||
277 | b->block[i] = data->block[i + 1]; | ||
278 | /* update for byte and word commands */ | ||
279 | chip->words[command] = (b->block[0] << 8) | b->len; | ||
280 | dev_dbg(&adap->dev, | ||
281 | "smbus block data - addr 0x%02x, wrote %d bytes at 0x%02x.\n", | ||
282 | addr, len, command); | ||
283 | } else { | ||
284 | if (b == NULL) { | ||
285 | dev_dbg(&adap->dev, | ||
286 | "SMBus block read command without prior block write not supported\n"); | ||
287 | ret = -EOPNOTSUPP; | ||
288 | break; | ||
289 | } | ||
290 | len = b->len; | ||
291 | data->block[0] = len; | ||
292 | for (i = 0; i < len; i++) | ||
293 | data->block[i + 1] = b->block[i]; | ||
294 | dev_dbg(&adap->dev, | ||
295 | "smbus block data - addr 0x%02x, read %d bytes at 0x%02x.\n", | ||
296 | addr, len, command); | ||
297 | } | ||
298 | |||
299 | ret = 0; | ||
300 | break; | ||
301 | |||
151 | default: | 302 | default: |
152 | dev_dbg(&adap->dev, "Unsupported I2C/SMBus command\n"); | 303 | dev_dbg(&adap->dev, "Unsupported I2C/SMBus command\n"); |
153 | ret = -EOPNOTSUPP; | 304 | ret = -EOPNOTSUPP; |
@@ -159,7 +310,7 @@ static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, | |||
159 | 310 | ||
160 | static u32 stub_func(struct i2c_adapter *adapter) | 311 | static u32 stub_func(struct i2c_adapter *adapter) |
161 | { | 312 | { |
162 | return STUB_FUNC & functionality; | 313 | return STUB_FUNC_ALL & functionality; |
163 | } | 314 | } |
164 | 315 | ||
165 | static const struct i2c_algorithm smbus_algorithm = { | 316 | static const struct i2c_algorithm smbus_algorithm = { |
@@ -174,6 +325,43 @@ static struct i2c_adapter stub_adapter = { | |||
174 | .name = "SMBus stub driver", | 325 | .name = "SMBus stub driver", |
175 | }; | 326 | }; |
176 | 327 | ||
328 | static int __init i2c_stub_allocate_banks(int i) | ||
329 | { | ||
330 | struct stub_chip *chip = stub_chips + i; | ||
331 | |||
332 | chip->bank_reg = bank_reg[i]; | ||
333 | chip->bank_start = bank_start[i]; | ||
334 | chip->bank_end = bank_end[i]; | ||
335 | chip->bank_size = bank_end[i] - bank_start[i] + 1; | ||
336 | |||
337 | /* We assume that all bits in the mask are contiguous */ | ||
338 | chip->bank_mask = bank_mask[i]; | ||
339 | while (!(chip->bank_mask & 1)) { | ||
340 | chip->bank_shift++; | ||
341 | chip->bank_mask >>= 1; | ||
342 | } | ||
343 | |||
344 | chip->bank_words = kzalloc(chip->bank_mask * chip->bank_size * | ||
345 | sizeof(u16), GFP_KERNEL); | ||
346 | if (!chip->bank_words) | ||
347 | return -ENOMEM; | ||
348 | |||
349 | pr_debug("i2c-stub: Allocated %u banks of %u words each (registers 0x%02x to 0x%02x)\n", | ||
350 | chip->bank_mask, chip->bank_size, chip->bank_start, | ||
351 | chip->bank_end); | ||
352 | |||
353 | return 0; | ||
354 | } | ||
355 | |||
356 | static void i2c_stub_free(void) | ||
357 | { | ||
358 | int i; | ||
359 | |||
360 | for (i = 0; i < stub_chips_nr; i++) | ||
361 | kfree(stub_chips[i].bank_words); | ||
362 | kfree(stub_chips); | ||
363 | } | ||
364 | |||
177 | static int __init i2c_stub_init(void) | 365 | static int __init i2c_stub_init(void) |
178 | { | 366 | { |
179 | int i, ret; | 367 | int i, ret; |
@@ -194,22 +382,39 @@ static int __init i2c_stub_init(void) | |||
194 | } | 382 | } |
195 | 383 | ||
196 | /* Allocate memory for all chips at once */ | 384 | /* Allocate memory for all chips at once */ |
197 | stub_chips = kzalloc(i * sizeof(struct stub_chip), GFP_KERNEL); | 385 | stub_chips_nr = i; |
386 | stub_chips = kcalloc(stub_chips_nr, sizeof(struct stub_chip), | ||
387 | GFP_KERNEL); | ||
198 | if (!stub_chips) { | 388 | if (!stub_chips) { |
199 | pr_err("i2c-stub: Out of memory\n"); | 389 | pr_err("i2c-stub: Out of memory\n"); |
200 | return -ENOMEM; | 390 | return -ENOMEM; |
201 | } | 391 | } |
392 | for (i = 0; i < stub_chips_nr; i++) { | ||
393 | INIT_LIST_HEAD(&stub_chips[i].smbus_blocks); | ||
394 | |||
395 | /* Allocate extra memory for banked register ranges */ | ||
396 | if (bank_mask[i]) { | ||
397 | ret = i2c_stub_allocate_banks(i); | ||
398 | if (ret) | ||
399 | goto fail_free; | ||
400 | } | ||
401 | } | ||
202 | 402 | ||
203 | ret = i2c_add_adapter(&stub_adapter); | 403 | ret = i2c_add_adapter(&stub_adapter); |
204 | if (ret) | 404 | if (ret) |
205 | kfree(stub_chips); | 405 | goto fail_free; |
406 | |||
407 | return 0; | ||
408 | |||
409 | fail_free: | ||
410 | i2c_stub_free(); | ||
206 | return ret; | 411 | return ret; |
207 | } | 412 | } |
208 | 413 | ||
209 | static void __exit i2c_stub_exit(void) | 414 | static void __exit i2c_stub_exit(void) |
210 | { | 415 | { |
211 | i2c_del_adapter(&stub_adapter); | 416 | i2c_del_adapter(&stub_adapter); |
212 | kfree(stub_chips); | 417 | i2c_stub_free(); |
213 | } | 418 | } |
214 | 419 | ||
215 | MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>"); | 420 | MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>"); |
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c index 9bd4212782ab..ec11b404b433 100644 --- a/drivers/i2c/muxes/i2c-mux-pca954x.c +++ b/drivers/i2c/muxes/i2c-mux-pca954x.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/i2c-mux.h> | 41 | #include <linux/i2c-mux.h> |
42 | #include <linux/i2c/pca954x.h> | 42 | #include <linux/i2c/pca954x.h> |
43 | #include <linux/module.h> | 43 | #include <linux/module.h> |
44 | #include <linux/pm.h> | ||
44 | #include <linux/slab.h> | 45 | #include <linux/slab.h> |
45 | 46 | ||
46 | #define PCA954X_MAX_NCHANS 8 | 47 | #define PCA954X_MAX_NCHANS 8 |
@@ -273,9 +274,23 @@ static int pca954x_remove(struct i2c_client *client) | |||
273 | return 0; | 274 | return 0; |
274 | } | 275 | } |
275 | 276 | ||
277 | #ifdef CONFIG_PM_SLEEP | ||
278 | static int pca954x_resume(struct device *dev) | ||
279 | { | ||
280 | struct i2c_client *client = to_i2c_client(dev); | ||
281 | struct pca954x *data = i2c_get_clientdata(client); | ||
282 | |||
283 | data->last_chan = 0; | ||
284 | return i2c_smbus_write_byte(client, 0); | ||
285 | } | ||
286 | #endif | ||
287 | |||
288 | static SIMPLE_DEV_PM_OPS(pca954x_pm, NULL, pca954x_resume); | ||
289 | |||
276 | static struct i2c_driver pca954x_driver = { | 290 | static struct i2c_driver pca954x_driver = { |
277 | .driver = { | 291 | .driver = { |
278 | .name = "pca954x", | 292 | .name = "pca954x", |
293 | .pm = &pca954x_pm, | ||
279 | .owner = THIS_MODULE, | 294 | .owner = THIS_MODULE, |
280 | }, | 295 | }, |
281 | .probe = pca954x_probe, | 296 | .probe = pca954x_probe, |
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index f3ac4154cbb6..44ec72684df5 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c | |||
@@ -213,7 +213,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) | |||
213 | tcon->nativeFileSystem); | 213 | tcon->nativeFileSystem); |
214 | } | 214 | } |
215 | seq_printf(m, "DevInfo: 0x%x Attributes: 0x%x" | 215 | seq_printf(m, "DevInfo: 0x%x Attributes: 0x%x" |
216 | "\n\tPathComponentMax: %d Status: 0x%d", | 216 | "\n\tPathComponentMax: %d Status: %d", |
217 | le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics), | 217 | le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics), |
218 | le32_to_cpu(tcon->fsAttrInfo.Attributes), | 218 | le32_to_cpu(tcon->fsAttrInfo.Attributes), |
219 | le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength), | 219 | le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength), |
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 70f178a7c759..560480263336 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h | |||
@@ -136,5 +136,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); | |||
136 | extern const struct export_operations cifs_export_ops; | 136 | extern const struct export_operations cifs_export_ops; |
137 | #endif /* CONFIG_CIFS_NFSD_EXPORT */ | 137 | #endif /* CONFIG_CIFS_NFSD_EXPORT */ |
138 | 138 | ||
139 | #define CIFS_VERSION "2.03" | 139 | #define CIFS_VERSION "2.04" |
140 | #endif /* _CIFSFS_H */ | 140 | #endif /* _CIFSFS_H */ |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index de6aed8c78e5..0012e1e291d4 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -404,6 +404,11 @@ struct smb_version_operations { | |||
404 | const struct cifs_fid *, u32 *); | 404 | const struct cifs_fid *, u32 *); |
405 | int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *, | 405 | int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *, |
406 | int); | 406 | int); |
407 | /* writepages retry size */ | ||
408 | unsigned int (*wp_retry_size)(struct inode *); | ||
409 | /* get mtu credits */ | ||
410 | int (*wait_mtu_credits)(struct TCP_Server_Info *, unsigned int, | ||
411 | unsigned int *, unsigned int *); | ||
407 | }; | 412 | }; |
408 | 413 | ||
409 | struct smb_version_values { | 414 | struct smb_version_values { |
@@ -640,6 +645,16 @@ add_credits(struct TCP_Server_Info *server, const unsigned int add, | |||
640 | } | 645 | } |
641 | 646 | ||
642 | static inline void | 647 | static inline void |
648 | add_credits_and_wake_if(struct TCP_Server_Info *server, const unsigned int add, | ||
649 | const int optype) | ||
650 | { | ||
651 | if (add) { | ||
652 | server->ops->add_credits(server, add, optype); | ||
653 | wake_up(&server->request_q); | ||
654 | } | ||
655 | } | ||
656 | |||
657 | static inline void | ||
643 | set_credits(struct TCP_Server_Info *server, const int val) | 658 | set_credits(struct TCP_Server_Info *server, const int val) |
644 | { | 659 | { |
645 | server->ops->set_credits(server, val); | 660 | server->ops->set_credits(server, val); |
@@ -1044,6 +1059,7 @@ struct cifs_readdata { | |||
1044 | struct address_space *mapping; | 1059 | struct address_space *mapping; |
1045 | __u64 offset; | 1060 | __u64 offset; |
1046 | unsigned int bytes; | 1061 | unsigned int bytes; |
1062 | unsigned int got_bytes; | ||
1047 | pid_t pid; | 1063 | pid_t pid; |
1048 | int result; | 1064 | int result; |
1049 | struct work_struct work; | 1065 | struct work_struct work; |
@@ -1053,6 +1069,7 @@ struct cifs_readdata { | |||
1053 | struct kvec iov; | 1069 | struct kvec iov; |
1054 | unsigned int pagesz; | 1070 | unsigned int pagesz; |
1055 | unsigned int tailsz; | 1071 | unsigned int tailsz; |
1072 | unsigned int credits; | ||
1056 | unsigned int nr_pages; | 1073 | unsigned int nr_pages; |
1057 | struct page *pages[]; | 1074 | struct page *pages[]; |
1058 | }; | 1075 | }; |
@@ -1073,6 +1090,7 @@ struct cifs_writedata { | |||
1073 | int result; | 1090 | int result; |
1074 | unsigned int pagesz; | 1091 | unsigned int pagesz; |
1075 | unsigned int tailsz; | 1092 | unsigned int tailsz; |
1093 | unsigned int credits; | ||
1076 | unsigned int nr_pages; | 1094 | unsigned int nr_pages; |
1077 | struct page *pages[]; | 1095 | struct page *pages[]; |
1078 | }; | 1096 | }; |
@@ -1398,6 +1416,7 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param, | |||
1398 | #define CIFS_OBREAK_OP 0x0100 /* oplock break request */ | 1416 | #define CIFS_OBREAK_OP 0x0100 /* oplock break request */ |
1399 | #define CIFS_NEG_OP 0x0200 /* negotiate request */ | 1417 | #define CIFS_NEG_OP 0x0200 /* negotiate request */ |
1400 | #define CIFS_OP_MASK 0x0380 /* mask request type */ | 1418 | #define CIFS_OP_MASK 0x0380 /* mask request type */ |
1419 | #define CIFS_HAS_CREDITS 0x0400 /* already has credits */ | ||
1401 | 1420 | ||
1402 | /* Security Flags: indicate type of session setup needed */ | 1421 | /* Security Flags: indicate type of session setup needed */ |
1403 | #define CIFSSEC_MAY_SIGN 0x00001 | 1422 | #define CIFSSEC_MAY_SIGN 0x00001 |
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index ca7980a1e303..c31ce98c1704 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h | |||
@@ -36,6 +36,7 @@ extern struct smb_hdr *cifs_buf_get(void); | |||
36 | extern void cifs_buf_release(void *); | 36 | extern void cifs_buf_release(void *); |
37 | extern struct smb_hdr *cifs_small_buf_get(void); | 37 | extern struct smb_hdr *cifs_small_buf_get(void); |
38 | extern void cifs_small_buf_release(void *); | 38 | extern void cifs_small_buf_release(void *); |
39 | extern void free_rsp_buf(int, void *); | ||
39 | extern void cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx, | 40 | extern void cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx, |
40 | struct kvec *iov); | 41 | struct kvec *iov); |
41 | extern int smb_send(struct TCP_Server_Info *, struct smb_hdr *, | 42 | extern int smb_send(struct TCP_Server_Info *, struct smb_hdr *, |
@@ -89,6 +90,9 @@ extern struct mid_q_entry *cifs_setup_async_request(struct TCP_Server_Info *, | |||
89 | struct smb_rqst *); | 90 | struct smb_rqst *); |
90 | extern int cifs_check_receive(struct mid_q_entry *mid, | 91 | extern int cifs_check_receive(struct mid_q_entry *mid, |
91 | struct TCP_Server_Info *server, bool log_error); | 92 | struct TCP_Server_Info *server, bool log_error); |
93 | extern int cifs_wait_mtu_credits(struct TCP_Server_Info *server, | ||
94 | unsigned int size, unsigned int *num, | ||
95 | unsigned int *credits); | ||
92 | extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *, | 96 | extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *, |
93 | struct kvec *, int /* nvec to send */, | 97 | struct kvec *, int /* nvec to send */, |
94 | int * /* type of buf returned */ , const int flags); | 98 | int * /* type of buf returned */ , const int flags); |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 6ce4e0954b98..66f65001a6d8 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
@@ -196,10 +196,6 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command) | |||
196 | if (rc) | 196 | if (rc) |
197 | goto out; | 197 | goto out; |
198 | 198 | ||
199 | /* | ||
200 | * FIXME: check if wsize needs updated due to negotiated smb buffer | ||
201 | * size shrinking | ||
202 | */ | ||
203 | atomic_inc(&tconInfoReconnectCount); | 199 | atomic_inc(&tconInfoReconnectCount); |
204 | 200 | ||
205 | /* tell server Unix caps we support */ | 201 | /* tell server Unix caps we support */ |
@@ -1517,7 +1513,6 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) | |||
1517 | return length; | 1513 | return length; |
1518 | 1514 | ||
1519 | server->total_read += length; | 1515 | server->total_read += length; |
1520 | rdata->bytes = length; | ||
1521 | 1516 | ||
1522 | cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n", | 1517 | cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n", |
1523 | server->total_read, buflen, data_len); | 1518 | server->total_read, buflen, data_len); |
@@ -1560,12 +1555,18 @@ cifs_readv_callback(struct mid_q_entry *mid) | |||
1560 | rc); | 1555 | rc); |
1561 | } | 1556 | } |
1562 | /* FIXME: should this be counted toward the initiating task? */ | 1557 | /* FIXME: should this be counted toward the initiating task? */ |
1563 | task_io_account_read(rdata->bytes); | 1558 | task_io_account_read(rdata->got_bytes); |
1564 | cifs_stats_bytes_read(tcon, rdata->bytes); | 1559 | cifs_stats_bytes_read(tcon, rdata->got_bytes); |
1565 | break; | 1560 | break; |
1566 | case MID_REQUEST_SUBMITTED: | 1561 | case MID_REQUEST_SUBMITTED: |
1567 | case MID_RETRY_NEEDED: | 1562 | case MID_RETRY_NEEDED: |
1568 | rdata->result = -EAGAIN; | 1563 | rdata->result = -EAGAIN; |
1564 | if (server->sign && rdata->got_bytes) | ||
1565 | /* reset bytes number since we can not check a sign */ | ||
1566 | rdata->got_bytes = 0; | ||
1567 | /* FIXME: should this be counted toward the initiating task? */ | ||
1568 | task_io_account_read(rdata->got_bytes); | ||
1569 | cifs_stats_bytes_read(tcon, rdata->got_bytes); | ||
1569 | break; | 1570 | break; |
1570 | default: | 1571 | default: |
1571 | rdata->result = -EIO; | 1572 | rdata->result = -EIO; |
@@ -1734,10 +1735,7 @@ CIFSSMBRead(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
1734 | 1735 | ||
1735 | /* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */ | 1736 | /* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */ |
1736 | if (*buf) { | 1737 | if (*buf) { |
1737 | if (resp_buf_type == CIFS_SMALL_BUFFER) | 1738 | free_rsp_buf(resp_buf_type, iov[0].iov_base); |
1738 | cifs_small_buf_release(iov[0].iov_base); | ||
1739 | else if (resp_buf_type == CIFS_LARGE_BUFFER) | ||
1740 | cifs_buf_release(iov[0].iov_base); | ||
1741 | } else if (resp_buf_type != CIFS_NO_BUFFER) { | 1739 | } else if (resp_buf_type != CIFS_NO_BUFFER) { |
1742 | /* return buffer to caller to free */ | 1740 | /* return buffer to caller to free */ |
1743 | *buf = iov[0].iov_base; | 1741 | *buf = iov[0].iov_base; |
@@ -1899,28 +1897,80 @@ cifs_writedata_release(struct kref *refcount) | |||
1899 | static void | 1897 | static void |
1900 | cifs_writev_requeue(struct cifs_writedata *wdata) | 1898 | cifs_writev_requeue(struct cifs_writedata *wdata) |
1901 | { | 1899 | { |
1902 | int i, rc; | 1900 | int i, rc = 0; |
1903 | struct inode *inode = wdata->cfile->dentry->d_inode; | 1901 | struct inode *inode = wdata->cfile->dentry->d_inode; |
1904 | struct TCP_Server_Info *server; | 1902 | struct TCP_Server_Info *server; |
1903 | unsigned int rest_len; | ||
1905 | 1904 | ||
1906 | for (i = 0; i < wdata->nr_pages; i++) { | 1905 | server = tlink_tcon(wdata->cfile->tlink)->ses->server; |
1907 | lock_page(wdata->pages[i]); | 1906 | i = 0; |
1908 | clear_page_dirty_for_io(wdata->pages[i]); | 1907 | rest_len = wdata->bytes; |
1909 | } | ||
1910 | |||
1911 | do { | 1908 | do { |
1912 | server = tlink_tcon(wdata->cfile->tlink)->ses->server; | 1909 | struct cifs_writedata *wdata2; |
1913 | rc = server->ops->async_writev(wdata, cifs_writedata_release); | 1910 | unsigned int j, nr_pages, wsize, tailsz, cur_len; |
1914 | } while (rc == -EAGAIN); | 1911 | |
1912 | wsize = server->ops->wp_retry_size(inode); | ||
1913 | if (wsize < rest_len) { | ||
1914 | nr_pages = wsize / PAGE_CACHE_SIZE; | ||
1915 | if (!nr_pages) { | ||
1916 | rc = -ENOTSUPP; | ||
1917 | break; | ||
1918 | } | ||
1919 | cur_len = nr_pages * PAGE_CACHE_SIZE; | ||
1920 | tailsz = PAGE_CACHE_SIZE; | ||
1921 | } else { | ||
1922 | nr_pages = DIV_ROUND_UP(rest_len, PAGE_CACHE_SIZE); | ||
1923 | cur_len = rest_len; | ||
1924 | tailsz = rest_len - (nr_pages - 1) * PAGE_CACHE_SIZE; | ||
1925 | } | ||
1915 | 1926 | ||
1916 | for (i = 0; i < wdata->nr_pages; i++) { | 1927 | wdata2 = cifs_writedata_alloc(nr_pages, cifs_writev_complete); |
1917 | unlock_page(wdata->pages[i]); | 1928 | if (!wdata2) { |
1918 | if (rc != 0) { | 1929 | rc = -ENOMEM; |
1919 | SetPageError(wdata->pages[i]); | 1930 | break; |
1920 | end_page_writeback(wdata->pages[i]); | ||
1921 | page_cache_release(wdata->pages[i]); | ||
1922 | } | 1931 | } |
1923 | } | 1932 | |
1933 | for (j = 0; j < nr_pages; j++) { | ||
1934 | wdata2->pages[j] = wdata->pages[i + j]; | ||
1935 | lock_page(wdata2->pages[j]); | ||
1936 | clear_page_dirty_for_io(wdata2->pages[j]); | ||
1937 | } | ||
1938 | |||
1939 | wdata2->sync_mode = wdata->sync_mode; | ||
1940 | wdata2->nr_pages = nr_pages; | ||
1941 | wdata2->offset = page_offset(wdata2->pages[0]); | ||
1942 | wdata2->pagesz = PAGE_CACHE_SIZE; | ||
1943 | wdata2->tailsz = tailsz; | ||
1944 | wdata2->bytes = cur_len; | ||
1945 | |||
1946 | wdata2->cfile = find_writable_file(CIFS_I(inode), false); | ||
1947 | if (!wdata2->cfile) { | ||
1948 | cifs_dbg(VFS, "No writable handles for inode\n"); | ||
1949 | rc = -EBADF; | ||
1950 | break; | ||
1951 | } | ||
1952 | wdata2->pid = wdata2->cfile->pid; | ||
1953 | rc = server->ops->async_writev(wdata2, cifs_writedata_release); | ||
1954 | |||
1955 | for (j = 0; j < nr_pages; j++) { | ||
1956 | unlock_page(wdata2->pages[j]); | ||
1957 | if (rc != 0 && rc != -EAGAIN) { | ||
1958 | SetPageError(wdata2->pages[j]); | ||
1959 | end_page_writeback(wdata2->pages[j]); | ||
1960 | page_cache_release(wdata2->pages[j]); | ||
1961 | } | ||
1962 | } | ||
1963 | |||
1964 | if (rc) { | ||
1965 | kref_put(&wdata2->refcount, cifs_writedata_release); | ||
1966 | if (rc == -EAGAIN) | ||
1967 | continue; | ||
1968 | break; | ||
1969 | } | ||
1970 | |||
1971 | rest_len -= cur_len; | ||
1972 | i += nr_pages; | ||
1973 | } while (i < wdata->nr_pages); | ||
1924 | 1974 | ||
1925 | mapping_set_error(inode->i_mapping, rc); | 1975 | mapping_set_error(inode->i_mapping, rc); |
1926 | kref_put(&wdata->refcount, cifs_writedata_release); | 1976 | kref_put(&wdata->refcount, cifs_writedata_release); |
@@ -2203,10 +2253,7 @@ CIFSSMBWrite2(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
2203 | } | 2253 | } |
2204 | 2254 | ||
2205 | /* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */ | 2255 | /* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */ |
2206 | if (resp_buf_type == CIFS_SMALL_BUFFER) | 2256 | free_rsp_buf(resp_buf_type, iov[0].iov_base); |
2207 | cifs_small_buf_release(iov[0].iov_base); | ||
2208 | else if (resp_buf_type == CIFS_LARGE_BUFFER) | ||
2209 | cifs_buf_release(iov[0].iov_base); | ||
2210 | 2257 | ||
2211 | /* Note: On -EAGAIN error only caller can retry on handle based calls | 2258 | /* Note: On -EAGAIN error only caller can retry on handle based calls |
2212 | since file handle passed in no longer valid */ | 2259 | since file handle passed in no longer valid */ |
@@ -2451,10 +2498,7 @@ plk_err_exit: | |||
2451 | if (pSMB) | 2498 | if (pSMB) |
2452 | cifs_small_buf_release(pSMB); | 2499 | cifs_small_buf_release(pSMB); |
2453 | 2500 | ||
2454 | if (resp_buf_type == CIFS_SMALL_BUFFER) | 2501 | free_rsp_buf(resp_buf_type, iov[0].iov_base); |
2455 | cifs_small_buf_release(iov[0].iov_base); | ||
2456 | else if (resp_buf_type == CIFS_LARGE_BUFFER) | ||
2457 | cifs_buf_release(iov[0].iov_base); | ||
2458 | 2502 | ||
2459 | /* Note: On -EAGAIN error only caller can retry on handle based calls | 2503 | /* Note: On -EAGAIN error only caller can retry on handle based calls |
2460 | since file handle passed in no longer valid */ | 2504 | since file handle passed in no longer valid */ |
@@ -3838,10 +3882,7 @@ CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid, | |||
3838 | } | 3882 | } |
3839 | } | 3883 | } |
3840 | qsec_out: | 3884 | qsec_out: |
3841 | if (buf_type == CIFS_SMALL_BUFFER) | 3885 | free_rsp_buf(buf_type, iov[0].iov_base); |
3842 | cifs_small_buf_release(iov[0].iov_base); | ||
3843 | else if (buf_type == CIFS_LARGE_BUFFER) | ||
3844 | cifs_buf_release(iov[0].iov_base); | ||
3845 | /* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */ | 3886 | /* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */ |
3846 | return rc; | 3887 | return rc; |
3847 | } | 3888 | } |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index b98366f21f9e..03ed8a09581c 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -557,7 +557,7 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig, | |||
557 | try_to_freeze(); | 557 | try_to_freeze(); |
558 | 558 | ||
559 | if (server_unresponsive(server)) { | 559 | if (server_unresponsive(server)) { |
560 | total_read = -EAGAIN; | 560 | total_read = -ECONNABORTED; |
561 | break; | 561 | break; |
562 | } | 562 | } |
563 | 563 | ||
@@ -571,7 +571,7 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig, | |||
571 | break; | 571 | break; |
572 | } else if (server->tcpStatus == CifsNeedReconnect) { | 572 | } else if (server->tcpStatus == CifsNeedReconnect) { |
573 | cifs_reconnect(server); | 573 | cifs_reconnect(server); |
574 | total_read = -EAGAIN; | 574 | total_read = -ECONNABORTED; |
575 | break; | 575 | break; |
576 | } else if (length == -ERESTARTSYS || | 576 | } else if (length == -ERESTARTSYS || |
577 | length == -EAGAIN || | 577 | length == -EAGAIN || |
@@ -588,7 +588,7 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig, | |||
588 | cifs_dbg(FYI, "Received no data or error: expecting %d\n" | 588 | cifs_dbg(FYI, "Received no data or error: expecting %d\n" |
589 | "got %d", to_read, length); | 589 | "got %d", to_read, length); |
590 | cifs_reconnect(server); | 590 | cifs_reconnect(server); |
591 | total_read = -EAGAIN; | 591 | total_read = -ECONNABORTED; |
592 | break; | 592 | break; |
593 | } | 593 | } |
594 | } | 594 | } |
@@ -786,7 +786,7 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid) | |||
786 | cifs_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length); | 786 | cifs_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length); |
787 | cifs_reconnect(server); | 787 | cifs_reconnect(server); |
788 | wake_up(&server->response_q); | 788 | wake_up(&server->response_q); |
789 | return -EAGAIN; | 789 | return -ECONNABORTED; |
790 | } | 790 | } |
791 | 791 | ||
792 | /* switch to large buffer if too big for a small one */ | 792 | /* switch to large buffer if too big for a small one */ |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index b88b1ade4d3d..4ab2f79ffa7a 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -1670,8 +1670,8 @@ cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data, | |||
1670 | break; | 1670 | break; |
1671 | } | 1671 | } |
1672 | 1672 | ||
1673 | len = min((size_t)cifs_sb->wsize, | 1673 | len = min(server->ops->wp_retry_size(dentry->d_inode), |
1674 | write_size - total_written); | 1674 | (unsigned int)write_size - total_written); |
1675 | /* iov[0] is reserved for smb header */ | 1675 | /* iov[0] is reserved for smb header */ |
1676 | iov[1].iov_base = (char *)write_data + total_written; | 1676 | iov[1].iov_base = (char *)write_data + total_written; |
1677 | iov[1].iov_len = len; | 1677 | iov[1].iov_len = len; |
@@ -1878,15 +1878,163 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) | |||
1878 | return rc; | 1878 | return rc; |
1879 | } | 1879 | } |
1880 | 1880 | ||
1881 | static struct cifs_writedata * | ||
1882 | wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping, | ||
1883 | pgoff_t end, pgoff_t *index, | ||
1884 | unsigned int *found_pages) | ||
1885 | { | ||
1886 | unsigned int nr_pages; | ||
1887 | struct page **pages; | ||
1888 | struct cifs_writedata *wdata; | ||
1889 | |||
1890 | wdata = cifs_writedata_alloc((unsigned int)tofind, | ||
1891 | cifs_writev_complete); | ||
1892 | if (!wdata) | ||
1893 | return NULL; | ||
1894 | |||
1895 | /* | ||
1896 | * find_get_pages_tag seems to return a max of 256 on each | ||
1897 | * iteration, so we must call it several times in order to | ||
1898 | * fill the array or the wsize is effectively limited to | ||
1899 | * 256 * PAGE_CACHE_SIZE. | ||
1900 | */ | ||
1901 | *found_pages = 0; | ||
1902 | pages = wdata->pages; | ||
1903 | do { | ||
1904 | nr_pages = find_get_pages_tag(mapping, index, | ||
1905 | PAGECACHE_TAG_DIRTY, tofind, | ||
1906 | pages); | ||
1907 | *found_pages += nr_pages; | ||
1908 | tofind -= nr_pages; | ||
1909 | pages += nr_pages; | ||
1910 | } while (nr_pages && tofind && *index <= end); | ||
1911 | |||
1912 | return wdata; | ||
1913 | } | ||
1914 | |||
1915 | static unsigned int | ||
1916 | wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages, | ||
1917 | struct address_space *mapping, | ||
1918 | struct writeback_control *wbc, | ||
1919 | pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done) | ||
1920 | { | ||
1921 | unsigned int nr_pages = 0, i; | ||
1922 | struct page *page; | ||
1923 | |||
1924 | for (i = 0; i < found_pages; i++) { | ||
1925 | page = wdata->pages[i]; | ||
1926 | /* | ||
1927 | * At this point we hold neither mapping->tree_lock nor | ||
1928 | * lock on the page itself: the page may be truncated or | ||
1929 | * invalidated (changing page->mapping to NULL), or even | ||
1930 | * swizzled back from swapper_space to tmpfs file | ||
1931 | * mapping | ||
1932 | */ | ||
1933 | |||
1934 | if (nr_pages == 0) | ||
1935 | lock_page(page); | ||
1936 | else if (!trylock_page(page)) | ||
1937 | break; | ||
1938 | |||
1939 | if (unlikely(page->mapping != mapping)) { | ||
1940 | unlock_page(page); | ||
1941 | break; | ||
1942 | } | ||
1943 | |||
1944 | if (!wbc->range_cyclic && page->index > end) { | ||
1945 | *done = true; | ||
1946 | unlock_page(page); | ||
1947 | break; | ||
1948 | } | ||
1949 | |||
1950 | if (*next && (page->index != *next)) { | ||
1951 | /* Not next consecutive page */ | ||
1952 | unlock_page(page); | ||
1953 | break; | ||
1954 | } | ||
1955 | |||
1956 | if (wbc->sync_mode != WB_SYNC_NONE) | ||
1957 | wait_on_page_writeback(page); | ||
1958 | |||
1959 | if (PageWriteback(page) || | ||
1960 | !clear_page_dirty_for_io(page)) { | ||
1961 | unlock_page(page); | ||
1962 | break; | ||
1963 | } | ||
1964 | |||
1965 | /* | ||
1966 | * This actually clears the dirty bit in the radix tree. | ||
1967 | * See cifs_writepage() for more commentary. | ||
1968 | */ | ||
1969 | set_page_writeback(page); | ||
1970 | if (page_offset(page) >= i_size_read(mapping->host)) { | ||
1971 | *done = true; | ||
1972 | unlock_page(page); | ||
1973 | end_page_writeback(page); | ||
1974 | break; | ||
1975 | } | ||
1976 | |||
1977 | wdata->pages[i] = page; | ||
1978 | *next = page->index + 1; | ||
1979 | ++nr_pages; | ||
1980 | } | ||
1981 | |||
1982 | /* reset index to refind any pages skipped */ | ||
1983 | if (nr_pages == 0) | ||
1984 | *index = wdata->pages[0]->index + 1; | ||
1985 | |||
1986 | /* put any pages we aren't going to use */ | ||
1987 | for (i = nr_pages; i < found_pages; i++) { | ||
1988 | page_cache_release(wdata->pages[i]); | ||
1989 | wdata->pages[i] = NULL; | ||
1990 | } | ||
1991 | |||
1992 | return nr_pages; | ||
1993 | } | ||
1994 | |||
1995 | static int | ||
1996 | wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages, | ||
1997 | struct address_space *mapping, struct writeback_control *wbc) | ||
1998 | { | ||
1999 | int rc = 0; | ||
2000 | struct TCP_Server_Info *server; | ||
2001 | unsigned int i; | ||
2002 | |||
2003 | wdata->sync_mode = wbc->sync_mode; | ||
2004 | wdata->nr_pages = nr_pages; | ||
2005 | wdata->offset = page_offset(wdata->pages[0]); | ||
2006 | wdata->pagesz = PAGE_CACHE_SIZE; | ||
2007 | wdata->tailsz = min(i_size_read(mapping->host) - | ||
2008 | page_offset(wdata->pages[nr_pages - 1]), | ||
2009 | (loff_t)PAGE_CACHE_SIZE); | ||
2010 | wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) + wdata->tailsz; | ||
2011 | |||
2012 | if (wdata->cfile != NULL) | ||
2013 | cifsFileInfo_put(wdata->cfile); | ||
2014 | wdata->cfile = find_writable_file(CIFS_I(mapping->host), false); | ||
2015 | if (!wdata->cfile) { | ||
2016 | cifs_dbg(VFS, "No writable handles for inode\n"); | ||
2017 | rc = -EBADF; | ||
2018 | } else { | ||
2019 | wdata->pid = wdata->cfile->pid; | ||
2020 | server = tlink_tcon(wdata->cfile->tlink)->ses->server; | ||
2021 | rc = server->ops->async_writev(wdata, cifs_writedata_release); | ||
2022 | } | ||
2023 | |||
2024 | for (i = 0; i < nr_pages; ++i) | ||
2025 | unlock_page(wdata->pages[i]); | ||
2026 | |||
2027 | return rc; | ||
2028 | } | ||
2029 | |||
1881 | static int cifs_writepages(struct address_space *mapping, | 2030 | static int cifs_writepages(struct address_space *mapping, |
1882 | struct writeback_control *wbc) | 2031 | struct writeback_control *wbc) |
1883 | { | 2032 | { |
1884 | struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb); | 2033 | struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb); |
2034 | struct TCP_Server_Info *server; | ||
1885 | bool done = false, scanned = false, range_whole = false; | 2035 | bool done = false, scanned = false, range_whole = false; |
1886 | pgoff_t end, index; | 2036 | pgoff_t end, index; |
1887 | struct cifs_writedata *wdata; | 2037 | struct cifs_writedata *wdata; |
1888 | struct TCP_Server_Info *server; | ||
1889 | struct page *page; | ||
1890 | int rc = 0; | 2038 | int rc = 0; |
1891 | 2039 | ||
1892 | /* | 2040 | /* |
@@ -1906,152 +2054,50 @@ static int cifs_writepages(struct address_space *mapping, | |||
1906 | range_whole = true; | 2054 | range_whole = true; |
1907 | scanned = true; | 2055 | scanned = true; |
1908 | } | 2056 | } |
2057 | server = cifs_sb_master_tcon(cifs_sb)->ses->server; | ||
1909 | retry: | 2058 | retry: |
1910 | while (!done && index <= end) { | 2059 | while (!done && index <= end) { |
1911 | unsigned int i, nr_pages, found_pages; | 2060 | unsigned int i, nr_pages, found_pages, wsize, credits; |
1912 | pgoff_t next = 0, tofind; | 2061 | pgoff_t next = 0, tofind, saved_index = index; |
1913 | struct page **pages; | 2062 | |
2063 | rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize, | ||
2064 | &wsize, &credits); | ||
2065 | if (rc) | ||
2066 | break; | ||
1914 | 2067 | ||
1915 | tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1, | 2068 | tofind = min((wsize / PAGE_CACHE_SIZE) - 1, end - index) + 1; |
1916 | end - index) + 1; | ||
1917 | 2069 | ||
1918 | wdata = cifs_writedata_alloc((unsigned int)tofind, | 2070 | wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index, |
1919 | cifs_writev_complete); | 2071 | &found_pages); |
1920 | if (!wdata) { | 2072 | if (!wdata) { |
1921 | rc = -ENOMEM; | 2073 | rc = -ENOMEM; |
2074 | add_credits_and_wake_if(server, credits, 0); | ||
1922 | break; | 2075 | break; |
1923 | } | 2076 | } |
1924 | 2077 | ||
1925 | /* | ||
1926 | * find_get_pages_tag seems to return a max of 256 on each | ||
1927 | * iteration, so we must call it several times in order to | ||
1928 | * fill the array or the wsize is effectively limited to | ||
1929 | * 256 * PAGE_CACHE_SIZE. | ||
1930 | */ | ||
1931 | found_pages = 0; | ||
1932 | pages = wdata->pages; | ||
1933 | do { | ||
1934 | nr_pages = find_get_pages_tag(mapping, &index, | ||
1935 | PAGECACHE_TAG_DIRTY, | ||
1936 | tofind, pages); | ||
1937 | found_pages += nr_pages; | ||
1938 | tofind -= nr_pages; | ||
1939 | pages += nr_pages; | ||
1940 | } while (nr_pages && tofind && index <= end); | ||
1941 | |||
1942 | if (found_pages == 0) { | 2078 | if (found_pages == 0) { |
1943 | kref_put(&wdata->refcount, cifs_writedata_release); | 2079 | kref_put(&wdata->refcount, cifs_writedata_release); |
2080 | add_credits_and_wake_if(server, credits, 0); | ||
1944 | break; | 2081 | break; |
1945 | } | 2082 | } |
1946 | 2083 | ||
1947 | nr_pages = 0; | 2084 | nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc, |
1948 | for (i = 0; i < found_pages; i++) { | 2085 | end, &index, &next, &done); |
1949 | page = wdata->pages[i]; | ||
1950 | /* | ||
1951 | * At this point we hold neither mapping->tree_lock nor | ||
1952 | * lock on the page itself: the page may be truncated or | ||
1953 | * invalidated (changing page->mapping to NULL), or even | ||
1954 | * swizzled back from swapper_space to tmpfs file | ||
1955 | * mapping | ||
1956 | */ | ||
1957 | |||
1958 | if (nr_pages == 0) | ||
1959 | lock_page(page); | ||
1960 | else if (!trylock_page(page)) | ||
1961 | break; | ||
1962 | |||
1963 | if (unlikely(page->mapping != mapping)) { | ||
1964 | unlock_page(page); | ||
1965 | break; | ||
1966 | } | ||
1967 | |||
1968 | if (!wbc->range_cyclic && page->index > end) { | ||
1969 | done = true; | ||
1970 | unlock_page(page); | ||
1971 | break; | ||
1972 | } | ||
1973 | |||
1974 | if (next && (page->index != next)) { | ||
1975 | /* Not next consecutive page */ | ||
1976 | unlock_page(page); | ||
1977 | break; | ||
1978 | } | ||
1979 | |||
1980 | if (wbc->sync_mode != WB_SYNC_NONE) | ||
1981 | wait_on_page_writeback(page); | ||
1982 | |||
1983 | if (PageWriteback(page) || | ||
1984 | !clear_page_dirty_for_io(page)) { | ||
1985 | unlock_page(page); | ||
1986 | break; | ||
1987 | } | ||
1988 | |||
1989 | /* | ||
1990 | * This actually clears the dirty bit in the radix tree. | ||
1991 | * See cifs_writepage() for more commentary. | ||
1992 | */ | ||
1993 | set_page_writeback(page); | ||
1994 | |||
1995 | if (page_offset(page) >= i_size_read(mapping->host)) { | ||
1996 | done = true; | ||
1997 | unlock_page(page); | ||
1998 | end_page_writeback(page); | ||
1999 | break; | ||
2000 | } | ||
2001 | |||
2002 | wdata->pages[i] = page; | ||
2003 | next = page->index + 1; | ||
2004 | ++nr_pages; | ||
2005 | } | ||
2006 | |||
2007 | /* reset index to refind any pages skipped */ | ||
2008 | if (nr_pages == 0) | ||
2009 | index = wdata->pages[0]->index + 1; | ||
2010 | |||
2011 | /* put any pages we aren't going to use */ | ||
2012 | for (i = nr_pages; i < found_pages; i++) { | ||
2013 | page_cache_release(wdata->pages[i]); | ||
2014 | wdata->pages[i] = NULL; | ||
2015 | } | ||
2016 | 2086 | ||
2017 | /* nothing to write? */ | 2087 | /* nothing to write? */ |
2018 | if (nr_pages == 0) { | 2088 | if (nr_pages == 0) { |
2019 | kref_put(&wdata->refcount, cifs_writedata_release); | 2089 | kref_put(&wdata->refcount, cifs_writedata_release); |
2090 | add_credits_and_wake_if(server, credits, 0); | ||
2020 | continue; | 2091 | continue; |
2021 | } | 2092 | } |
2022 | 2093 | ||
2023 | wdata->sync_mode = wbc->sync_mode; | 2094 | wdata->credits = credits; |
2024 | wdata->nr_pages = nr_pages; | ||
2025 | wdata->offset = page_offset(wdata->pages[0]); | ||
2026 | wdata->pagesz = PAGE_CACHE_SIZE; | ||
2027 | wdata->tailsz = | ||
2028 | min(i_size_read(mapping->host) - | ||
2029 | page_offset(wdata->pages[nr_pages - 1]), | ||
2030 | (loff_t)PAGE_CACHE_SIZE); | ||
2031 | wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) + | ||
2032 | wdata->tailsz; | ||
2033 | |||
2034 | do { | ||
2035 | if (wdata->cfile != NULL) | ||
2036 | cifsFileInfo_put(wdata->cfile); | ||
2037 | wdata->cfile = find_writable_file(CIFS_I(mapping->host), | ||
2038 | false); | ||
2039 | if (!wdata->cfile) { | ||
2040 | cifs_dbg(VFS, "No writable handles for inode\n"); | ||
2041 | rc = -EBADF; | ||
2042 | break; | ||
2043 | } | ||
2044 | wdata->pid = wdata->cfile->pid; | ||
2045 | server = tlink_tcon(wdata->cfile->tlink)->ses->server; | ||
2046 | rc = server->ops->async_writev(wdata, | ||
2047 | cifs_writedata_release); | ||
2048 | } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN); | ||
2049 | 2095 | ||
2050 | for (i = 0; i < nr_pages; ++i) | 2096 | rc = wdata_send_pages(wdata, nr_pages, mapping, wbc); |
2051 | unlock_page(wdata->pages[i]); | ||
2052 | 2097 | ||
2053 | /* send failure -- clean up the mess */ | 2098 | /* send failure -- clean up the mess */ |
2054 | if (rc != 0) { | 2099 | if (rc != 0) { |
2100 | add_credits_and_wake_if(server, wdata->credits, 0); | ||
2055 | for (i = 0; i < nr_pages; ++i) { | 2101 | for (i = 0; i < nr_pages; ++i) { |
2056 | if (rc == -EAGAIN) | 2102 | if (rc == -EAGAIN) |
2057 | redirty_page_for_writepage(wbc, | 2103 | redirty_page_for_writepage(wbc, |
@@ -2066,6 +2112,11 @@ retry: | |||
2066 | } | 2112 | } |
2067 | kref_put(&wdata->refcount, cifs_writedata_release); | 2113 | kref_put(&wdata->refcount, cifs_writedata_release); |
2068 | 2114 | ||
2115 | if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) { | ||
2116 | index = saved_index; | ||
2117 | continue; | ||
2118 | } | ||
2119 | |||
2069 | wbc->nr_to_write -= nr_pages; | 2120 | wbc->nr_to_write -= nr_pages; |
2070 | if (wbc->nr_to_write <= 0) | 2121 | if (wbc->nr_to_write <= 0) |
2071 | done = true; | 2122 | done = true; |
@@ -2362,123 +2413,109 @@ cifs_uncached_writev_complete(struct work_struct *work) | |||
2362 | kref_put(&wdata->refcount, cifs_uncached_writedata_release); | 2413 | kref_put(&wdata->refcount, cifs_uncached_writedata_release); |
2363 | } | 2414 | } |
2364 | 2415 | ||
2365 | /* attempt to send write to server, retry on any -EAGAIN errors */ | ||
2366 | static int | 2416 | static int |
2367 | cifs_uncached_retry_writev(struct cifs_writedata *wdata) | 2417 | wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from, |
2418 | size_t *len, unsigned long *num_pages) | ||
2368 | { | 2419 | { |
2369 | int rc; | 2420 | size_t save_len, copied, bytes, cur_len = *len; |
2370 | struct TCP_Server_Info *server; | 2421 | unsigned long i, nr_pages = *num_pages; |
2371 | 2422 | ||
2372 | server = tlink_tcon(wdata->cfile->tlink)->ses->server; | 2423 | save_len = cur_len; |
2424 | for (i = 0; i < nr_pages; i++) { | ||
2425 | bytes = min_t(const size_t, cur_len, PAGE_SIZE); | ||
2426 | copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from); | ||
2427 | cur_len -= copied; | ||
2428 | /* | ||
2429 | * If we didn't copy as much as we expected, then that | ||
2430 | * may mean we trod into an unmapped area. Stop copying | ||
2431 | * at that point. On the next pass through the big | ||
2432 | * loop, we'll likely end up getting a zero-length | ||
2433 | * write and bailing out of it. | ||
2434 | */ | ||
2435 | if (copied < bytes) | ||
2436 | break; | ||
2437 | } | ||
2438 | cur_len = save_len - cur_len; | ||
2439 | *len = cur_len; | ||
2373 | 2440 | ||
2374 | do { | 2441 | /* |
2375 | if (wdata->cfile->invalidHandle) { | 2442 | * If we have no data to send, then that probably means that |
2376 | rc = cifs_reopen_file(wdata->cfile, false); | 2443 | * the copy above failed altogether. That's most likely because |
2377 | if (rc != 0) | 2444 | * the address in the iovec was bogus. Return -EFAULT and let |
2378 | continue; | 2445 | * the caller free anything we allocated and bail out. |
2379 | } | 2446 | */ |
2380 | rc = server->ops->async_writev(wdata, | 2447 | if (!cur_len) |
2381 | cifs_uncached_writedata_release); | 2448 | return -EFAULT; |
2382 | } while (rc == -EAGAIN); | ||
2383 | 2449 | ||
2384 | return rc; | 2450 | /* |
2451 | * i + 1 now represents the number of pages we actually used in | ||
2452 | * the copy phase above. | ||
2453 | */ | ||
2454 | *num_pages = i + 1; | ||
2455 | return 0; | ||
2385 | } | 2456 | } |
2386 | 2457 | ||
2387 | static ssize_t | 2458 | static int |
2388 | cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset) | 2459 | cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from, |
2460 | struct cifsFileInfo *open_file, | ||
2461 | struct cifs_sb_info *cifs_sb, struct list_head *wdata_list) | ||
2389 | { | 2462 | { |
2390 | unsigned long nr_pages, i; | 2463 | int rc = 0; |
2391 | size_t bytes, copied, len, cur_len; | 2464 | size_t cur_len; |
2392 | ssize_t total_written = 0; | 2465 | unsigned long nr_pages, num_pages, i; |
2393 | loff_t offset; | 2466 | struct cifs_writedata *wdata; |
2394 | struct cifsFileInfo *open_file; | 2467 | struct iov_iter saved_from; |
2395 | struct cifs_tcon *tcon; | 2468 | loff_t saved_offset = offset; |
2396 | struct cifs_sb_info *cifs_sb; | ||
2397 | struct cifs_writedata *wdata, *tmp; | ||
2398 | struct list_head wdata_list; | ||
2399 | int rc; | ||
2400 | pid_t pid; | 2469 | pid_t pid; |
2401 | 2470 | struct TCP_Server_Info *server; | |
2402 | len = iov_iter_count(from); | ||
2403 | rc = generic_write_checks(file, poffset, &len, 0); | ||
2404 | if (rc) | ||
2405 | return rc; | ||
2406 | |||
2407 | if (!len) | ||
2408 | return 0; | ||
2409 | |||
2410 | iov_iter_truncate(from, len); | ||
2411 | |||
2412 | INIT_LIST_HEAD(&wdata_list); | ||
2413 | cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); | ||
2414 | open_file = file->private_data; | ||
2415 | tcon = tlink_tcon(open_file->tlink); | ||
2416 | |||
2417 | if (!tcon->ses->server->ops->async_writev) | ||
2418 | return -ENOSYS; | ||
2419 | |||
2420 | offset = *poffset; | ||
2421 | 2471 | ||
2422 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) | 2472 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) |
2423 | pid = open_file->pid; | 2473 | pid = open_file->pid; |
2424 | else | 2474 | else |
2425 | pid = current->tgid; | 2475 | pid = current->tgid; |
2426 | 2476 | ||
2477 | server = tlink_tcon(open_file->tlink)->ses->server; | ||
2478 | memcpy(&saved_from, from, sizeof(struct iov_iter)); | ||
2479 | |||
2427 | do { | 2480 | do { |
2428 | size_t save_len; | 2481 | unsigned int wsize, credits; |
2482 | |||
2483 | rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize, | ||
2484 | &wsize, &credits); | ||
2485 | if (rc) | ||
2486 | break; | ||
2429 | 2487 | ||
2430 | nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len); | 2488 | nr_pages = get_numpages(wsize, len, &cur_len); |
2431 | wdata = cifs_writedata_alloc(nr_pages, | 2489 | wdata = cifs_writedata_alloc(nr_pages, |
2432 | cifs_uncached_writev_complete); | 2490 | cifs_uncached_writev_complete); |
2433 | if (!wdata) { | 2491 | if (!wdata) { |
2434 | rc = -ENOMEM; | 2492 | rc = -ENOMEM; |
2493 | add_credits_and_wake_if(server, credits, 0); | ||
2435 | break; | 2494 | break; |
2436 | } | 2495 | } |
2437 | 2496 | ||
2438 | rc = cifs_write_allocate_pages(wdata->pages, nr_pages); | 2497 | rc = cifs_write_allocate_pages(wdata->pages, nr_pages); |
2439 | if (rc) { | 2498 | if (rc) { |
2440 | kfree(wdata); | 2499 | kfree(wdata); |
2500 | add_credits_and_wake_if(server, credits, 0); | ||
2441 | break; | 2501 | break; |
2442 | } | 2502 | } |
2443 | 2503 | ||
2444 | save_len = cur_len; | 2504 | num_pages = nr_pages; |
2445 | for (i = 0; i < nr_pages; i++) { | 2505 | rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages); |
2446 | bytes = min_t(size_t, cur_len, PAGE_SIZE); | 2506 | if (rc) { |
2447 | copied = copy_page_from_iter(wdata->pages[i], 0, bytes, | ||
2448 | from); | ||
2449 | cur_len -= copied; | ||
2450 | /* | ||
2451 | * If we didn't copy as much as we expected, then that | ||
2452 | * may mean we trod into an unmapped area. Stop copying | ||
2453 | * at that point. On the next pass through the big | ||
2454 | * loop, we'll likely end up getting a zero-length | ||
2455 | * write and bailing out of it. | ||
2456 | */ | ||
2457 | if (copied < bytes) | ||
2458 | break; | ||
2459 | } | ||
2460 | cur_len = save_len - cur_len; | ||
2461 | |||
2462 | /* | ||
2463 | * If we have no data to send, then that probably means that | ||
2464 | * the copy above failed altogether. That's most likely because | ||
2465 | * the address in the iovec was bogus. Set the rc to -EFAULT, | ||
2466 | * free anything we allocated and bail out. | ||
2467 | */ | ||
2468 | if (!cur_len) { | ||
2469 | for (i = 0; i < nr_pages; i++) | 2507 | for (i = 0; i < nr_pages; i++) |
2470 | put_page(wdata->pages[i]); | 2508 | put_page(wdata->pages[i]); |
2471 | kfree(wdata); | 2509 | kfree(wdata); |
2472 | rc = -EFAULT; | 2510 | add_credits_and_wake_if(server, credits, 0); |
2473 | break; | 2511 | break; |
2474 | } | 2512 | } |
2475 | 2513 | ||
2476 | /* | 2514 | /* |
2477 | * i + 1 now represents the number of pages we actually used in | 2515 | * Bring nr_pages down to the number of pages we actually used, |
2478 | * the copy phase above. Bring nr_pages down to that, and free | 2516 | * and free any pages that we didn't use. |
2479 | * any pages that we didn't use. | ||
2480 | */ | 2517 | */ |
2481 | for ( ; nr_pages > i + 1; nr_pages--) | 2518 | for ( ; nr_pages > num_pages; nr_pages--) |
2482 | put_page(wdata->pages[nr_pages - 1]); | 2519 | put_page(wdata->pages[nr_pages - 1]); |
2483 | 2520 | ||
2484 | wdata->sync_mode = WB_SYNC_ALL; | 2521 | wdata->sync_mode = WB_SYNC_ALL; |
@@ -2489,18 +2526,69 @@ cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset) | |||
2489 | wdata->bytes = cur_len; | 2526 | wdata->bytes = cur_len; |
2490 | wdata->pagesz = PAGE_SIZE; | 2527 | wdata->pagesz = PAGE_SIZE; |
2491 | wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE); | 2528 | wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE); |
2492 | rc = cifs_uncached_retry_writev(wdata); | 2529 | wdata->credits = credits; |
2530 | |||
2531 | if (!wdata->cfile->invalidHandle || | ||
2532 | !cifs_reopen_file(wdata->cfile, false)) | ||
2533 | rc = server->ops->async_writev(wdata, | ||
2534 | cifs_uncached_writedata_release); | ||
2493 | if (rc) { | 2535 | if (rc) { |
2536 | add_credits_and_wake_if(server, wdata->credits, 0); | ||
2494 | kref_put(&wdata->refcount, | 2537 | kref_put(&wdata->refcount, |
2495 | cifs_uncached_writedata_release); | 2538 | cifs_uncached_writedata_release); |
2539 | if (rc == -EAGAIN) { | ||
2540 | memcpy(from, &saved_from, | ||
2541 | sizeof(struct iov_iter)); | ||
2542 | iov_iter_advance(from, offset - saved_offset); | ||
2543 | continue; | ||
2544 | } | ||
2496 | break; | 2545 | break; |
2497 | } | 2546 | } |
2498 | 2547 | ||
2499 | list_add_tail(&wdata->list, &wdata_list); | 2548 | list_add_tail(&wdata->list, wdata_list); |
2500 | offset += cur_len; | 2549 | offset += cur_len; |
2501 | len -= cur_len; | 2550 | len -= cur_len; |
2502 | } while (len > 0); | 2551 | } while (len > 0); |
2503 | 2552 | ||
2553 | return rc; | ||
2554 | } | ||
2555 | |||
2556 | static ssize_t | ||
2557 | cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset) | ||
2558 | { | ||
2559 | size_t len; | ||
2560 | ssize_t total_written = 0; | ||
2561 | struct cifsFileInfo *open_file; | ||
2562 | struct cifs_tcon *tcon; | ||
2563 | struct cifs_sb_info *cifs_sb; | ||
2564 | struct cifs_writedata *wdata, *tmp; | ||
2565 | struct list_head wdata_list; | ||
2566 | struct iov_iter saved_from; | ||
2567 | int rc; | ||
2568 | |||
2569 | len = iov_iter_count(from); | ||
2570 | rc = generic_write_checks(file, poffset, &len, 0); | ||
2571 | if (rc) | ||
2572 | return rc; | ||
2573 | |||
2574 | if (!len) | ||
2575 | return 0; | ||
2576 | |||
2577 | iov_iter_truncate(from, len); | ||
2578 | |||
2579 | INIT_LIST_HEAD(&wdata_list); | ||
2580 | cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); | ||
2581 | open_file = file->private_data; | ||
2582 | tcon = tlink_tcon(open_file->tlink); | ||
2583 | |||
2584 | if (!tcon->ses->server->ops->async_writev) | ||
2585 | return -ENOSYS; | ||
2586 | |||
2587 | memcpy(&saved_from, from, sizeof(struct iov_iter)); | ||
2588 | |||
2589 | rc = cifs_write_from_iter(*poffset, len, from, open_file, cifs_sb, | ||
2590 | &wdata_list); | ||
2591 | |||
2504 | /* | 2592 | /* |
2505 | * If at least one write was successfully sent, then discard any rc | 2593 | * If at least one write was successfully sent, then discard any rc |
2506 | * value from the later writes. If the other write succeeds, then | 2594 | * value from the later writes. If the other write succeeds, then |
@@ -2529,7 +2617,25 @@ restart_loop: | |||
2529 | 2617 | ||
2530 | /* resend call if it's a retryable error */ | 2618 | /* resend call if it's a retryable error */ |
2531 | if (rc == -EAGAIN) { | 2619 | if (rc == -EAGAIN) { |
2532 | rc = cifs_uncached_retry_writev(wdata); | 2620 | struct list_head tmp_list; |
2621 | struct iov_iter tmp_from; | ||
2622 | |||
2623 | INIT_LIST_HEAD(&tmp_list); | ||
2624 | list_del_init(&wdata->list); | ||
2625 | |||
2626 | memcpy(&tmp_from, &saved_from, | ||
2627 | sizeof(struct iov_iter)); | ||
2628 | iov_iter_advance(&tmp_from, | ||
2629 | wdata->offset - *poffset); | ||
2630 | |||
2631 | rc = cifs_write_from_iter(wdata->offset, | ||
2632 | wdata->bytes, &tmp_from, | ||
2633 | open_file, cifs_sb, &tmp_list); | ||
2634 | |||
2635 | list_splice(&tmp_list, &wdata_list); | ||
2636 | |||
2637 | kref_put(&wdata->refcount, | ||
2638 | cifs_uncached_writedata_release); | ||
2533 | goto restart_loop; | 2639 | goto restart_loop; |
2534 | } | 2640 | } |
2535 | } | 2641 | } |
@@ -2722,26 +2828,6 @@ cifs_uncached_readdata_release(struct kref *refcount) | |||
2722 | cifs_readdata_release(refcount); | 2828 | cifs_readdata_release(refcount); |
2723 | } | 2829 | } |
2724 | 2830 | ||
2725 | static int | ||
2726 | cifs_retry_async_readv(struct cifs_readdata *rdata) | ||
2727 | { | ||
2728 | int rc; | ||
2729 | struct TCP_Server_Info *server; | ||
2730 | |||
2731 | server = tlink_tcon(rdata->cfile->tlink)->ses->server; | ||
2732 | |||
2733 | do { | ||
2734 | if (rdata->cfile->invalidHandle) { | ||
2735 | rc = cifs_reopen_file(rdata->cfile, true); | ||
2736 | if (rc != 0) | ||
2737 | continue; | ||
2738 | } | ||
2739 | rc = server->ops->async_readv(rdata); | ||
2740 | } while (rc == -EAGAIN); | ||
2741 | |||
2742 | return rc; | ||
2743 | } | ||
2744 | |||
2745 | /** | 2831 | /** |
2746 | * cifs_readdata_to_iov - copy data from pages in response to an iovec | 2832 | * cifs_readdata_to_iov - copy data from pages in response to an iovec |
2747 | * @rdata: the readdata response with list of pages holding data | 2833 | * @rdata: the readdata response with list of pages holding data |
@@ -2754,7 +2840,7 @@ cifs_retry_async_readv(struct cifs_readdata *rdata) | |||
2754 | static int | 2840 | static int |
2755 | cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter) | 2841 | cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter) |
2756 | { | 2842 | { |
2757 | size_t remaining = rdata->bytes; | 2843 | size_t remaining = rdata->got_bytes; |
2758 | unsigned int i; | 2844 | unsigned int i; |
2759 | 2845 | ||
2760 | for (i = 0; i < rdata->nr_pages; i++) { | 2846 | for (i = 0; i < rdata->nr_pages; i++) { |
@@ -2782,11 +2868,12 @@ static int | |||
2782 | cifs_uncached_read_into_pages(struct TCP_Server_Info *server, | 2868 | cifs_uncached_read_into_pages(struct TCP_Server_Info *server, |
2783 | struct cifs_readdata *rdata, unsigned int len) | 2869 | struct cifs_readdata *rdata, unsigned int len) |
2784 | { | 2870 | { |
2785 | int total_read = 0, result = 0; | 2871 | int result = 0; |
2786 | unsigned int i; | 2872 | unsigned int i; |
2787 | unsigned int nr_pages = rdata->nr_pages; | 2873 | unsigned int nr_pages = rdata->nr_pages; |
2788 | struct kvec iov; | 2874 | struct kvec iov; |
2789 | 2875 | ||
2876 | rdata->got_bytes = 0; | ||
2790 | rdata->tailsz = PAGE_SIZE; | 2877 | rdata->tailsz = PAGE_SIZE; |
2791 | for (i = 0; i < nr_pages; i++) { | 2878 | for (i = 0; i < nr_pages; i++) { |
2792 | struct page *page = rdata->pages[i]; | 2879 | struct page *page = rdata->pages[i]; |
@@ -2820,55 +2907,45 @@ cifs_uncached_read_into_pages(struct TCP_Server_Info *server, | |||
2820 | if (result < 0) | 2907 | if (result < 0) |
2821 | break; | 2908 | break; |
2822 | 2909 | ||
2823 | total_read += result; | 2910 | rdata->got_bytes += result; |
2824 | } | 2911 | } |
2825 | 2912 | ||
2826 | return total_read > 0 ? total_read : result; | 2913 | return rdata->got_bytes > 0 && result != -ECONNABORTED ? |
2914 | rdata->got_bytes : result; | ||
2827 | } | 2915 | } |
2828 | 2916 | ||
2829 | ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to) | 2917 | static int |
2918 | cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file, | ||
2919 | struct cifs_sb_info *cifs_sb, struct list_head *rdata_list) | ||
2830 | { | 2920 | { |
2831 | struct file *file = iocb->ki_filp; | 2921 | struct cifs_readdata *rdata; |
2832 | ssize_t rc; | 2922 | unsigned int npages, rsize, credits; |
2833 | size_t len, cur_len; | 2923 | size_t cur_len; |
2834 | ssize_t total_read = 0; | 2924 | int rc; |
2835 | loff_t offset = iocb->ki_pos; | ||
2836 | unsigned int npages; | ||
2837 | struct cifs_sb_info *cifs_sb; | ||
2838 | struct cifs_tcon *tcon; | ||
2839 | struct cifsFileInfo *open_file; | ||
2840 | struct cifs_readdata *rdata, *tmp; | ||
2841 | struct list_head rdata_list; | ||
2842 | pid_t pid; | 2925 | pid_t pid; |
2926 | struct TCP_Server_Info *server; | ||
2843 | 2927 | ||
2844 | len = iov_iter_count(to); | 2928 | server = tlink_tcon(open_file->tlink)->ses->server; |
2845 | if (!len) | ||
2846 | return 0; | ||
2847 | |||
2848 | INIT_LIST_HEAD(&rdata_list); | ||
2849 | cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); | ||
2850 | open_file = file->private_data; | ||
2851 | tcon = tlink_tcon(open_file->tlink); | ||
2852 | |||
2853 | if (!tcon->ses->server->ops->async_readv) | ||
2854 | return -ENOSYS; | ||
2855 | 2929 | ||
2856 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) | 2930 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) |
2857 | pid = open_file->pid; | 2931 | pid = open_file->pid; |
2858 | else | 2932 | else |
2859 | pid = current->tgid; | 2933 | pid = current->tgid; |
2860 | 2934 | ||
2861 | if ((file->f_flags & O_ACCMODE) == O_WRONLY) | ||
2862 | cifs_dbg(FYI, "attempting read on write only file instance\n"); | ||
2863 | |||
2864 | do { | 2935 | do { |
2865 | cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize); | 2936 | rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize, |
2937 | &rsize, &credits); | ||
2938 | if (rc) | ||
2939 | break; | ||
2940 | |||
2941 | cur_len = min_t(const size_t, len, rsize); | ||
2866 | npages = DIV_ROUND_UP(cur_len, PAGE_SIZE); | 2942 | npages = DIV_ROUND_UP(cur_len, PAGE_SIZE); |
2867 | 2943 | ||
2868 | /* allocate a readdata struct */ | 2944 | /* allocate a readdata struct */ |
2869 | rdata = cifs_readdata_alloc(npages, | 2945 | rdata = cifs_readdata_alloc(npages, |
2870 | cifs_uncached_readv_complete); | 2946 | cifs_uncached_readv_complete); |
2871 | if (!rdata) { | 2947 | if (!rdata) { |
2948 | add_credits_and_wake_if(server, credits, 0); | ||
2872 | rc = -ENOMEM; | 2949 | rc = -ENOMEM; |
2873 | break; | 2950 | break; |
2874 | } | 2951 | } |
@@ -2884,44 +2961,113 @@ ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to) | |||
2884 | rdata->pid = pid; | 2961 | rdata->pid = pid; |
2885 | rdata->pagesz = PAGE_SIZE; | 2962 | rdata->pagesz = PAGE_SIZE; |
2886 | rdata->read_into_pages = cifs_uncached_read_into_pages; | 2963 | rdata->read_into_pages = cifs_uncached_read_into_pages; |
2964 | rdata->credits = credits; | ||
2887 | 2965 | ||
2888 | rc = cifs_retry_async_readv(rdata); | 2966 | if (!rdata->cfile->invalidHandle || |
2967 | !cifs_reopen_file(rdata->cfile, true)) | ||
2968 | rc = server->ops->async_readv(rdata); | ||
2889 | error: | 2969 | error: |
2890 | if (rc) { | 2970 | if (rc) { |
2971 | add_credits_and_wake_if(server, rdata->credits, 0); | ||
2891 | kref_put(&rdata->refcount, | 2972 | kref_put(&rdata->refcount, |
2892 | cifs_uncached_readdata_release); | 2973 | cifs_uncached_readdata_release); |
2974 | if (rc == -EAGAIN) | ||
2975 | continue; | ||
2893 | break; | 2976 | break; |
2894 | } | 2977 | } |
2895 | 2978 | ||
2896 | list_add_tail(&rdata->list, &rdata_list); | 2979 | list_add_tail(&rdata->list, rdata_list); |
2897 | offset += cur_len; | 2980 | offset += cur_len; |
2898 | len -= cur_len; | 2981 | len -= cur_len; |
2899 | } while (len > 0); | 2982 | } while (len > 0); |
2900 | 2983 | ||
2984 | return rc; | ||
2985 | } | ||
2986 | |||
2987 | ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to) | ||
2988 | { | ||
2989 | struct file *file = iocb->ki_filp; | ||
2990 | ssize_t rc; | ||
2991 | size_t len; | ||
2992 | ssize_t total_read = 0; | ||
2993 | loff_t offset = iocb->ki_pos; | ||
2994 | struct cifs_sb_info *cifs_sb; | ||
2995 | struct cifs_tcon *tcon; | ||
2996 | struct cifsFileInfo *open_file; | ||
2997 | struct cifs_readdata *rdata, *tmp; | ||
2998 | struct list_head rdata_list; | ||
2999 | |||
3000 | len = iov_iter_count(to); | ||
3001 | if (!len) | ||
3002 | return 0; | ||
3003 | |||
3004 | INIT_LIST_HEAD(&rdata_list); | ||
3005 | cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); | ||
3006 | open_file = file->private_data; | ||
3007 | tcon = tlink_tcon(open_file->tlink); | ||
3008 | |||
3009 | if (!tcon->ses->server->ops->async_readv) | ||
3010 | return -ENOSYS; | ||
3011 | |||
3012 | if ((file->f_flags & O_ACCMODE) == O_WRONLY) | ||
3013 | cifs_dbg(FYI, "attempting read on write only file instance\n"); | ||
3014 | |||
3015 | rc = cifs_send_async_read(offset, len, open_file, cifs_sb, &rdata_list); | ||
3016 | |||
2901 | /* if at least one read request send succeeded, then reset rc */ | 3017 | /* if at least one read request send succeeded, then reset rc */ |
2902 | if (!list_empty(&rdata_list)) | 3018 | if (!list_empty(&rdata_list)) |
2903 | rc = 0; | 3019 | rc = 0; |
2904 | 3020 | ||
2905 | len = iov_iter_count(to); | 3021 | len = iov_iter_count(to); |
2906 | /* the loop below should proceed in the order of increasing offsets */ | 3022 | /* the loop below should proceed in the order of increasing offsets */ |
3023 | again: | ||
2907 | list_for_each_entry_safe(rdata, tmp, &rdata_list, list) { | 3024 | list_for_each_entry_safe(rdata, tmp, &rdata_list, list) { |
2908 | again: | ||
2909 | if (!rc) { | 3025 | if (!rc) { |
2910 | /* FIXME: freezable sleep too? */ | 3026 | /* FIXME: freezable sleep too? */ |
2911 | rc = wait_for_completion_killable(&rdata->done); | 3027 | rc = wait_for_completion_killable(&rdata->done); |
2912 | if (rc) | 3028 | if (rc) |
2913 | rc = -EINTR; | 3029 | rc = -EINTR; |
2914 | else if (rdata->result) { | 3030 | else if (rdata->result == -EAGAIN) { |
2915 | rc = rdata->result; | ||
2916 | /* resend call if it's a retryable error */ | 3031 | /* resend call if it's a retryable error */ |
2917 | if (rc == -EAGAIN) { | 3032 | struct list_head tmp_list; |
2918 | rc = cifs_retry_async_readv(rdata); | 3033 | unsigned int got_bytes = rdata->got_bytes; |
2919 | goto again; | 3034 | |
3035 | list_del_init(&rdata->list); | ||
3036 | INIT_LIST_HEAD(&tmp_list); | ||
3037 | |||
3038 | /* | ||
3039 | * Got a part of data and then reconnect has | ||
3040 | * happened -- fill the buffer and continue | ||
3041 | * reading. | ||
3042 | */ | ||
3043 | if (got_bytes && got_bytes < rdata->bytes) { | ||
3044 | rc = cifs_readdata_to_iov(rdata, to); | ||
3045 | if (rc) { | ||
3046 | kref_put(&rdata->refcount, | ||
3047 | cifs_uncached_readdata_release); | ||
3048 | continue; | ||
3049 | } | ||
2920 | } | 3050 | } |
2921 | } else { | 3051 | |
3052 | rc = cifs_send_async_read( | ||
3053 | rdata->offset + got_bytes, | ||
3054 | rdata->bytes - got_bytes, | ||
3055 | rdata->cfile, cifs_sb, | ||
3056 | &tmp_list); | ||
3057 | |||
3058 | list_splice(&tmp_list, &rdata_list); | ||
3059 | |||
3060 | kref_put(&rdata->refcount, | ||
3061 | cifs_uncached_readdata_release); | ||
3062 | goto again; | ||
3063 | } else if (rdata->result) | ||
3064 | rc = rdata->result; | ||
3065 | else | ||
2922 | rc = cifs_readdata_to_iov(rdata, to); | 3066 | rc = cifs_readdata_to_iov(rdata, to); |
2923 | } | ||
2924 | 3067 | ||
3068 | /* if there was a short read -- discard anything left */ | ||
3069 | if (rdata->got_bytes && rdata->got_bytes < rdata->bytes) | ||
3070 | rc = -ENODATA; | ||
2925 | } | 3071 | } |
2926 | list_del_init(&rdata->list); | 3072 | list_del_init(&rdata->list); |
2927 | kref_put(&rdata->refcount, cifs_uncached_readdata_release); | 3073 | kref_put(&rdata->refcount, cifs_uncached_readdata_release); |
@@ -3030,18 +3176,19 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset) | |||
3030 | 3176 | ||
3031 | for (total_read = 0, cur_offset = read_data; read_size > total_read; | 3177 | for (total_read = 0, cur_offset = read_data; read_size > total_read; |
3032 | total_read += bytes_read, cur_offset += bytes_read) { | 3178 | total_read += bytes_read, cur_offset += bytes_read) { |
3033 | current_read_size = min_t(uint, read_size - total_read, rsize); | 3179 | do { |
3034 | /* | 3180 | current_read_size = min_t(uint, read_size - total_read, |
3035 | * For windows me and 9x we do not want to request more than it | 3181 | rsize); |
3036 | * negotiated since it will refuse the read then. | 3182 | /* |
3037 | */ | 3183 | * For windows me and 9x we do not want to request more |
3038 | if ((tcon->ses) && !(tcon->ses->capabilities & | 3184 | * than it negotiated since it will refuse the read |
3185 | * then. | ||
3186 | */ | ||
3187 | if ((tcon->ses) && !(tcon->ses->capabilities & | ||
3039 | tcon->ses->server->vals->cap_large_files)) { | 3188 | tcon->ses->server->vals->cap_large_files)) { |
3040 | current_read_size = min_t(uint, current_read_size, | 3189 | current_read_size = min_t(uint, |
3041 | CIFSMaxBufSize); | 3190 | current_read_size, CIFSMaxBufSize); |
3042 | } | 3191 | } |
3043 | rc = -EAGAIN; | ||
3044 | while (rc == -EAGAIN) { | ||
3045 | if (open_file->invalidHandle) { | 3192 | if (open_file->invalidHandle) { |
3046 | rc = cifs_reopen_file(open_file, true); | 3193 | rc = cifs_reopen_file(open_file, true); |
3047 | if (rc != 0) | 3194 | if (rc != 0) |
@@ -3054,7 +3201,8 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset) | |||
3054 | rc = server->ops->sync_read(xid, open_file, &io_parms, | 3201 | rc = server->ops->sync_read(xid, open_file, &io_parms, |
3055 | &bytes_read, &cur_offset, | 3202 | &bytes_read, &cur_offset, |
3056 | &buf_type); | 3203 | &buf_type); |
3057 | } | 3204 | } while (rc == -EAGAIN); |
3205 | |||
3058 | if (rc || (bytes_read == 0)) { | 3206 | if (rc || (bytes_read == 0)) { |
3059 | if (total_read) { | 3207 | if (total_read) { |
3060 | break; | 3208 | break; |
@@ -3133,25 +3281,30 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma) | |||
3133 | static void | 3281 | static void |
3134 | cifs_readv_complete(struct work_struct *work) | 3282 | cifs_readv_complete(struct work_struct *work) |
3135 | { | 3283 | { |
3136 | unsigned int i; | 3284 | unsigned int i, got_bytes; |
3137 | struct cifs_readdata *rdata = container_of(work, | 3285 | struct cifs_readdata *rdata = container_of(work, |
3138 | struct cifs_readdata, work); | 3286 | struct cifs_readdata, work); |
3139 | 3287 | ||
3288 | got_bytes = rdata->got_bytes; | ||
3140 | for (i = 0; i < rdata->nr_pages; i++) { | 3289 | for (i = 0; i < rdata->nr_pages; i++) { |
3141 | struct page *page = rdata->pages[i]; | 3290 | struct page *page = rdata->pages[i]; |
3142 | 3291 | ||
3143 | lru_cache_add_file(page); | 3292 | lru_cache_add_file(page); |
3144 | 3293 | ||
3145 | if (rdata->result == 0) { | 3294 | if (rdata->result == 0 || |
3295 | (rdata->result == -EAGAIN && got_bytes)) { | ||
3146 | flush_dcache_page(page); | 3296 | flush_dcache_page(page); |
3147 | SetPageUptodate(page); | 3297 | SetPageUptodate(page); |
3148 | } | 3298 | } |
3149 | 3299 | ||
3150 | unlock_page(page); | 3300 | unlock_page(page); |
3151 | 3301 | ||
3152 | if (rdata->result == 0) | 3302 | if (rdata->result == 0 || |
3303 | (rdata->result == -EAGAIN && got_bytes)) | ||
3153 | cifs_readpage_to_fscache(rdata->mapping->host, page); | 3304 | cifs_readpage_to_fscache(rdata->mapping->host, page); |
3154 | 3305 | ||
3306 | got_bytes -= min_t(unsigned int, PAGE_CACHE_SIZE, got_bytes); | ||
3307 | |||
3155 | page_cache_release(page); | 3308 | page_cache_release(page); |
3156 | rdata->pages[i] = NULL; | 3309 | rdata->pages[i] = NULL; |
3157 | } | 3310 | } |
@@ -3162,7 +3315,7 @@ static int | |||
3162 | cifs_readpages_read_into_pages(struct TCP_Server_Info *server, | 3315 | cifs_readpages_read_into_pages(struct TCP_Server_Info *server, |
3163 | struct cifs_readdata *rdata, unsigned int len) | 3316 | struct cifs_readdata *rdata, unsigned int len) |
3164 | { | 3317 | { |
3165 | int total_read = 0, result = 0; | 3318 | int result = 0; |
3166 | unsigned int i; | 3319 | unsigned int i; |
3167 | u64 eof; | 3320 | u64 eof; |
3168 | pgoff_t eof_index; | 3321 | pgoff_t eof_index; |
@@ -3174,6 +3327,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server, | |||
3174 | eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0; | 3327 | eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0; |
3175 | cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index); | 3328 | cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index); |
3176 | 3329 | ||
3330 | rdata->got_bytes = 0; | ||
3177 | rdata->tailsz = PAGE_CACHE_SIZE; | 3331 | rdata->tailsz = PAGE_CACHE_SIZE; |
3178 | for (i = 0; i < nr_pages; i++) { | 3332 | for (i = 0; i < nr_pages; i++) { |
3179 | struct page *page = rdata->pages[i]; | 3333 | struct page *page = rdata->pages[i]; |
@@ -3228,10 +3382,70 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server, | |||
3228 | if (result < 0) | 3382 | if (result < 0) |
3229 | break; | 3383 | break; |
3230 | 3384 | ||
3231 | total_read += result; | 3385 | rdata->got_bytes += result; |
3232 | } | 3386 | } |
3233 | 3387 | ||
3234 | return total_read > 0 ? total_read : result; | 3388 | return rdata->got_bytes > 0 && result != -ECONNABORTED ? |
3389 | rdata->got_bytes : result; | ||
3390 | } | ||
3391 | |||
3392 | static int | ||
3393 | readpages_get_pages(struct address_space *mapping, struct list_head *page_list, | ||
3394 | unsigned int rsize, struct list_head *tmplist, | ||
3395 | unsigned int *nr_pages, loff_t *offset, unsigned int *bytes) | ||
3396 | { | ||
3397 | struct page *page, *tpage; | ||
3398 | unsigned int expected_index; | ||
3399 | int rc; | ||
3400 | |||
3401 | INIT_LIST_HEAD(tmplist); | ||
3402 | |||
3403 | page = list_entry(page_list->prev, struct page, lru); | ||
3404 | |||
3405 | /* | ||
3406 | * Lock the page and put it in the cache. Since no one else | ||
3407 | * should have access to this page, we're safe to simply set | ||
3408 | * PG_locked without checking it first. | ||
3409 | */ | ||
3410 | __set_page_locked(page); | ||
3411 | rc = add_to_page_cache_locked(page, mapping, | ||
3412 | page->index, GFP_KERNEL); | ||
3413 | |||
3414 | /* give up if we can't stick it in the cache */ | ||
3415 | if (rc) { | ||
3416 | __clear_page_locked(page); | ||
3417 | return rc; | ||
3418 | } | ||
3419 | |||
3420 | /* move first page to the tmplist */ | ||
3421 | *offset = (loff_t)page->index << PAGE_CACHE_SHIFT; | ||
3422 | *bytes = PAGE_CACHE_SIZE; | ||
3423 | *nr_pages = 1; | ||
3424 | list_move_tail(&page->lru, tmplist); | ||
3425 | |||
3426 | /* now try and add more pages onto the request */ | ||
3427 | expected_index = page->index + 1; | ||
3428 | list_for_each_entry_safe_reverse(page, tpage, page_list, lru) { | ||
3429 | /* discontinuity ? */ | ||
3430 | if (page->index != expected_index) | ||
3431 | break; | ||
3432 | |||
3433 | /* would this page push the read over the rsize? */ | ||
3434 | if (*bytes + PAGE_CACHE_SIZE > rsize) | ||
3435 | break; | ||
3436 | |||
3437 | __set_page_locked(page); | ||
3438 | if (add_to_page_cache_locked(page, mapping, page->index, | ||
3439 | GFP_KERNEL)) { | ||
3440 | __clear_page_locked(page); | ||
3441 | break; | ||
3442 | } | ||
3443 | list_move_tail(&page->lru, tmplist); | ||
3444 | (*bytes) += PAGE_CACHE_SIZE; | ||
3445 | expected_index++; | ||
3446 | (*nr_pages)++; | ||
3447 | } | ||
3448 | return rc; | ||
3235 | } | 3449 | } |
3236 | 3450 | ||
3237 | static int cifs_readpages(struct file *file, struct address_space *mapping, | 3451 | static int cifs_readpages(struct file *file, struct address_space *mapping, |
@@ -3241,19 +3455,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, | |||
3241 | struct list_head tmplist; | 3455 | struct list_head tmplist; |
3242 | struct cifsFileInfo *open_file = file->private_data; | 3456 | struct cifsFileInfo *open_file = file->private_data; |
3243 | struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); | 3457 | struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); |
3244 | unsigned int rsize = cifs_sb->rsize; | 3458 | struct TCP_Server_Info *server; |
3245 | pid_t pid; | 3459 | pid_t pid; |
3246 | 3460 | ||
3247 | /* | 3461 | /* |
3248 | * Give up immediately if rsize is too small to read an entire page. | ||
3249 | * The VFS will fall back to readpage. We should never reach this | ||
3250 | * point however since we set ra_pages to 0 when the rsize is smaller | ||
3251 | * than a cache page. | ||
3252 | */ | ||
3253 | if (unlikely(rsize < PAGE_CACHE_SIZE)) | ||
3254 | return 0; | ||
3255 | |||
3256 | /* | ||
3257 | * Reads as many pages as possible from fscache. Returns -ENOBUFS | 3462 | * Reads as many pages as possible from fscache. Returns -ENOBUFS |
3258 | * immediately if the cookie is negative | 3463 | * immediately if the cookie is negative |
3259 | * | 3464 | * |
@@ -3271,7 +3476,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, | |||
3271 | pid = current->tgid; | 3476 | pid = current->tgid; |
3272 | 3477 | ||
3273 | rc = 0; | 3478 | rc = 0; |
3274 | INIT_LIST_HEAD(&tmplist); | 3479 | server = tlink_tcon(open_file->tlink)->ses->server; |
3275 | 3480 | ||
3276 | cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n", | 3481 | cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n", |
3277 | __func__, file, mapping, num_pages); | 3482 | __func__, file, mapping, num_pages); |
@@ -3288,58 +3493,35 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, | |||
3288 | * the rdata->pages, then we want them in increasing order. | 3493 | * the rdata->pages, then we want them in increasing order. |
3289 | */ | 3494 | */ |
3290 | while (!list_empty(page_list)) { | 3495 | while (!list_empty(page_list)) { |
3291 | unsigned int i; | 3496 | unsigned int i, nr_pages, bytes, rsize; |
3292 | unsigned int bytes = PAGE_CACHE_SIZE; | ||
3293 | unsigned int expected_index; | ||
3294 | unsigned int nr_pages = 1; | ||
3295 | loff_t offset; | 3497 | loff_t offset; |
3296 | struct page *page, *tpage; | 3498 | struct page *page, *tpage; |
3297 | struct cifs_readdata *rdata; | 3499 | struct cifs_readdata *rdata; |
3500 | unsigned credits; | ||
3298 | 3501 | ||
3299 | page = list_entry(page_list->prev, struct page, lru); | 3502 | rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize, |
3503 | &rsize, &credits); | ||
3504 | if (rc) | ||
3505 | break; | ||
3300 | 3506 | ||
3301 | /* | 3507 | /* |
3302 | * Lock the page and put it in the cache. Since no one else | 3508 | * Give up immediately if rsize is too small to read an entire |
3303 | * should have access to this page, we're safe to simply set | 3509 | * page. The VFS will fall back to readpage. We should never |
3304 | * PG_locked without checking it first. | 3510 | * reach this point however since we set ra_pages to 0 when the |
3511 | * rsize is smaller than a cache page. | ||
3305 | */ | 3512 | */ |
3306 | __set_page_locked(page); | 3513 | if (unlikely(rsize < PAGE_CACHE_SIZE)) { |
3307 | rc = add_to_page_cache_locked(page, mapping, | 3514 | add_credits_and_wake_if(server, credits, 0); |
3308 | page->index, GFP_KERNEL); | 3515 | return 0; |
3516 | } | ||
3309 | 3517 | ||
3310 | /* give up if we can't stick it in the cache */ | 3518 | rc = readpages_get_pages(mapping, page_list, rsize, &tmplist, |
3519 | &nr_pages, &offset, &bytes); | ||
3311 | if (rc) { | 3520 | if (rc) { |
3312 | __clear_page_locked(page); | 3521 | add_credits_and_wake_if(server, credits, 0); |
3313 | break; | 3522 | break; |
3314 | } | 3523 | } |
3315 | 3524 | ||
3316 | /* move first page to the tmplist */ | ||
3317 | offset = (loff_t)page->index << PAGE_CACHE_SHIFT; | ||
3318 | list_move_tail(&page->lru, &tmplist); | ||
3319 | |||
3320 | /* now try and add more pages onto the request */ | ||
3321 | expected_index = page->index + 1; | ||
3322 | list_for_each_entry_safe_reverse(page, tpage, page_list, lru) { | ||
3323 | /* discontinuity ? */ | ||
3324 | if (page->index != expected_index) | ||
3325 | break; | ||
3326 | |||
3327 | /* would this page push the read over the rsize? */ | ||
3328 | if (bytes + PAGE_CACHE_SIZE > rsize) | ||
3329 | break; | ||
3330 | |||
3331 | __set_page_locked(page); | ||
3332 | if (add_to_page_cache_locked(page, mapping, | ||
3333 | page->index, GFP_KERNEL)) { | ||
3334 | __clear_page_locked(page); | ||
3335 | break; | ||
3336 | } | ||
3337 | list_move_tail(&page->lru, &tmplist); | ||
3338 | bytes += PAGE_CACHE_SIZE; | ||
3339 | expected_index++; | ||
3340 | nr_pages++; | ||
3341 | } | ||
3342 | |||
3343 | rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete); | 3525 | rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete); |
3344 | if (!rdata) { | 3526 | if (!rdata) { |
3345 | /* best to give up if we're out of mem */ | 3527 | /* best to give up if we're out of mem */ |
@@ -3350,6 +3532,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, | |||
3350 | page_cache_release(page); | 3532 | page_cache_release(page); |
3351 | } | 3533 | } |
3352 | rc = -ENOMEM; | 3534 | rc = -ENOMEM; |
3535 | add_credits_and_wake_if(server, credits, 0); | ||
3353 | break; | 3536 | break; |
3354 | } | 3537 | } |
3355 | 3538 | ||
@@ -3360,21 +3543,32 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, | |||
3360 | rdata->pid = pid; | 3543 | rdata->pid = pid; |
3361 | rdata->pagesz = PAGE_CACHE_SIZE; | 3544 | rdata->pagesz = PAGE_CACHE_SIZE; |
3362 | rdata->read_into_pages = cifs_readpages_read_into_pages; | 3545 | rdata->read_into_pages = cifs_readpages_read_into_pages; |
3546 | rdata->credits = credits; | ||
3363 | 3547 | ||
3364 | list_for_each_entry_safe(page, tpage, &tmplist, lru) { | 3548 | list_for_each_entry_safe(page, tpage, &tmplist, lru) { |
3365 | list_del(&page->lru); | 3549 | list_del(&page->lru); |
3366 | rdata->pages[rdata->nr_pages++] = page; | 3550 | rdata->pages[rdata->nr_pages++] = page; |
3367 | } | 3551 | } |
3368 | 3552 | ||
3369 | rc = cifs_retry_async_readv(rdata); | 3553 | if (!rdata->cfile->invalidHandle || |
3370 | if (rc != 0) { | 3554 | !cifs_reopen_file(rdata->cfile, true)) |
3555 | rc = server->ops->async_readv(rdata); | ||
3556 | if (rc) { | ||
3557 | add_credits_and_wake_if(server, rdata->credits, 0); | ||
3371 | for (i = 0; i < rdata->nr_pages; i++) { | 3558 | for (i = 0; i < rdata->nr_pages; i++) { |
3372 | page = rdata->pages[i]; | 3559 | page = rdata->pages[i]; |
3373 | lru_cache_add_file(page); | 3560 | lru_cache_add_file(page); |
3374 | unlock_page(page); | 3561 | unlock_page(page); |
3375 | page_cache_release(page); | 3562 | page_cache_release(page); |
3563 | if (rc == -EAGAIN) | ||
3564 | list_add_tail(&page->lru, &tmplist); | ||
3376 | } | 3565 | } |
3377 | kref_put(&rdata->refcount, cifs_readdata_release); | 3566 | kref_put(&rdata->refcount, cifs_readdata_release); |
3567 | if (rc == -EAGAIN) { | ||
3568 | /* Re-add pages to the page_list and retry */ | ||
3569 | list_splice(&tmplist, page_list); | ||
3570 | continue; | ||
3571 | } | ||
3378 | break; | 3572 | break; |
3379 | } | 3573 | } |
3380 | 3574 | ||
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 6bf55d0ed494..81340c6253eb 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c | |||
@@ -226,6 +226,15 @@ cifs_small_buf_release(void *buf_to_free) | |||
226 | return; | 226 | return; |
227 | } | 227 | } |
228 | 228 | ||
229 | void | ||
230 | free_rsp_buf(int resp_buftype, void *rsp) | ||
231 | { | ||
232 | if (resp_buftype == CIFS_SMALL_BUFFER) | ||
233 | cifs_small_buf_release(rsp); | ||
234 | else if (resp_buftype == CIFS_LARGE_BUFFER) | ||
235 | cifs_buf_release(rsp); | ||
236 | } | ||
237 | |||
229 | /* NB: MID can not be set if treeCon not passed in, in that | 238 | /* NB: MID can not be set if treeCon not passed in, in that |
230 | case it is responsbility of caller to set the mid */ | 239 | case it is responsbility of caller to set the mid */ |
231 | void | 240 | void |
@@ -414,7 +423,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) | |||
414 | return true; | 423 | return true; |
415 | } | 424 | } |
416 | if (pSMBr->hdr.Status.CifsError) { | 425 | if (pSMBr->hdr.Status.CifsError) { |
417 | cifs_dbg(FYI, "notify err 0x%d\n", | 426 | cifs_dbg(FYI, "notify err 0x%x\n", |
418 | pSMBr->hdr.Status.CifsError); | 427 | pSMBr->hdr.Status.CifsError); |
419 | return true; | 428 | return true; |
420 | } | 429 | } |
@@ -441,7 +450,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) | |||
441 | if (pSMB->hdr.WordCount != 8) | 450 | if (pSMB->hdr.WordCount != 8) |
442 | return false; | 451 | return false; |
443 | 452 | ||
444 | cifs_dbg(FYI, "oplock type 0x%d level 0x%d\n", | 453 | cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n", |
445 | pSMB->LockType, pSMB->OplockLevel); | 454 | pSMB->LockType, pSMB->OplockLevel); |
446 | if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE)) | 455 | if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE)) |
447 | return false; | 456 | return false; |
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index e87387dbf39f..39ee32688eac 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c | |||
@@ -520,382 +520,559 @@ select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) | |||
520 | } | 520 | } |
521 | } | 521 | } |
522 | 522 | ||
523 | int | 523 | struct sess_data { |
524 | CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses, | 524 | unsigned int xid; |
525 | const struct nls_table *nls_cp) | 525 | struct cifs_ses *ses; |
526 | struct nls_table *nls_cp; | ||
527 | void (*func)(struct sess_data *); | ||
528 | int result; | ||
529 | |||
530 | /* we will send the SMB in three pieces: | ||
531 | * a fixed length beginning part, an optional | ||
532 | * SPNEGO blob (which can be zero length), and a | ||
533 | * last part which will include the strings | ||
534 | * and rest of bcc area. This allows us to avoid | ||
535 | * a large buffer 17K allocation | ||
536 | */ | ||
537 | int buf0_type; | ||
538 | struct kvec iov[3]; | ||
539 | }; | ||
540 | |||
541 | static int | ||
542 | sess_alloc_buffer(struct sess_data *sess_data, int wct) | ||
526 | { | 543 | { |
527 | int rc = 0; | 544 | int rc; |
528 | int wct; | 545 | struct cifs_ses *ses = sess_data->ses; |
529 | struct smb_hdr *smb_buf; | 546 | struct smb_hdr *smb_buf; |
530 | char *bcc_ptr; | ||
531 | char *str_area; | ||
532 | SESSION_SETUP_ANDX *pSMB; | ||
533 | __u32 capabilities; | ||
534 | __u16 count; | ||
535 | int resp_buf_type; | ||
536 | struct kvec iov[3]; | ||
537 | enum securityEnum type; | ||
538 | __u16 action, bytes_remaining; | ||
539 | struct key *spnego_key = NULL; | ||
540 | __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */ | ||
541 | u16 blob_len; | ||
542 | char *ntlmsspblob = NULL; | ||
543 | 547 | ||
544 | if (ses == NULL) { | 548 | rc = small_smb_init_no_tc(SMB_COM_SESSION_SETUP_ANDX, wct, ses, |
545 | WARN(1, "%s: ses == NULL!", __func__); | 549 | (void **)&smb_buf); |
546 | return -EINVAL; | ||
547 | } | ||
548 | 550 | ||
549 | type = select_sectype(ses->server, ses->sectype); | 551 | if (rc) |
550 | cifs_dbg(FYI, "sess setup type %d\n", type); | 552 | return rc; |
551 | if (type == Unspecified) { | 553 | |
552 | cifs_dbg(VFS, | 554 | sess_data->iov[0].iov_base = (char *)smb_buf; |
553 | "Unable to select appropriate authentication method!"); | 555 | sess_data->iov[0].iov_len = be32_to_cpu(smb_buf->smb_buf_length) + 4; |
554 | return -EINVAL; | 556 | /* |
557 | * This variable will be used to clear the buffer | ||
558 | * allocated above in case of any error in the calling function. | ||
559 | */ | ||
560 | sess_data->buf0_type = CIFS_SMALL_BUFFER; | ||
561 | |||
562 | /* 2000 big enough to fit max user, domain, NOS name etc. */ | ||
563 | sess_data->iov[2].iov_base = kmalloc(2000, GFP_KERNEL); | ||
564 | if (!sess_data->iov[2].iov_base) { | ||
565 | rc = -ENOMEM; | ||
566 | goto out_free_smb_buf; | ||
555 | } | 567 | } |
556 | 568 | ||
557 | if (type == RawNTLMSSP) { | 569 | return 0; |
558 | /* if memory allocation is successful, caller of this function | 570 | |
559 | * frees it. | 571 | out_free_smb_buf: |
560 | */ | 572 | kfree(smb_buf); |
561 | ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL); | 573 | sess_data->iov[0].iov_base = NULL; |
562 | if (!ses->ntlmssp) | 574 | sess_data->iov[0].iov_len = 0; |
563 | return -ENOMEM; | 575 | sess_data->buf0_type = CIFS_NO_BUFFER; |
564 | ses->ntlmssp->sesskey_per_smbsess = false; | 576 | return rc; |
577 | } | ||
578 | |||
579 | static void | ||
580 | sess_free_buffer(struct sess_data *sess_data) | ||
581 | { | ||
565 | 582 | ||
583 | free_rsp_buf(sess_data->buf0_type, sess_data->iov[0].iov_base); | ||
584 | sess_data->buf0_type = CIFS_NO_BUFFER; | ||
585 | kfree(sess_data->iov[2].iov_base); | ||
586 | } | ||
587 | |||
588 | static int | ||
589 | sess_establish_session(struct sess_data *sess_data) | ||
590 | { | ||
591 | struct cifs_ses *ses = sess_data->ses; | ||
592 | |||
593 | mutex_lock(&ses->server->srv_mutex); | ||
594 | if (!ses->server->session_estab) { | ||
595 | if (ses->server->sign) { | ||
596 | ses->server->session_key.response = | ||
597 | kmemdup(ses->auth_key.response, | ||
598 | ses->auth_key.len, GFP_KERNEL); | ||
599 | if (!ses->server->session_key.response) { | ||
600 | mutex_unlock(&ses->server->srv_mutex); | ||
601 | return -ENOMEM; | ||
602 | } | ||
603 | ses->server->session_key.len = | ||
604 | ses->auth_key.len; | ||
605 | } | ||
606 | ses->server->sequence_number = 0x2; | ||
607 | ses->server->session_estab = true; | ||
566 | } | 608 | } |
609 | mutex_unlock(&ses->server->srv_mutex); | ||
567 | 610 | ||
568 | ssetup_ntlmssp_authenticate: | 611 | cifs_dbg(FYI, "CIFS session established successfully\n"); |
569 | if (phase == NtLmChallenge) | 612 | spin_lock(&GlobalMid_Lock); |
570 | phase = NtLmAuthenticate; /* if ntlmssp, now final phase */ | 613 | ses->status = CifsGood; |
614 | ses->need_reconnect = false; | ||
615 | spin_unlock(&GlobalMid_Lock); | ||
571 | 616 | ||
572 | if (type == LANMAN) { | 617 | return 0; |
573 | #ifndef CONFIG_CIFS_WEAK_PW_HASH | 618 | } |
574 | /* LANMAN and plaintext are less secure and off by default. | ||
575 | So we make this explicitly be turned on in kconfig (in the | ||
576 | build) and turned on at runtime (changed from the default) | ||
577 | in proc/fs/cifs or via mount parm. Unfortunately this is | ||
578 | needed for old Win (e.g. Win95), some obscure NAS and OS/2 */ | ||
579 | return -EOPNOTSUPP; | ||
580 | #endif | ||
581 | wct = 10; /* lanman 2 style sessionsetup */ | ||
582 | } else if ((type == NTLM) || (type == NTLMv2)) { | ||
583 | /* For NTLMv2 failures eventually may need to retry NTLM */ | ||
584 | wct = 13; /* old style NTLM sessionsetup */ | ||
585 | } else /* same size: negotiate or auth, NTLMSSP or extended security */ | ||
586 | wct = 12; | ||
587 | 619 | ||
588 | rc = small_smb_init_no_tc(SMB_COM_SESSION_SETUP_ANDX, wct, ses, | 620 | static int |
589 | (void **)&smb_buf); | 621 | sess_sendreceive(struct sess_data *sess_data) |
590 | if (rc) | 622 | { |
591 | return rc; | 623 | int rc; |
624 | struct smb_hdr *smb_buf = (struct smb_hdr *) sess_data->iov[0].iov_base; | ||
625 | __u16 count; | ||
592 | 626 | ||
593 | pSMB = (SESSION_SETUP_ANDX *)smb_buf; | 627 | count = sess_data->iov[1].iov_len + sess_data->iov[2].iov_len; |
628 | smb_buf->smb_buf_length = | ||
629 | cpu_to_be32(be32_to_cpu(smb_buf->smb_buf_length) + count); | ||
630 | put_bcc(count, smb_buf); | ||
631 | |||
632 | rc = SendReceive2(sess_data->xid, sess_data->ses, | ||
633 | sess_data->iov, 3 /* num_iovecs */, | ||
634 | &sess_data->buf0_type, | ||
635 | CIFS_LOG_ERROR); | ||
636 | |||
637 | return rc; | ||
638 | } | ||
594 | 639 | ||
640 | /* | ||
641 | * LANMAN and plaintext are less secure and off by default. | ||
642 | * So we make this explicitly be turned on in kconfig (in the | ||
643 | * build) and turned on at runtime (changed from the default) | ||
644 | * in proc/fs/cifs or via mount parm. Unfortunately this is | ||
645 | * needed for old Win (e.g. Win95), some obscure NAS and OS/2 | ||
646 | */ | ||
647 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | ||
648 | static void | ||
649 | sess_auth_lanman(struct sess_data *sess_data) | ||
650 | { | ||
651 | int rc = 0; | ||
652 | struct smb_hdr *smb_buf; | ||
653 | SESSION_SETUP_ANDX *pSMB; | ||
654 | char *bcc_ptr; | ||
655 | struct cifs_ses *ses = sess_data->ses; | ||
656 | char lnm_session_key[CIFS_AUTH_RESP_SIZE]; | ||
657 | __u32 capabilities; | ||
658 | __u16 bytes_remaining; | ||
659 | |||
660 | /* lanman 2 style sessionsetup */ | ||
661 | /* wct = 10 */ | ||
662 | rc = sess_alloc_buffer(sess_data, 10); | ||
663 | if (rc) | ||
664 | goto out; | ||
665 | |||
666 | pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; | ||
667 | bcc_ptr = sess_data->iov[2].iov_base; | ||
595 | capabilities = cifs_ssetup_hdr(ses, pSMB); | 668 | capabilities = cifs_ssetup_hdr(ses, pSMB); |
596 | 669 | ||
597 | /* we will send the SMB in three pieces: | 670 | pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE; |
598 | a fixed length beginning part, an optional | ||
599 | SPNEGO blob (which can be zero length), and a | ||
600 | last part which will include the strings | ||
601 | and rest of bcc area. This allows us to avoid | ||
602 | a large buffer 17K allocation */ | ||
603 | iov[0].iov_base = (char *)pSMB; | ||
604 | iov[0].iov_len = be32_to_cpu(smb_buf->smb_buf_length) + 4; | ||
605 | |||
606 | /* setting this here allows the code at the end of the function | ||
607 | to free the request buffer if there's an error */ | ||
608 | resp_buf_type = CIFS_SMALL_BUFFER; | ||
609 | 671 | ||
610 | /* 2000 big enough to fit max user, domain, NOS name etc. */ | 672 | /* no capabilities flags in old lanman negotiation */ |
611 | str_area = kmalloc(2000, GFP_KERNEL); | 673 | pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE); |
612 | if (str_area == NULL) { | ||
613 | rc = -ENOMEM; | ||
614 | goto ssetup_exit; | ||
615 | } | ||
616 | bcc_ptr = str_area; | ||
617 | 674 | ||
618 | iov[1].iov_base = NULL; | 675 | /* Calculate hash with password and copy into bcc_ptr. |
619 | iov[1].iov_len = 0; | 676 | * Encryption Key (stored as in cryptkey) gets used if the |
677 | * security mode bit in Negottiate Protocol response states | ||
678 | * to use challenge/response method (i.e. Password bit is 1). | ||
679 | */ | ||
680 | rc = calc_lanman_hash(ses->password, ses->server->cryptkey, | ||
681 | ses->server->sec_mode & SECMODE_PW_ENCRYPT ? | ||
682 | true : false, lnm_session_key); | ||
620 | 683 | ||
621 | if (type == LANMAN) { | 684 | memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE); |
622 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | 685 | bcc_ptr += CIFS_AUTH_RESP_SIZE; |
623 | char lnm_session_key[CIFS_AUTH_RESP_SIZE]; | 686 | |
687 | /* | ||
688 | * can not sign if LANMAN negotiated so no need | ||
689 | * to calculate signing key? but what if server | ||
690 | * changed to do higher than lanman dialect and | ||
691 | * we reconnected would we ever calc signing_key? | ||
692 | */ | ||
624 | 693 | ||
625 | pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE; | 694 | cifs_dbg(FYI, "Negotiating LANMAN setting up strings\n"); |
695 | /* Unicode not allowed for LANMAN dialects */ | ||
696 | ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp); | ||
626 | 697 | ||
627 | /* no capabilities flags in old lanman negotiation */ | 698 | sess_data->iov[2].iov_len = (long) bcc_ptr - |
699 | (long) sess_data->iov[2].iov_base; | ||
628 | 700 | ||
629 | pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE); | 701 | rc = sess_sendreceive(sess_data); |
702 | if (rc) | ||
703 | goto out; | ||
630 | 704 | ||
631 | /* Calculate hash with password and copy into bcc_ptr. | 705 | pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; |
632 | * Encryption Key (stored as in cryptkey) gets used if the | 706 | smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base; |
633 | * security mode bit in Negottiate Protocol response states | ||
634 | * to use challenge/response method (i.e. Password bit is 1). | ||
635 | */ | ||
636 | 707 | ||
637 | rc = calc_lanman_hash(ses->password, ses->server->cryptkey, | 708 | /* lanman response has a word count of 3 */ |
638 | ses->server->sec_mode & SECMODE_PW_ENCRYPT ? | 709 | if (smb_buf->WordCount != 3) { |
639 | true : false, lnm_session_key); | 710 | rc = -EIO; |
711 | cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount); | ||
712 | goto out; | ||
713 | } | ||
640 | 714 | ||
641 | memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE); | 715 | if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN) |
642 | bcc_ptr += CIFS_AUTH_RESP_SIZE; | 716 | cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? */ |
717 | |||
718 | ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */ | ||
719 | cifs_dbg(FYI, "UID = %llu\n", ses->Suid); | ||
643 | 720 | ||
644 | /* can not sign if LANMAN negotiated so no need | 721 | bytes_remaining = get_bcc(smb_buf); |
645 | to calculate signing key? but what if server | 722 | bcc_ptr = pByteArea(smb_buf); |
646 | changed to do higher than lanman dialect and | ||
647 | we reconnected would we ever calc signing_key? */ | ||
648 | 723 | ||
649 | cifs_dbg(FYI, "Negotiating LANMAN setting up strings\n"); | 724 | /* BB check if Unicode and decode strings */ |
650 | /* Unicode not allowed for LANMAN dialects */ | 725 | if (bytes_remaining == 0) { |
651 | ascii_ssetup_strings(&bcc_ptr, ses, nls_cp); | 726 | /* no string area to decode, do nothing */ |
727 | } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) { | ||
728 | /* unicode string area must be word-aligned */ | ||
729 | if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) { | ||
730 | ++bcc_ptr; | ||
731 | --bytes_remaining; | ||
732 | } | ||
733 | decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses, | ||
734 | sess_data->nls_cp); | ||
735 | } else { | ||
736 | decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses, | ||
737 | sess_data->nls_cp); | ||
738 | } | ||
739 | |||
740 | rc = sess_establish_session(sess_data); | ||
741 | out: | ||
742 | sess_data->result = rc; | ||
743 | sess_data->func = NULL; | ||
744 | sess_free_buffer(sess_data); | ||
745 | } | ||
746 | |||
747 | #else | ||
748 | |||
749 | static void | ||
750 | sess_auth_lanman(struct sess_data *sess_data) | ||
751 | { | ||
752 | sess_data->result = -EOPNOTSUPP; | ||
753 | sess_data->func = NULL; | ||
754 | } | ||
652 | #endif | 755 | #endif |
653 | } else if (type == NTLM) { | 756 | |
654 | pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities); | 757 | static void |
655 | pSMB->req_no_secext.CaseInsensitivePasswordLength = | 758 | sess_auth_ntlm(struct sess_data *sess_data) |
759 | { | ||
760 | int rc = 0; | ||
761 | struct smb_hdr *smb_buf; | ||
762 | SESSION_SETUP_ANDX *pSMB; | ||
763 | char *bcc_ptr; | ||
764 | struct cifs_ses *ses = sess_data->ses; | ||
765 | __u32 capabilities; | ||
766 | __u16 bytes_remaining; | ||
767 | |||
768 | /* old style NTLM sessionsetup */ | ||
769 | /* wct = 13 */ | ||
770 | rc = sess_alloc_buffer(sess_data, 13); | ||
771 | if (rc) | ||
772 | goto out; | ||
773 | |||
774 | pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; | ||
775 | bcc_ptr = sess_data->iov[2].iov_base; | ||
776 | capabilities = cifs_ssetup_hdr(ses, pSMB); | ||
777 | |||
778 | pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities); | ||
779 | pSMB->req_no_secext.CaseInsensitivePasswordLength = | ||
656 | cpu_to_le16(CIFS_AUTH_RESP_SIZE); | 780 | cpu_to_le16(CIFS_AUTH_RESP_SIZE); |
657 | pSMB->req_no_secext.CaseSensitivePasswordLength = | 781 | pSMB->req_no_secext.CaseSensitivePasswordLength = |
658 | cpu_to_le16(CIFS_AUTH_RESP_SIZE); | 782 | cpu_to_le16(CIFS_AUTH_RESP_SIZE); |
659 | 783 | ||
660 | /* calculate ntlm response and session key */ | 784 | /* calculate ntlm response and session key */ |
661 | rc = setup_ntlm_response(ses, nls_cp); | 785 | rc = setup_ntlm_response(ses, sess_data->nls_cp); |
662 | if (rc) { | 786 | if (rc) { |
663 | cifs_dbg(VFS, "Error %d during NTLM authentication\n", | 787 | cifs_dbg(VFS, "Error %d during NTLM authentication\n", |
664 | rc); | 788 | rc); |
665 | goto ssetup_exit; | 789 | goto out; |
666 | } | 790 | } |
667 | 791 | ||
668 | /* copy ntlm response */ | 792 | /* copy ntlm response */ |
669 | memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE, | 793 | memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE, |
670 | CIFS_AUTH_RESP_SIZE); | 794 | CIFS_AUTH_RESP_SIZE); |
671 | bcc_ptr += CIFS_AUTH_RESP_SIZE; | 795 | bcc_ptr += CIFS_AUTH_RESP_SIZE; |
672 | memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE, | 796 | memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE, |
673 | CIFS_AUTH_RESP_SIZE); | 797 | CIFS_AUTH_RESP_SIZE); |
674 | bcc_ptr += CIFS_AUTH_RESP_SIZE; | 798 | bcc_ptr += CIFS_AUTH_RESP_SIZE; |
675 | 799 | ||
676 | if (ses->capabilities & CAP_UNICODE) { | 800 | if (ses->capabilities & CAP_UNICODE) { |
677 | /* unicode strings must be word aligned */ | 801 | /* unicode strings must be word aligned */ |
678 | if (iov[0].iov_len % 2) { | 802 | if (sess_data->iov[0].iov_len % 2) { |
679 | *bcc_ptr = 0; | 803 | *bcc_ptr = 0; |
680 | bcc_ptr++; | 804 | bcc_ptr++; |
681 | } | ||
682 | unicode_ssetup_strings(&bcc_ptr, ses, nls_cp); | ||
683 | } else | ||
684 | ascii_ssetup_strings(&bcc_ptr, ses, nls_cp); | ||
685 | } else if (type == NTLMv2) { | ||
686 | pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities); | ||
687 | |||
688 | /* LM2 password would be here if we supported it */ | ||
689 | pSMB->req_no_secext.CaseInsensitivePasswordLength = 0; | ||
690 | |||
691 | /* calculate nlmv2 response and session key */ | ||
692 | rc = setup_ntlmv2_rsp(ses, nls_cp); | ||
693 | if (rc) { | ||
694 | cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", | ||
695 | rc); | ||
696 | goto ssetup_exit; | ||
697 | } | 805 | } |
698 | memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE, | 806 | unicode_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp); |
699 | ses->auth_key.len - CIFS_SESS_KEY_SIZE); | 807 | } else { |
700 | bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE; | 808 | ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp); |
701 | 809 | } | |
702 | /* set case sensitive password length after tilen may get | ||
703 | * assigned, tilen is 0 otherwise. | ||
704 | */ | ||
705 | pSMB->req_no_secext.CaseSensitivePasswordLength = | ||
706 | cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE); | ||
707 | 810 | ||
708 | if (ses->capabilities & CAP_UNICODE) { | ||
709 | if (iov[0].iov_len % 2) { | ||
710 | *bcc_ptr = 0; | ||
711 | bcc_ptr++; | ||
712 | } | ||
713 | unicode_ssetup_strings(&bcc_ptr, ses, nls_cp); | ||
714 | } else | ||
715 | ascii_ssetup_strings(&bcc_ptr, ses, nls_cp); | ||
716 | } else if (type == Kerberos) { | ||
717 | #ifdef CONFIG_CIFS_UPCALL | ||
718 | struct cifs_spnego_msg *msg; | ||
719 | 811 | ||
720 | spnego_key = cifs_get_spnego_key(ses); | 812 | sess_data->iov[2].iov_len = (long) bcc_ptr - |
721 | if (IS_ERR(spnego_key)) { | 813 | (long) sess_data->iov[2].iov_base; |
722 | rc = PTR_ERR(spnego_key); | ||
723 | spnego_key = NULL; | ||
724 | goto ssetup_exit; | ||
725 | } | ||
726 | 814 | ||
727 | msg = spnego_key->payload.data; | 815 | rc = sess_sendreceive(sess_data); |
728 | /* check version field to make sure that cifs.upcall is | 816 | if (rc) |
729 | sending us a response in an expected form */ | 817 | goto out; |
730 | if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) { | ||
731 | cifs_dbg(VFS, "incorrect version of cifs.upcall " | ||
732 | "expected %d but got %d)", | ||
733 | CIFS_SPNEGO_UPCALL_VERSION, msg->version); | ||
734 | rc = -EKEYREJECTED; | ||
735 | goto ssetup_exit; | ||
736 | } | ||
737 | 818 | ||
738 | ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len, | 819 | pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; |
739 | GFP_KERNEL); | 820 | smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base; |
740 | if (!ses->auth_key.response) { | ||
741 | cifs_dbg(VFS, | ||
742 | "Kerberos can't allocate (%u bytes) memory", | ||
743 | msg->sesskey_len); | ||
744 | rc = -ENOMEM; | ||
745 | goto ssetup_exit; | ||
746 | } | ||
747 | ses->auth_key.len = msg->sesskey_len; | ||
748 | |||
749 | pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; | ||
750 | capabilities |= CAP_EXTENDED_SECURITY; | ||
751 | pSMB->req.Capabilities = cpu_to_le32(capabilities); | ||
752 | iov[1].iov_base = msg->data + msg->sesskey_len; | ||
753 | iov[1].iov_len = msg->secblob_len; | ||
754 | pSMB->req.SecurityBlobLength = cpu_to_le16(iov[1].iov_len); | ||
755 | |||
756 | if (ses->capabilities & CAP_UNICODE) { | ||
757 | /* unicode strings must be word aligned */ | ||
758 | if ((iov[0].iov_len + iov[1].iov_len) % 2) { | ||
759 | *bcc_ptr = 0; | ||
760 | bcc_ptr++; | ||
761 | } | ||
762 | unicode_oslm_strings(&bcc_ptr, nls_cp); | ||
763 | unicode_domain_string(&bcc_ptr, ses, nls_cp); | ||
764 | } else | ||
765 | /* BB: is this right? */ | ||
766 | ascii_ssetup_strings(&bcc_ptr, ses, nls_cp); | ||
767 | #else /* ! CONFIG_CIFS_UPCALL */ | ||
768 | cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n"); | ||
769 | rc = -ENOSYS; | ||
770 | goto ssetup_exit; | ||
771 | #endif /* CONFIG_CIFS_UPCALL */ | ||
772 | } else if (type == RawNTLMSSP) { | ||
773 | if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) { | ||
774 | cifs_dbg(VFS, "NTLMSSP requires Unicode support\n"); | ||
775 | rc = -ENOSYS; | ||
776 | goto ssetup_exit; | ||
777 | } | ||
778 | 821 | ||
779 | cifs_dbg(FYI, "ntlmssp session setup phase %d\n", phase); | 822 | if (smb_buf->WordCount != 3) { |
780 | pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; | 823 | rc = -EIO; |
781 | capabilities |= CAP_EXTENDED_SECURITY; | 824 | cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount); |
782 | pSMB->req.Capabilities |= cpu_to_le32(capabilities); | 825 | goto out; |
783 | switch(phase) { | 826 | } |
784 | case NtLmNegotiate: | ||
785 | build_ntlmssp_negotiate_blob( | ||
786 | pSMB->req.SecurityBlob, ses); | ||
787 | iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE); | ||
788 | iov[1].iov_base = pSMB->req.SecurityBlob; | ||
789 | pSMB->req.SecurityBlobLength = | ||
790 | cpu_to_le16(sizeof(NEGOTIATE_MESSAGE)); | ||
791 | break; | ||
792 | case NtLmAuthenticate: | ||
793 | /* | ||
794 | * 5 is an empirical value, large enough to hold | ||
795 | * authenticate message plus max 10 of av paris, | ||
796 | * domain, user, workstation names, flags, etc. | ||
797 | */ | ||
798 | ntlmsspblob = kzalloc( | ||
799 | 5*sizeof(struct _AUTHENTICATE_MESSAGE), | ||
800 | GFP_KERNEL); | ||
801 | if (!ntlmsspblob) { | ||
802 | rc = -ENOMEM; | ||
803 | goto ssetup_exit; | ||
804 | } | ||
805 | 827 | ||
806 | rc = build_ntlmssp_auth_blob(ntlmsspblob, | 828 | if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN) |
807 | &blob_len, ses, nls_cp); | 829 | cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? */ |
808 | if (rc) | 830 | |
809 | goto ssetup_exit; | 831 | ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */ |
810 | iov[1].iov_len = blob_len; | 832 | cifs_dbg(FYI, "UID = %llu\n", ses->Suid); |
811 | iov[1].iov_base = ntlmsspblob; | 833 | |
812 | pSMB->req.SecurityBlobLength = cpu_to_le16(blob_len); | 834 | bytes_remaining = get_bcc(smb_buf); |
813 | /* | 835 | bcc_ptr = pByteArea(smb_buf); |
814 | * Make sure that we tell the server that we are using | 836 | |
815 | * the uid that it just gave us back on the response | 837 | /* BB check if Unicode and decode strings */ |
816 | * (challenge) | 838 | if (bytes_remaining == 0) { |
817 | */ | 839 | /* no string area to decode, do nothing */ |
818 | smb_buf->Uid = ses->Suid; | 840 | } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) { |
819 | break; | 841 | /* unicode string area must be word-aligned */ |
820 | default: | 842 | if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) { |
821 | cifs_dbg(VFS, "invalid phase %d\n", phase); | 843 | ++bcc_ptr; |
822 | rc = -ENOSYS; | 844 | --bytes_remaining; |
823 | goto ssetup_exit; | ||
824 | } | 845 | } |
825 | /* unicode strings must be word aligned */ | 846 | decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses, |
826 | if ((iov[0].iov_len + iov[1].iov_len) % 2) { | 847 | sess_data->nls_cp); |
848 | } else { | ||
849 | decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses, | ||
850 | sess_data->nls_cp); | ||
851 | } | ||
852 | |||
853 | rc = sess_establish_session(sess_data); | ||
854 | out: | ||
855 | sess_data->result = rc; | ||
856 | sess_data->func = NULL; | ||
857 | sess_free_buffer(sess_data); | ||
858 | kfree(ses->auth_key.response); | ||
859 | ses->auth_key.response = NULL; | ||
860 | } | ||
861 | |||
862 | static void | ||
863 | sess_auth_ntlmv2(struct sess_data *sess_data) | ||
864 | { | ||
865 | int rc = 0; | ||
866 | struct smb_hdr *smb_buf; | ||
867 | SESSION_SETUP_ANDX *pSMB; | ||
868 | char *bcc_ptr; | ||
869 | struct cifs_ses *ses = sess_data->ses; | ||
870 | __u32 capabilities; | ||
871 | __u16 bytes_remaining; | ||
872 | |||
873 | /* old style NTLM sessionsetup */ | ||
874 | /* wct = 13 */ | ||
875 | rc = sess_alloc_buffer(sess_data, 13); | ||
876 | if (rc) | ||
877 | goto out; | ||
878 | |||
879 | pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; | ||
880 | bcc_ptr = sess_data->iov[2].iov_base; | ||
881 | capabilities = cifs_ssetup_hdr(ses, pSMB); | ||
882 | |||
883 | pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities); | ||
884 | |||
885 | /* LM2 password would be here if we supported it */ | ||
886 | pSMB->req_no_secext.CaseInsensitivePasswordLength = 0; | ||
887 | |||
888 | /* calculate nlmv2 response and session key */ | ||
889 | rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp); | ||
890 | if (rc) { | ||
891 | cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc); | ||
892 | goto out; | ||
893 | } | ||
894 | |||
895 | memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE, | ||
896 | ses->auth_key.len - CIFS_SESS_KEY_SIZE); | ||
897 | bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE; | ||
898 | |||
899 | /* set case sensitive password length after tilen may get | ||
900 | * assigned, tilen is 0 otherwise. | ||
901 | */ | ||
902 | pSMB->req_no_secext.CaseSensitivePasswordLength = | ||
903 | cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE); | ||
904 | |||
905 | if (ses->capabilities & CAP_UNICODE) { | ||
906 | if (sess_data->iov[0].iov_len % 2) { | ||
827 | *bcc_ptr = 0; | 907 | *bcc_ptr = 0; |
828 | bcc_ptr++; | 908 | bcc_ptr++; |
829 | } | 909 | } |
830 | unicode_oslm_strings(&bcc_ptr, nls_cp); | 910 | unicode_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp); |
831 | } else { | 911 | } else { |
832 | cifs_dbg(VFS, "secType %d not supported!\n", type); | 912 | ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp); |
833 | rc = -ENOSYS; | ||
834 | goto ssetup_exit; | ||
835 | } | 913 | } |
836 | 914 | ||
837 | iov[2].iov_base = str_area; | ||
838 | iov[2].iov_len = (long) bcc_ptr - (long) str_area; | ||
839 | 915 | ||
840 | count = iov[1].iov_len + iov[2].iov_len; | 916 | sess_data->iov[2].iov_len = (long) bcc_ptr - |
841 | smb_buf->smb_buf_length = | 917 | (long) sess_data->iov[2].iov_base; |
842 | cpu_to_be32(be32_to_cpu(smb_buf->smb_buf_length) + count); | ||
843 | 918 | ||
844 | put_bcc(count, smb_buf); | 919 | rc = sess_sendreceive(sess_data); |
920 | if (rc) | ||
921 | goto out; | ||
845 | 922 | ||
846 | rc = SendReceive2(xid, ses, iov, 3 /* num_iovecs */, &resp_buf_type, | 923 | pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; |
847 | CIFS_LOG_ERROR); | 924 | smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base; |
848 | /* SMB request buf freed in SendReceive2 */ | 925 | |
926 | if (smb_buf->WordCount != 3) { | ||
927 | rc = -EIO; | ||
928 | cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount); | ||
929 | goto out; | ||
930 | } | ||
931 | |||
932 | if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN) | ||
933 | cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? */ | ||
934 | |||
935 | ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */ | ||
936 | cifs_dbg(FYI, "UID = %llu\n", ses->Suid); | ||
849 | 937 | ||
850 | pSMB = (SESSION_SETUP_ANDX *)iov[0].iov_base; | 938 | bytes_remaining = get_bcc(smb_buf); |
851 | smb_buf = (struct smb_hdr *)iov[0].iov_base; | 939 | bcc_ptr = pByteArea(smb_buf); |
852 | 940 | ||
853 | if ((type == RawNTLMSSP) && (resp_buf_type != CIFS_NO_BUFFER) && | 941 | /* BB check if Unicode and decode strings */ |
854 | (smb_buf->Status.CifsError == | 942 | if (bytes_remaining == 0) { |
855 | cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED))) { | 943 | /* no string area to decode, do nothing */ |
856 | if (phase != NtLmNegotiate) { | 944 | } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) { |
857 | cifs_dbg(VFS, "Unexpected more processing error\n"); | 945 | /* unicode string area must be word-aligned */ |
858 | goto ssetup_exit; | 946 | if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) { |
947 | ++bcc_ptr; | ||
948 | --bytes_remaining; | ||
859 | } | 949 | } |
860 | /* NTLMSSP Negotiate sent now processing challenge (response) */ | 950 | decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses, |
861 | phase = NtLmChallenge; /* process ntlmssp challenge */ | 951 | sess_data->nls_cp); |
862 | rc = 0; /* MORE_PROC rc is not an error here, but expected */ | 952 | } else { |
953 | decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses, | ||
954 | sess_data->nls_cp); | ||
863 | } | 955 | } |
956 | |||
957 | rc = sess_establish_session(sess_data); | ||
958 | out: | ||
959 | sess_data->result = rc; | ||
960 | sess_data->func = NULL; | ||
961 | sess_free_buffer(sess_data); | ||
962 | kfree(ses->auth_key.response); | ||
963 | ses->auth_key.response = NULL; | ||
964 | } | ||
965 | |||
966 | #ifdef CONFIG_CIFS_UPCALL | ||
967 | static void | ||
968 | sess_auth_kerberos(struct sess_data *sess_data) | ||
969 | { | ||
970 | int rc = 0; | ||
971 | struct smb_hdr *smb_buf; | ||
972 | SESSION_SETUP_ANDX *pSMB; | ||
973 | char *bcc_ptr; | ||
974 | struct cifs_ses *ses = sess_data->ses; | ||
975 | __u32 capabilities; | ||
976 | __u16 bytes_remaining; | ||
977 | struct key *spnego_key = NULL; | ||
978 | struct cifs_spnego_msg *msg; | ||
979 | u16 blob_len; | ||
980 | |||
981 | /* extended security */ | ||
982 | /* wct = 12 */ | ||
983 | rc = sess_alloc_buffer(sess_data, 12); | ||
864 | if (rc) | 984 | if (rc) |
865 | goto ssetup_exit; | 985 | goto out; |
866 | 986 | ||
867 | if ((smb_buf->WordCount != 3) && (smb_buf->WordCount != 4)) { | 987 | pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; |
988 | bcc_ptr = sess_data->iov[2].iov_base; | ||
989 | capabilities = cifs_ssetup_hdr(ses, pSMB); | ||
990 | |||
991 | spnego_key = cifs_get_spnego_key(ses); | ||
992 | if (IS_ERR(spnego_key)) { | ||
993 | rc = PTR_ERR(spnego_key); | ||
994 | spnego_key = NULL; | ||
995 | goto out; | ||
996 | } | ||
997 | |||
998 | msg = spnego_key->payload.data; | ||
999 | /* | ||
1000 | * check version field to make sure that cifs.upcall is | ||
1001 | * sending us a response in an expected form | ||
1002 | */ | ||
1003 | if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) { | ||
1004 | cifs_dbg(VFS, | ||
1005 | "incorrect version of cifs.upcall (expected %d but got %d)", | ||
1006 | CIFS_SPNEGO_UPCALL_VERSION, msg->version); | ||
1007 | rc = -EKEYREJECTED; | ||
1008 | goto out_put_spnego_key; | ||
1009 | } | ||
1010 | |||
1011 | ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len, | ||
1012 | GFP_KERNEL); | ||
1013 | if (!ses->auth_key.response) { | ||
1014 | cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory", | ||
1015 | msg->sesskey_len); | ||
1016 | rc = -ENOMEM; | ||
1017 | goto out_put_spnego_key; | ||
1018 | } | ||
1019 | ses->auth_key.len = msg->sesskey_len; | ||
1020 | |||
1021 | pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; | ||
1022 | capabilities |= CAP_EXTENDED_SECURITY; | ||
1023 | pSMB->req.Capabilities = cpu_to_le32(capabilities); | ||
1024 | sess_data->iov[1].iov_base = msg->data + msg->sesskey_len; | ||
1025 | sess_data->iov[1].iov_len = msg->secblob_len; | ||
1026 | pSMB->req.SecurityBlobLength = cpu_to_le16(sess_data->iov[1].iov_len); | ||
1027 | |||
1028 | if (ses->capabilities & CAP_UNICODE) { | ||
1029 | /* unicode strings must be word aligned */ | ||
1030 | if ((sess_data->iov[0].iov_len | ||
1031 | + sess_data->iov[1].iov_len) % 2) { | ||
1032 | *bcc_ptr = 0; | ||
1033 | bcc_ptr++; | ||
1034 | } | ||
1035 | unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp); | ||
1036 | unicode_domain_string(&bcc_ptr, ses, sess_data->nls_cp); | ||
1037 | } else { | ||
1038 | /* BB: is this right? */ | ||
1039 | ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp); | ||
1040 | } | ||
1041 | |||
1042 | sess_data->iov[2].iov_len = (long) bcc_ptr - | ||
1043 | (long) sess_data->iov[2].iov_base; | ||
1044 | |||
1045 | rc = sess_sendreceive(sess_data); | ||
1046 | if (rc) | ||
1047 | goto out_put_spnego_key; | ||
1048 | |||
1049 | pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; | ||
1050 | smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base; | ||
1051 | |||
1052 | if (smb_buf->WordCount != 4) { | ||
868 | rc = -EIO; | 1053 | rc = -EIO; |
869 | cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount); | 1054 | cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount); |
870 | goto ssetup_exit; | 1055 | goto out_put_spnego_key; |
871 | } | 1056 | } |
872 | action = le16_to_cpu(pSMB->resp.Action); | 1057 | |
873 | if (action & GUEST_LOGIN) | 1058 | if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN) |
874 | cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? */ | 1059 | cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? */ |
1060 | |||
875 | ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */ | 1061 | ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */ |
876 | cifs_dbg(FYI, "UID = %llu\n", ses->Suid); | 1062 | cifs_dbg(FYI, "UID = %llu\n", ses->Suid); |
877 | /* response can have either 3 or 4 word count - Samba sends 3 */ | 1063 | |
878 | /* and lanman response is 3 */ | ||
879 | bytes_remaining = get_bcc(smb_buf); | 1064 | bytes_remaining = get_bcc(smb_buf); |
880 | bcc_ptr = pByteArea(smb_buf); | 1065 | bcc_ptr = pByteArea(smb_buf); |
881 | 1066 | ||
882 | if (smb_buf->WordCount == 4) { | 1067 | blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength); |
883 | blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength); | 1068 | if (blob_len > bytes_remaining) { |
884 | if (blob_len > bytes_remaining) { | 1069 | cifs_dbg(VFS, "bad security blob length %d\n", |
885 | cifs_dbg(VFS, "bad security blob length %d\n", | 1070 | blob_len); |
886 | blob_len); | 1071 | rc = -EINVAL; |
887 | rc = -EINVAL; | 1072 | goto out_put_spnego_key; |
888 | goto ssetup_exit; | ||
889 | } | ||
890 | if (phase == NtLmChallenge) { | ||
891 | rc = decode_ntlmssp_challenge(bcc_ptr, blob_len, ses); | ||
892 | /* now goto beginning for ntlmssp authenticate phase */ | ||
893 | if (rc) | ||
894 | goto ssetup_exit; | ||
895 | } | ||
896 | bcc_ptr += blob_len; | ||
897 | bytes_remaining -= blob_len; | ||
898 | } | 1073 | } |
1074 | bcc_ptr += blob_len; | ||
1075 | bytes_remaining -= blob_len; | ||
899 | 1076 | ||
900 | /* BB check if Unicode and decode strings */ | 1077 | /* BB check if Unicode and decode strings */ |
901 | if (bytes_remaining == 0) { | 1078 | if (bytes_remaining == 0) { |
@@ -906,60 +1083,371 @@ ssetup_ntlmssp_authenticate: | |||
906 | ++bcc_ptr; | 1083 | ++bcc_ptr; |
907 | --bytes_remaining; | 1084 | --bytes_remaining; |
908 | } | 1085 | } |
909 | decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses, nls_cp); | 1086 | decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses, |
1087 | sess_data->nls_cp); | ||
910 | } else { | 1088 | } else { |
911 | decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses, nls_cp); | 1089 | decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses, |
1090 | sess_data->nls_cp); | ||
912 | } | 1091 | } |
913 | 1092 | ||
914 | ssetup_exit: | 1093 | rc = sess_establish_session(sess_data); |
915 | if (spnego_key) { | 1094 | out_put_spnego_key: |
916 | key_invalidate(spnego_key); | 1095 | key_invalidate(spnego_key); |
917 | key_put(spnego_key); | 1096 | key_put(spnego_key); |
1097 | out: | ||
1098 | sess_data->result = rc; | ||
1099 | sess_data->func = NULL; | ||
1100 | sess_free_buffer(sess_data); | ||
1101 | kfree(ses->auth_key.response); | ||
1102 | ses->auth_key.response = NULL; | ||
1103 | } | ||
1104 | |||
1105 | #else | ||
1106 | |||
1107 | static void | ||
1108 | sess_auth_kerberos(struct sess_data *sess_data) | ||
1109 | { | ||
1110 | cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n"); | ||
1111 | sess_data->result = -ENOSYS; | ||
1112 | sess_data->func = NULL; | ||
1113 | } | ||
1114 | #endif /* ! CONFIG_CIFS_UPCALL */ | ||
1115 | |||
1116 | /* | ||
1117 | * The required kvec buffers have to be allocated before calling this | ||
1118 | * function. | ||
1119 | */ | ||
1120 | static int | ||
1121 | _sess_auth_rawntlmssp_assemble_req(struct sess_data *sess_data) | ||
1122 | { | ||
1123 | struct smb_hdr *smb_buf; | ||
1124 | SESSION_SETUP_ANDX *pSMB; | ||
1125 | struct cifs_ses *ses = sess_data->ses; | ||
1126 | __u32 capabilities; | ||
1127 | char *bcc_ptr; | ||
1128 | |||
1129 | pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; | ||
1130 | smb_buf = (struct smb_hdr *)pSMB; | ||
1131 | |||
1132 | capabilities = cifs_ssetup_hdr(ses, pSMB); | ||
1133 | if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) { | ||
1134 | cifs_dbg(VFS, "NTLMSSP requires Unicode support\n"); | ||
1135 | return -ENOSYS; | ||
918 | } | 1136 | } |
919 | kfree(str_area); | ||
920 | kfree(ntlmsspblob); | ||
921 | ntlmsspblob = NULL; | ||
922 | if (resp_buf_type == CIFS_SMALL_BUFFER) { | ||
923 | cifs_dbg(FYI, "ssetup freeing small buf %p\n", iov[0].iov_base); | ||
924 | cifs_small_buf_release(iov[0].iov_base); | ||
925 | } else if (resp_buf_type == CIFS_LARGE_BUFFER) | ||
926 | cifs_buf_release(iov[0].iov_base); | ||
927 | 1137 | ||
928 | /* if ntlmssp, and negotiate succeeded, proceed to authenticate phase */ | 1138 | pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; |
929 | if ((phase == NtLmChallenge) && (rc == 0)) | 1139 | capabilities |= CAP_EXTENDED_SECURITY; |
930 | goto ssetup_ntlmssp_authenticate; | 1140 | pSMB->req.Capabilities |= cpu_to_le32(capabilities); |
1141 | |||
1142 | bcc_ptr = sess_data->iov[2].iov_base; | ||
1143 | /* unicode strings must be word aligned */ | ||
1144 | if ((sess_data->iov[0].iov_len + sess_data->iov[1].iov_len) % 2) { | ||
1145 | *bcc_ptr = 0; | ||
1146 | bcc_ptr++; | ||
1147 | } | ||
1148 | unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp); | ||
1149 | |||
1150 | sess_data->iov[2].iov_len = (long) bcc_ptr - | ||
1151 | (long) sess_data->iov[2].iov_base; | ||
1152 | |||
1153 | return 0; | ||
1154 | } | ||
1155 | |||
1156 | static void | ||
1157 | sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data); | ||
1158 | |||
1159 | static void | ||
1160 | sess_auth_rawntlmssp_negotiate(struct sess_data *sess_data) | ||
1161 | { | ||
1162 | int rc; | ||
1163 | struct smb_hdr *smb_buf; | ||
1164 | SESSION_SETUP_ANDX *pSMB; | ||
1165 | struct cifs_ses *ses = sess_data->ses; | ||
1166 | __u16 bytes_remaining; | ||
1167 | char *bcc_ptr; | ||
1168 | u16 blob_len; | ||
1169 | |||
1170 | cifs_dbg(FYI, "rawntlmssp session setup negotiate phase\n"); | ||
1171 | |||
1172 | /* | ||
1173 | * if memory allocation is successful, caller of this function | ||
1174 | * frees it. | ||
1175 | */ | ||
1176 | ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL); | ||
1177 | if (!ses->ntlmssp) { | ||
1178 | rc = -ENOMEM; | ||
1179 | goto out; | ||
1180 | } | ||
1181 | ses->ntlmssp->sesskey_per_smbsess = false; | ||
1182 | |||
1183 | /* wct = 12 */ | ||
1184 | rc = sess_alloc_buffer(sess_data, 12); | ||
1185 | if (rc) | ||
1186 | goto out; | ||
1187 | |||
1188 | pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; | ||
1189 | |||
1190 | /* Build security blob before we assemble the request */ | ||
1191 | build_ntlmssp_negotiate_blob(pSMB->req.SecurityBlob, ses); | ||
1192 | sess_data->iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE); | ||
1193 | sess_data->iov[1].iov_base = pSMB->req.SecurityBlob; | ||
1194 | pSMB->req.SecurityBlobLength = cpu_to_le16(sizeof(NEGOTIATE_MESSAGE)); | ||
1195 | |||
1196 | rc = _sess_auth_rawntlmssp_assemble_req(sess_data); | ||
1197 | if (rc) | ||
1198 | goto out; | ||
1199 | |||
1200 | rc = sess_sendreceive(sess_data); | ||
1201 | |||
1202 | pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; | ||
1203 | smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base; | ||
1204 | |||
1205 | /* If true, rc here is expected and not an error */ | ||
1206 | if (sess_data->buf0_type != CIFS_NO_BUFFER && | ||
1207 | smb_buf->Status.CifsError == | ||
1208 | cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED)) | ||
1209 | rc = 0; | ||
1210 | |||
1211 | if (rc) | ||
1212 | goto out; | ||
1213 | |||
1214 | cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n"); | ||
1215 | |||
1216 | if (smb_buf->WordCount != 4) { | ||
1217 | rc = -EIO; | ||
1218 | cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount); | ||
1219 | goto out; | ||
1220 | } | ||
1221 | |||
1222 | ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */ | ||
1223 | cifs_dbg(FYI, "UID = %llu\n", ses->Suid); | ||
1224 | |||
1225 | bytes_remaining = get_bcc(smb_buf); | ||
1226 | bcc_ptr = pByteArea(smb_buf); | ||
1227 | |||
1228 | blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength); | ||
1229 | if (blob_len > bytes_remaining) { | ||
1230 | cifs_dbg(VFS, "bad security blob length %d\n", | ||
1231 | blob_len); | ||
1232 | rc = -EINVAL; | ||
1233 | goto out; | ||
1234 | } | ||
1235 | |||
1236 | rc = decode_ntlmssp_challenge(bcc_ptr, blob_len, ses); | ||
1237 | out: | ||
1238 | sess_free_buffer(sess_data); | ||
931 | 1239 | ||
932 | if (!rc) { | 1240 | if (!rc) { |
933 | mutex_lock(&ses->server->srv_mutex); | 1241 | sess_data->func = sess_auth_rawntlmssp_authenticate; |
934 | if (!ses->server->session_estab) { | 1242 | return; |
935 | if (ses->server->sign) { | 1243 | } |
936 | ses->server->session_key.response = | 1244 | |
937 | kmemdup(ses->auth_key.response, | 1245 | /* Else error. Cleanup */ |
938 | ses->auth_key.len, GFP_KERNEL); | 1246 | kfree(ses->auth_key.response); |
939 | if (!ses->server->session_key.response) { | 1247 | ses->auth_key.response = NULL; |
940 | rc = -ENOMEM; | 1248 | kfree(ses->ntlmssp); |
941 | mutex_unlock(&ses->server->srv_mutex); | 1249 | ses->ntlmssp = NULL; |
942 | goto keycp_exit; | 1250 | |
943 | } | 1251 | sess_data->func = NULL; |
944 | ses->server->session_key.len = | 1252 | sess_data->result = rc; |
945 | ses->auth_key.len; | 1253 | } |
946 | } | ||
947 | ses->server->sequence_number = 0x2; | ||
948 | ses->server->session_estab = true; | ||
949 | } | ||
950 | mutex_unlock(&ses->server->srv_mutex); | ||
951 | 1254 | ||
952 | cifs_dbg(FYI, "CIFS session established successfully\n"); | 1255 | static void |
953 | spin_lock(&GlobalMid_Lock); | 1256 | sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data) |
954 | ses->status = CifsGood; | 1257 | { |
955 | ses->need_reconnect = false; | 1258 | int rc; |
956 | spin_unlock(&GlobalMid_Lock); | 1259 | struct smb_hdr *smb_buf; |
1260 | SESSION_SETUP_ANDX *pSMB; | ||
1261 | struct cifs_ses *ses = sess_data->ses; | ||
1262 | __u16 bytes_remaining; | ||
1263 | char *bcc_ptr; | ||
1264 | char *ntlmsspblob = NULL; | ||
1265 | u16 blob_len; | ||
1266 | |||
1267 | cifs_dbg(FYI, "rawntlmssp session setup authenticate phase\n"); | ||
1268 | |||
1269 | /* wct = 12 */ | ||
1270 | rc = sess_alloc_buffer(sess_data, 12); | ||
1271 | if (rc) | ||
1272 | goto out; | ||
1273 | |||
1274 | /* Build security blob before we assemble the request */ | ||
1275 | pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; | ||
1276 | smb_buf = (struct smb_hdr *)pSMB; | ||
1277 | /* | ||
1278 | * 5 is an empirical value, large enough to hold | ||
1279 | * authenticate message plus max 10 of av paris, | ||
1280 | * domain, user, workstation names, flags, etc. | ||
1281 | */ | ||
1282 | ntlmsspblob = kzalloc(5*sizeof(struct _AUTHENTICATE_MESSAGE), | ||
1283 | GFP_KERNEL); | ||
1284 | if (!ntlmsspblob) { | ||
1285 | rc = -ENOMEM; | ||
1286 | goto out; | ||
957 | } | 1287 | } |
958 | 1288 | ||
959 | keycp_exit: | 1289 | rc = build_ntlmssp_auth_blob(ntlmsspblob, |
1290 | &blob_len, ses, sess_data->nls_cp); | ||
1291 | if (rc) | ||
1292 | goto out_free_ntlmsspblob; | ||
1293 | sess_data->iov[1].iov_len = blob_len; | ||
1294 | sess_data->iov[1].iov_base = ntlmsspblob; | ||
1295 | pSMB->req.SecurityBlobLength = cpu_to_le16(blob_len); | ||
1296 | /* | ||
1297 | * Make sure that we tell the server that we are using | ||
1298 | * the uid that it just gave us back on the response | ||
1299 | * (challenge) | ||
1300 | */ | ||
1301 | smb_buf->Uid = ses->Suid; | ||
1302 | |||
1303 | rc = _sess_auth_rawntlmssp_assemble_req(sess_data); | ||
1304 | if (rc) | ||
1305 | goto out_free_ntlmsspblob; | ||
1306 | |||
1307 | rc = sess_sendreceive(sess_data); | ||
1308 | if (rc) | ||
1309 | goto out_free_ntlmsspblob; | ||
1310 | |||
1311 | pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; | ||
1312 | smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base; | ||
1313 | if (smb_buf->WordCount != 4) { | ||
1314 | rc = -EIO; | ||
1315 | cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount); | ||
1316 | goto out_free_ntlmsspblob; | ||
1317 | } | ||
1318 | |||
1319 | if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN) | ||
1320 | cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? */ | ||
1321 | |||
1322 | bytes_remaining = get_bcc(smb_buf); | ||
1323 | bcc_ptr = pByteArea(smb_buf); | ||
1324 | blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength); | ||
1325 | if (blob_len > bytes_remaining) { | ||
1326 | cifs_dbg(VFS, "bad security blob length %d\n", | ||
1327 | blob_len); | ||
1328 | rc = -EINVAL; | ||
1329 | goto out_free_ntlmsspblob; | ||
1330 | } | ||
1331 | bcc_ptr += blob_len; | ||
1332 | bytes_remaining -= blob_len; | ||
1333 | |||
1334 | |||
1335 | /* BB check if Unicode and decode strings */ | ||
1336 | if (bytes_remaining == 0) { | ||
1337 | /* no string area to decode, do nothing */ | ||
1338 | } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) { | ||
1339 | /* unicode string area must be word-aligned */ | ||
1340 | if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) { | ||
1341 | ++bcc_ptr; | ||
1342 | --bytes_remaining; | ||
1343 | } | ||
1344 | decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses, | ||
1345 | sess_data->nls_cp); | ||
1346 | } else { | ||
1347 | decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses, | ||
1348 | sess_data->nls_cp); | ||
1349 | } | ||
1350 | |||
1351 | out_free_ntlmsspblob: | ||
1352 | kfree(ntlmsspblob); | ||
1353 | out: | ||
1354 | sess_free_buffer(sess_data); | ||
1355 | |||
1356 | if (!rc) | ||
1357 | rc = sess_establish_session(sess_data); | ||
1358 | |||
1359 | /* Cleanup */ | ||
960 | kfree(ses->auth_key.response); | 1360 | kfree(ses->auth_key.response); |
961 | ses->auth_key.response = NULL; | 1361 | ses->auth_key.response = NULL; |
962 | kfree(ses->ntlmssp); | 1362 | kfree(ses->ntlmssp); |
1363 | ses->ntlmssp = NULL; | ||
1364 | |||
1365 | sess_data->func = NULL; | ||
1366 | sess_data->result = rc; | ||
1367 | } | ||
1368 | |||
1369 | static int select_sec(struct cifs_ses *ses, struct sess_data *sess_data) | ||
1370 | { | ||
1371 | int type; | ||
1372 | |||
1373 | type = select_sectype(ses->server, ses->sectype); | ||
1374 | cifs_dbg(FYI, "sess setup type %d\n", type); | ||
1375 | if (type == Unspecified) { | ||
1376 | cifs_dbg(VFS, | ||
1377 | "Unable to select appropriate authentication method!"); | ||
1378 | return -EINVAL; | ||
1379 | } | ||
1380 | |||
1381 | switch (type) { | ||
1382 | case LANMAN: | ||
1383 | /* LANMAN and plaintext are less secure and off by default. | ||
1384 | * So we make this explicitly be turned on in kconfig (in the | ||
1385 | * build) and turned on at runtime (changed from the default) | ||
1386 | * in proc/fs/cifs or via mount parm. Unfortunately this is | ||
1387 | * needed for old Win (e.g. Win95), some obscure NAS and OS/2 */ | ||
1388 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | ||
1389 | sess_data->func = sess_auth_lanman; | ||
1390 | break; | ||
1391 | #else | ||
1392 | return -EOPNOTSUPP; | ||
1393 | #endif | ||
1394 | case NTLM: | ||
1395 | sess_data->func = sess_auth_ntlm; | ||
1396 | break; | ||
1397 | case NTLMv2: | ||
1398 | sess_data->func = sess_auth_ntlmv2; | ||
1399 | break; | ||
1400 | case Kerberos: | ||
1401 | #ifdef CONFIG_CIFS_UPCALL | ||
1402 | sess_data->func = sess_auth_kerberos; | ||
1403 | break; | ||
1404 | #else | ||
1405 | cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n"); | ||
1406 | return -ENOSYS; | ||
1407 | break; | ||
1408 | #endif /* CONFIG_CIFS_UPCALL */ | ||
1409 | case RawNTLMSSP: | ||
1410 | sess_data->func = sess_auth_rawntlmssp_negotiate; | ||
1411 | break; | ||
1412 | default: | ||
1413 | cifs_dbg(VFS, "secType %d not supported!\n", type); | ||
1414 | return -ENOSYS; | ||
1415 | } | ||
1416 | |||
1417 | return 0; | ||
1418 | } | ||
1419 | |||
1420 | int CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses, | ||
1421 | const struct nls_table *nls_cp) | ||
1422 | { | ||
1423 | int rc = 0; | ||
1424 | struct sess_data *sess_data; | ||
1425 | |||
1426 | if (ses == NULL) { | ||
1427 | WARN(1, "%s: ses == NULL!", __func__); | ||
1428 | return -EINVAL; | ||
1429 | } | ||
1430 | |||
1431 | sess_data = kzalloc(sizeof(struct sess_data), GFP_KERNEL); | ||
1432 | if (!sess_data) | ||
1433 | return -ENOMEM; | ||
1434 | |||
1435 | rc = select_sec(ses, sess_data); | ||
1436 | if (rc) | ||
1437 | goto out; | ||
1438 | |||
1439 | sess_data->xid = xid; | ||
1440 | sess_data->ses = ses; | ||
1441 | sess_data->buf0_type = CIFS_NO_BUFFER; | ||
1442 | sess_data->nls_cp = (struct nls_table *) nls_cp; | ||
1443 | |||
1444 | while (sess_data->func) | ||
1445 | sess_data->func(sess_data); | ||
1446 | |||
1447 | /* Store result before we free sess_data */ | ||
1448 | rc = sess_data->result; | ||
963 | 1449 | ||
1450 | out: | ||
1451 | kfree(sess_data); | ||
964 | return rc; | 1452 | return rc; |
965 | } | 1453 | } |
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index d1fdfa848703..5e8c22d6c7b9 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c | |||
@@ -1009,6 +1009,12 @@ cifs_is_read_op(__u32 oplock) | |||
1009 | return oplock == OPLOCK_READ; | 1009 | return oplock == OPLOCK_READ; |
1010 | } | 1010 | } |
1011 | 1011 | ||
1012 | static unsigned int | ||
1013 | cifs_wp_retry_size(struct inode *inode) | ||
1014 | { | ||
1015 | return CIFS_SB(inode->i_sb)->wsize; | ||
1016 | } | ||
1017 | |||
1012 | struct smb_version_operations smb1_operations = { | 1018 | struct smb_version_operations smb1_operations = { |
1013 | .send_cancel = send_nt_cancel, | 1019 | .send_cancel = send_nt_cancel, |
1014 | .compare_fids = cifs_compare_fids, | 1020 | .compare_fids = cifs_compare_fids, |
@@ -1019,6 +1025,7 @@ struct smb_version_operations smb1_operations = { | |||
1019 | .set_credits = cifs_set_credits, | 1025 | .set_credits = cifs_set_credits, |
1020 | .get_credits_field = cifs_get_credits_field, | 1026 | .get_credits_field = cifs_get_credits_field, |
1021 | .get_credits = cifs_get_credits, | 1027 | .get_credits = cifs_get_credits, |
1028 | .wait_mtu_credits = cifs_wait_mtu_credits, | ||
1022 | .get_next_mid = cifs_get_next_mid, | 1029 | .get_next_mid = cifs_get_next_mid, |
1023 | .read_data_offset = cifs_read_data_offset, | 1030 | .read_data_offset = cifs_read_data_offset, |
1024 | .read_data_length = cifs_read_data_length, | 1031 | .read_data_length = cifs_read_data_length, |
@@ -1078,6 +1085,7 @@ struct smb_version_operations smb1_operations = { | |||
1078 | .query_mf_symlink = cifs_query_mf_symlink, | 1085 | .query_mf_symlink = cifs_query_mf_symlink, |
1079 | .create_mf_symlink = cifs_create_mf_symlink, | 1086 | .create_mf_symlink = cifs_create_mf_symlink, |
1080 | .is_read_op = cifs_is_read_op, | 1087 | .is_read_op = cifs_is_read_op, |
1088 | .wp_retry_size = cifs_wp_retry_size, | ||
1081 | #ifdef CONFIG_CIFS_XATTR | 1089 | #ifdef CONFIG_CIFS_XATTR |
1082 | .query_all_EAs = CIFSSMBQAllEAs, | 1090 | .query_all_EAs = CIFSSMBQAllEAs, |
1083 | .set_EA = CIFSSMBSetEA, | 1091 | .set_EA = CIFSSMBSetEA, |
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c index 84c012a6aba0..0150182a4494 100644 --- a/fs/cifs/smb2inode.c +++ b/fs/cifs/smb2inode.c | |||
@@ -91,7 +91,7 @@ smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon, | |||
91 | case SMB2_OP_SET_EOF: | 91 | case SMB2_OP_SET_EOF: |
92 | tmprc = SMB2_set_eof(xid, tcon, fid.persistent_fid, | 92 | tmprc = SMB2_set_eof(xid, tcon, fid.persistent_fid, |
93 | fid.volatile_fid, current->tgid, | 93 | fid.volatile_fid, current->tgid, |
94 | (__le64 *)data); | 94 | (__le64 *)data, false); |
95 | break; | 95 | break; |
96 | case SMB2_OP_SET_INFO: | 96 | case SMB2_OP_SET_INFO: |
97 | tmprc = SMB2_set_info(xid, tcon, fid.persistent_fid, | 97 | tmprc = SMB2_set_info(xid, tcon, fid.persistent_fid, |
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c index 94bd4fbb13d3..e31a9dfdcd39 100644 --- a/fs/cifs/smb2maperror.c +++ b/fs/cifs/smb2maperror.c | |||
@@ -605,7 +605,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = { | |||
605 | {STATUS_MAPPED_FILE_SIZE_ZERO, -EIO, "STATUS_MAPPED_FILE_SIZE_ZERO"}, | 605 | {STATUS_MAPPED_FILE_SIZE_ZERO, -EIO, "STATUS_MAPPED_FILE_SIZE_ZERO"}, |
606 | {STATUS_TOO_MANY_OPENED_FILES, -EMFILE, "STATUS_TOO_MANY_OPENED_FILES"}, | 606 | {STATUS_TOO_MANY_OPENED_FILES, -EMFILE, "STATUS_TOO_MANY_OPENED_FILES"}, |
607 | {STATUS_CANCELLED, -EIO, "STATUS_CANCELLED"}, | 607 | {STATUS_CANCELLED, -EIO, "STATUS_CANCELLED"}, |
608 | {STATUS_CANNOT_DELETE, -EIO, "STATUS_CANNOT_DELETE"}, | 608 | {STATUS_CANNOT_DELETE, -EACCES, "STATUS_CANNOT_DELETE"}, |
609 | {STATUS_INVALID_COMPUTER_NAME, -EIO, "STATUS_INVALID_COMPUTER_NAME"}, | 609 | {STATUS_INVALID_COMPUTER_NAME, -EIO, "STATUS_INVALID_COMPUTER_NAME"}, |
610 | {STATUS_FILE_DELETED, -EIO, "STATUS_FILE_DELETED"}, | 610 | {STATUS_FILE_DELETED, -EIO, "STATUS_FILE_DELETED"}, |
611 | {STATUS_SPECIAL_ACCOUNT, -EIO, "STATUS_SPECIAL_ACCOUNT"}, | 611 | {STATUS_SPECIAL_ACCOUNT, -EIO, "STATUS_SPECIAL_ACCOUNT"}, |
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index b8021fde987d..f2e6ac29a8d6 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c | |||
@@ -437,7 +437,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, | |||
437 | continue; | 437 | continue; |
438 | 438 | ||
439 | cifs_dbg(FYI, "found in the open list\n"); | 439 | cifs_dbg(FYI, "found in the open list\n"); |
440 | cifs_dbg(FYI, "lease key match, lease break 0x%d\n", | 440 | cifs_dbg(FYI, "lease key match, lease break 0x%x\n", |
441 | le32_to_cpu(rsp->NewLeaseState)); | 441 | le32_to_cpu(rsp->NewLeaseState)); |
442 | 442 | ||
443 | server->ops->set_oplock_level(cinode, lease_state, 0, NULL); | 443 | server->ops->set_oplock_level(cinode, lease_state, 0, NULL); |
@@ -467,7 +467,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, | |||
467 | } | 467 | } |
468 | 468 | ||
469 | cifs_dbg(FYI, "found in the pending open list\n"); | 469 | cifs_dbg(FYI, "found in the pending open list\n"); |
470 | cifs_dbg(FYI, "lease key match, lease break 0x%d\n", | 470 | cifs_dbg(FYI, "lease key match, lease break 0x%x\n", |
471 | le32_to_cpu(rsp->NewLeaseState)); | 471 | le32_to_cpu(rsp->NewLeaseState)); |
472 | 472 | ||
473 | open->oplock = lease_state; | 473 | open->oplock = lease_state; |
@@ -546,7 +546,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) | |||
546 | return false; | 546 | return false; |
547 | } | 547 | } |
548 | 548 | ||
549 | cifs_dbg(FYI, "oplock level 0x%d\n", rsp->OplockLevel); | 549 | cifs_dbg(FYI, "oplock level 0x%x\n", rsp->OplockLevel); |
550 | 550 | ||
551 | /* look up tcon based on tid & uid */ | 551 | /* look up tcon based on tid & uid */ |
552 | spin_lock(&cifs_tcp_ses_lock); | 552 | spin_lock(&cifs_tcp_ses_lock); |
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 787844bde384..77f8aeb9c2fc 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c | |||
@@ -19,6 +19,7 @@ | |||
19 | 19 | ||
20 | #include <linux/pagemap.h> | 20 | #include <linux/pagemap.h> |
21 | #include <linux/vfs.h> | 21 | #include <linux/vfs.h> |
22 | #include <linux/falloc.h> | ||
22 | #include "cifsglob.h" | 23 | #include "cifsglob.h" |
23 | #include "smb2pdu.h" | 24 | #include "smb2pdu.h" |
24 | #include "smb2proto.h" | 25 | #include "smb2proto.h" |
@@ -112,6 +113,53 @@ smb2_get_credits(struct mid_q_entry *mid) | |||
112 | return le16_to_cpu(((struct smb2_hdr *)mid->resp_buf)->CreditRequest); | 113 | return le16_to_cpu(((struct smb2_hdr *)mid->resp_buf)->CreditRequest); |
113 | } | 114 | } |
114 | 115 | ||
116 | static int | ||
117 | smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size, | ||
118 | unsigned int *num, unsigned int *credits) | ||
119 | { | ||
120 | int rc = 0; | ||
121 | unsigned int scredits; | ||
122 | |||
123 | spin_lock(&server->req_lock); | ||
124 | while (1) { | ||
125 | if (server->credits <= 0) { | ||
126 | spin_unlock(&server->req_lock); | ||
127 | cifs_num_waiters_inc(server); | ||
128 | rc = wait_event_killable(server->request_q, | ||
129 | has_credits(server, &server->credits)); | ||
130 | cifs_num_waiters_dec(server); | ||
131 | if (rc) | ||
132 | return rc; | ||
133 | spin_lock(&server->req_lock); | ||
134 | } else { | ||
135 | if (server->tcpStatus == CifsExiting) { | ||
136 | spin_unlock(&server->req_lock); | ||
137 | return -ENOENT; | ||
138 | } | ||
139 | |||
140 | scredits = server->credits; | ||
141 | /* can deadlock with reopen */ | ||
142 | if (scredits == 1) { | ||
143 | *num = SMB2_MAX_BUFFER_SIZE; | ||
144 | *credits = 0; | ||
145 | break; | ||
146 | } | ||
147 | |||
148 | /* leave one credit for a possible reopen */ | ||
149 | scredits--; | ||
150 | *num = min_t(unsigned int, size, | ||
151 | scredits * SMB2_MAX_BUFFER_SIZE); | ||
152 | |||
153 | *credits = DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE); | ||
154 | server->credits -= *credits; | ||
155 | server->in_flight++; | ||
156 | break; | ||
157 | } | ||
158 | } | ||
159 | spin_unlock(&server->req_lock); | ||
160 | return rc; | ||
161 | } | ||
162 | |||
115 | static __u64 | 163 | static __u64 |
116 | smb2_get_next_mid(struct TCP_Server_Info *server) | 164 | smb2_get_next_mid(struct TCP_Server_Info *server) |
117 | { | 165 | { |
@@ -182,8 +230,9 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info) | |||
182 | /* start with specified wsize, or default */ | 230 | /* start with specified wsize, or default */ |
183 | wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE; | 231 | wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE; |
184 | wsize = min_t(unsigned int, wsize, server->max_write); | 232 | wsize = min_t(unsigned int, wsize, server->max_write); |
185 | /* set it to the maximum buffer size value we can send with 1 credit */ | 233 | |
186 | wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE); | 234 | if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) |
235 | wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE); | ||
187 | 236 | ||
188 | return wsize; | 237 | return wsize; |
189 | } | 238 | } |
@@ -197,8 +246,9 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info) | |||
197 | /* start with specified rsize, or default */ | 246 | /* start with specified rsize, or default */ |
198 | rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE; | 247 | rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE; |
199 | rsize = min_t(unsigned int, rsize, server->max_read); | 248 | rsize = min_t(unsigned int, rsize, server->max_read); |
200 | /* set it to the maximum buffer size value we can send with 1 credit */ | 249 | |
201 | rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE); | 250 | if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) |
251 | rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE); | ||
202 | 252 | ||
203 | return rsize; | 253 | return rsize; |
204 | } | 254 | } |
@@ -687,7 +737,7 @@ smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon, | |||
687 | { | 737 | { |
688 | __le64 eof = cpu_to_le64(size); | 738 | __le64 eof = cpu_to_le64(size); |
689 | return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid, | 739 | return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid, |
690 | cfile->fid.volatile_fid, cfile->pid, &eof); | 740 | cfile->fid.volatile_fid, cfile->pid, &eof, false); |
691 | } | 741 | } |
692 | 742 | ||
693 | static int | 743 | static int |
@@ -1104,6 +1154,13 @@ smb3_parse_lease_buf(void *buf, unsigned int *epoch) | |||
1104 | return le32_to_cpu(lc->lcontext.LeaseState); | 1154 | return le32_to_cpu(lc->lcontext.LeaseState); |
1105 | } | 1155 | } |
1106 | 1156 | ||
1157 | static unsigned int | ||
1158 | smb2_wp_retry_size(struct inode *inode) | ||
1159 | { | ||
1160 | return min_t(unsigned int, CIFS_SB(inode->i_sb)->wsize, | ||
1161 | SMB2_MAX_BUFFER_SIZE); | ||
1162 | } | ||
1163 | |||
1107 | struct smb_version_operations smb20_operations = { | 1164 | struct smb_version_operations smb20_operations = { |
1108 | .compare_fids = smb2_compare_fids, | 1165 | .compare_fids = smb2_compare_fids, |
1109 | .setup_request = smb2_setup_request, | 1166 | .setup_request = smb2_setup_request, |
@@ -1113,6 +1170,7 @@ struct smb_version_operations smb20_operations = { | |||
1113 | .set_credits = smb2_set_credits, | 1170 | .set_credits = smb2_set_credits, |
1114 | .get_credits_field = smb2_get_credits_field, | 1171 | .get_credits_field = smb2_get_credits_field, |
1115 | .get_credits = smb2_get_credits, | 1172 | .get_credits = smb2_get_credits, |
1173 | .wait_mtu_credits = cifs_wait_mtu_credits, | ||
1116 | .get_next_mid = smb2_get_next_mid, | 1174 | .get_next_mid = smb2_get_next_mid, |
1117 | .read_data_offset = smb2_read_data_offset, | 1175 | .read_data_offset = smb2_read_data_offset, |
1118 | .read_data_length = smb2_read_data_length, | 1176 | .read_data_length = smb2_read_data_length, |
@@ -1177,6 +1235,7 @@ struct smb_version_operations smb20_operations = { | |||
1177 | .create_lease_buf = smb2_create_lease_buf, | 1235 | .create_lease_buf = smb2_create_lease_buf, |
1178 | .parse_lease_buf = smb2_parse_lease_buf, | 1236 | .parse_lease_buf = smb2_parse_lease_buf, |
1179 | .clone_range = smb2_clone_range, | 1237 | .clone_range = smb2_clone_range, |
1238 | .wp_retry_size = smb2_wp_retry_size, | ||
1180 | }; | 1239 | }; |
1181 | 1240 | ||
1182 | struct smb_version_operations smb21_operations = { | 1241 | struct smb_version_operations smb21_operations = { |
@@ -1188,6 +1247,7 @@ struct smb_version_operations smb21_operations = { | |||
1188 | .set_credits = smb2_set_credits, | 1247 | .set_credits = smb2_set_credits, |
1189 | .get_credits_field = smb2_get_credits_field, | 1248 | .get_credits_field = smb2_get_credits_field, |
1190 | .get_credits = smb2_get_credits, | 1249 | .get_credits = smb2_get_credits, |
1250 | .wait_mtu_credits = smb2_wait_mtu_credits, | ||
1191 | .get_next_mid = smb2_get_next_mid, | 1251 | .get_next_mid = smb2_get_next_mid, |
1192 | .read_data_offset = smb2_read_data_offset, | 1252 | .read_data_offset = smb2_read_data_offset, |
1193 | .read_data_length = smb2_read_data_length, | 1253 | .read_data_length = smb2_read_data_length, |
@@ -1252,6 +1312,7 @@ struct smb_version_operations smb21_operations = { | |||
1252 | .create_lease_buf = smb2_create_lease_buf, | 1312 | .create_lease_buf = smb2_create_lease_buf, |
1253 | .parse_lease_buf = smb2_parse_lease_buf, | 1313 | .parse_lease_buf = smb2_parse_lease_buf, |
1254 | .clone_range = smb2_clone_range, | 1314 | .clone_range = smb2_clone_range, |
1315 | .wp_retry_size = smb2_wp_retry_size, | ||
1255 | }; | 1316 | }; |
1256 | 1317 | ||
1257 | struct smb_version_operations smb30_operations = { | 1318 | struct smb_version_operations smb30_operations = { |
@@ -1263,6 +1324,7 @@ struct smb_version_operations smb30_operations = { | |||
1263 | .set_credits = smb2_set_credits, | 1324 | .set_credits = smb2_set_credits, |
1264 | .get_credits_field = smb2_get_credits_field, | 1325 | .get_credits_field = smb2_get_credits_field, |
1265 | .get_credits = smb2_get_credits, | 1326 | .get_credits = smb2_get_credits, |
1327 | .wait_mtu_credits = smb2_wait_mtu_credits, | ||
1266 | .get_next_mid = smb2_get_next_mid, | 1328 | .get_next_mid = smb2_get_next_mid, |
1267 | .read_data_offset = smb2_read_data_offset, | 1329 | .read_data_offset = smb2_read_data_offset, |
1268 | .read_data_length = smb2_read_data_length, | 1330 | .read_data_length = smb2_read_data_length, |
@@ -1330,6 +1392,7 @@ struct smb_version_operations smb30_operations = { | |||
1330 | .parse_lease_buf = smb3_parse_lease_buf, | 1392 | .parse_lease_buf = smb3_parse_lease_buf, |
1331 | .clone_range = smb2_clone_range, | 1393 | .clone_range = smb2_clone_range, |
1332 | .validate_negotiate = smb3_validate_negotiate, | 1394 | .validate_negotiate = smb3_validate_negotiate, |
1395 | .wp_retry_size = smb2_wp_retry_size, | ||
1333 | }; | 1396 | }; |
1334 | 1397 | ||
1335 | struct smb_version_values smb20_values = { | 1398 | struct smb_version_values smb20_values = { |
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index b0b260dbb19d..42ebc1a8be6c 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c | |||
@@ -108,7 +108,6 @@ smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ , | |||
108 | if (!tcon) | 108 | if (!tcon) |
109 | goto out; | 109 | goto out; |
110 | 110 | ||
111 | /* BB FIXME when we do write > 64K add +1 for every 64K in req or rsp */ | ||
112 | /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */ | 111 | /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */ |
113 | /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */ | 112 | /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */ |
114 | if ((tcon->ses) && | 113 | if ((tcon->ses) && |
@@ -245,10 +244,6 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) | |||
245 | if (rc) | 244 | if (rc) |
246 | goto out; | 245 | goto out; |
247 | atomic_inc(&tconInfoReconnectCount); | 246 | atomic_inc(&tconInfoReconnectCount); |
248 | /* | ||
249 | * BB FIXME add code to check if wsize needs update due to negotiated | ||
250 | * smb buffer size shrinking. | ||
251 | */ | ||
252 | out: | 247 | out: |
253 | /* | 248 | /* |
254 | * Check if handle based operation so we know whether we can continue | 249 | * Check if handle based operation so we know whether we can continue |
@@ -309,16 +304,6 @@ small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon, | |||
309 | return rc; | 304 | return rc; |
310 | } | 305 | } |
311 | 306 | ||
312 | static void | ||
313 | free_rsp_buf(int resp_buftype, void *rsp) | ||
314 | { | ||
315 | if (resp_buftype == CIFS_SMALL_BUFFER) | ||
316 | cifs_small_buf_release(rsp); | ||
317 | else if (resp_buftype == CIFS_LARGE_BUFFER) | ||
318 | cifs_buf_release(rsp); | ||
319 | } | ||
320 | |||
321 | |||
322 | /* | 307 | /* |
323 | * | 308 | * |
324 | * SMB2 Worker functions follow: | 309 | * SMB2 Worker functions follow: |
@@ -1738,12 +1723,18 @@ smb2_readv_callback(struct mid_q_entry *mid) | |||
1738 | rc); | 1723 | rc); |
1739 | } | 1724 | } |
1740 | /* FIXME: should this be counted toward the initiating task? */ | 1725 | /* FIXME: should this be counted toward the initiating task? */ |
1741 | task_io_account_read(rdata->bytes); | 1726 | task_io_account_read(rdata->got_bytes); |
1742 | cifs_stats_bytes_read(tcon, rdata->bytes); | 1727 | cifs_stats_bytes_read(tcon, rdata->got_bytes); |
1743 | break; | 1728 | break; |
1744 | case MID_REQUEST_SUBMITTED: | 1729 | case MID_REQUEST_SUBMITTED: |
1745 | case MID_RETRY_NEEDED: | 1730 | case MID_RETRY_NEEDED: |
1746 | rdata->result = -EAGAIN; | 1731 | rdata->result = -EAGAIN; |
1732 | if (server->sign && rdata->got_bytes) | ||
1733 | /* reset bytes number since we can not check a sign */ | ||
1734 | rdata->got_bytes = 0; | ||
1735 | /* FIXME: should this be counted toward the initiating task? */ | ||
1736 | task_io_account_read(rdata->got_bytes); | ||
1737 | cifs_stats_bytes_read(tcon, rdata->got_bytes); | ||
1747 | break; | 1738 | break; |
1748 | default: | 1739 | default: |
1749 | if (rdata->result != -ENODATA) | 1740 | if (rdata->result != -ENODATA) |
@@ -1762,11 +1753,12 @@ smb2_readv_callback(struct mid_q_entry *mid) | |||
1762 | int | 1753 | int |
1763 | smb2_async_readv(struct cifs_readdata *rdata) | 1754 | smb2_async_readv(struct cifs_readdata *rdata) |
1764 | { | 1755 | { |
1765 | int rc; | 1756 | int rc, flags = 0; |
1766 | struct smb2_hdr *buf; | 1757 | struct smb2_hdr *buf; |
1767 | struct cifs_io_parms io_parms; | 1758 | struct cifs_io_parms io_parms; |
1768 | struct smb_rqst rqst = { .rq_iov = &rdata->iov, | 1759 | struct smb_rqst rqst = { .rq_iov = &rdata->iov, |
1769 | .rq_nvec = 1 }; | 1760 | .rq_nvec = 1 }; |
1761 | struct TCP_Server_Info *server; | ||
1770 | 1762 | ||
1771 | cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n", | 1763 | cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n", |
1772 | __func__, rdata->offset, rdata->bytes); | 1764 | __func__, rdata->offset, rdata->bytes); |
@@ -1777,18 +1769,41 @@ smb2_async_readv(struct cifs_readdata *rdata) | |||
1777 | io_parms.persistent_fid = rdata->cfile->fid.persistent_fid; | 1769 | io_parms.persistent_fid = rdata->cfile->fid.persistent_fid; |
1778 | io_parms.volatile_fid = rdata->cfile->fid.volatile_fid; | 1770 | io_parms.volatile_fid = rdata->cfile->fid.volatile_fid; |
1779 | io_parms.pid = rdata->pid; | 1771 | io_parms.pid = rdata->pid; |
1772 | |||
1773 | server = io_parms.tcon->ses->server; | ||
1774 | |||
1780 | rc = smb2_new_read_req(&rdata->iov, &io_parms, 0, 0); | 1775 | rc = smb2_new_read_req(&rdata->iov, &io_parms, 0, 0); |
1781 | if (rc) | 1776 | if (rc) { |
1777 | if (rc == -EAGAIN && rdata->credits) { | ||
1778 | /* credits was reset by reconnect */ | ||
1779 | rdata->credits = 0; | ||
1780 | /* reduce in_flight value since we won't send the req */ | ||
1781 | spin_lock(&server->req_lock); | ||
1782 | server->in_flight--; | ||
1783 | spin_unlock(&server->req_lock); | ||
1784 | } | ||
1782 | return rc; | 1785 | return rc; |
1786 | } | ||
1783 | 1787 | ||
1784 | buf = (struct smb2_hdr *)rdata->iov.iov_base; | 1788 | buf = (struct smb2_hdr *)rdata->iov.iov_base; |
1785 | /* 4 for rfc1002 length field */ | 1789 | /* 4 for rfc1002 length field */ |
1786 | rdata->iov.iov_len = get_rfc1002_length(rdata->iov.iov_base) + 4; | 1790 | rdata->iov.iov_len = get_rfc1002_length(rdata->iov.iov_base) + 4; |
1787 | 1791 | ||
1792 | if (rdata->credits) { | ||
1793 | buf->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes, | ||
1794 | SMB2_MAX_BUFFER_SIZE)); | ||
1795 | spin_lock(&server->req_lock); | ||
1796 | server->credits += rdata->credits - | ||
1797 | le16_to_cpu(buf->CreditCharge); | ||
1798 | spin_unlock(&server->req_lock); | ||
1799 | wake_up(&server->request_q); | ||
1800 | flags = CIFS_HAS_CREDITS; | ||
1801 | } | ||
1802 | |||
1788 | kref_get(&rdata->refcount); | 1803 | kref_get(&rdata->refcount); |
1789 | rc = cifs_call_async(io_parms.tcon->ses->server, &rqst, | 1804 | rc = cifs_call_async(io_parms.tcon->ses->server, &rqst, |
1790 | cifs_readv_receive, smb2_readv_callback, | 1805 | cifs_readv_receive, smb2_readv_callback, |
1791 | rdata, 0); | 1806 | rdata, flags); |
1792 | if (rc) { | 1807 | if (rc) { |
1793 | kref_put(&rdata->refcount, cifs_readdata_release); | 1808 | kref_put(&rdata->refcount, cifs_readdata_release); |
1794 | cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE); | 1809 | cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE); |
@@ -1906,15 +1921,25 @@ int | |||
1906 | smb2_async_writev(struct cifs_writedata *wdata, | 1921 | smb2_async_writev(struct cifs_writedata *wdata, |
1907 | void (*release)(struct kref *kref)) | 1922 | void (*release)(struct kref *kref)) |
1908 | { | 1923 | { |
1909 | int rc = -EACCES; | 1924 | int rc = -EACCES, flags = 0; |
1910 | struct smb2_write_req *req = NULL; | 1925 | struct smb2_write_req *req = NULL; |
1911 | struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); | 1926 | struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); |
1927 | struct TCP_Server_Info *server = tcon->ses->server; | ||
1912 | struct kvec iov; | 1928 | struct kvec iov; |
1913 | struct smb_rqst rqst; | 1929 | struct smb_rqst rqst; |
1914 | 1930 | ||
1915 | rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req); | 1931 | rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req); |
1916 | if (rc) | 1932 | if (rc) { |
1933 | if (rc == -EAGAIN && wdata->credits) { | ||
1934 | /* credits was reset by reconnect */ | ||
1935 | wdata->credits = 0; | ||
1936 | /* reduce in_flight value since we won't send the req */ | ||
1937 | spin_lock(&server->req_lock); | ||
1938 | server->in_flight--; | ||
1939 | spin_unlock(&server->req_lock); | ||
1940 | } | ||
1917 | goto async_writev_out; | 1941 | goto async_writev_out; |
1942 | } | ||
1918 | 1943 | ||
1919 | req->hdr.ProcessId = cpu_to_le32(wdata->cfile->pid); | 1944 | req->hdr.ProcessId = cpu_to_le32(wdata->cfile->pid); |
1920 | 1945 | ||
@@ -1947,9 +1972,20 @@ smb2_async_writev(struct cifs_writedata *wdata, | |||
1947 | 1972 | ||
1948 | inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */); | 1973 | inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */); |
1949 | 1974 | ||
1975 | if (wdata->credits) { | ||
1976 | req->hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes, | ||
1977 | SMB2_MAX_BUFFER_SIZE)); | ||
1978 | spin_lock(&server->req_lock); | ||
1979 | server->credits += wdata->credits - | ||
1980 | le16_to_cpu(req->hdr.CreditCharge); | ||
1981 | spin_unlock(&server->req_lock); | ||
1982 | wake_up(&server->request_q); | ||
1983 | flags = CIFS_HAS_CREDITS; | ||
1984 | } | ||
1985 | |||
1950 | kref_get(&wdata->refcount); | 1986 | kref_get(&wdata->refcount); |
1951 | rc = cifs_call_async(tcon->ses->server, &rqst, NULL, | 1987 | rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, wdata, |
1952 | smb2_writev_callback, wdata, 0); | 1988 | flags); |
1953 | 1989 | ||
1954 | if (rc) { | 1990 | if (rc) { |
1955 | kref_put(&wdata->refcount, release); | 1991 | kref_put(&wdata->refcount, release); |
@@ -2325,7 +2361,7 @@ SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon, | |||
2325 | 2361 | ||
2326 | int | 2362 | int |
2327 | SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | 2363 | SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, |
2328 | u64 volatile_fid, u32 pid, __le64 *eof) | 2364 | u64 volatile_fid, u32 pid, __le64 *eof, bool is_falloc) |
2329 | { | 2365 | { |
2330 | struct smb2_file_eof_info info; | 2366 | struct smb2_file_eof_info info; |
2331 | void *data; | 2367 | void *data; |
@@ -2336,8 +2372,12 @@ SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
2336 | data = &info; | 2372 | data = &info; |
2337 | size = sizeof(struct smb2_file_eof_info); | 2373 | size = sizeof(struct smb2_file_eof_info); |
2338 | 2374 | ||
2339 | return send_set_info(xid, tcon, persistent_fid, volatile_fid, pid, | 2375 | if (is_falloc) |
2340 | FILE_END_OF_FILE_INFORMATION, 1, &data, &size); | 2376 | return send_set_info(xid, tcon, persistent_fid, volatile_fid, |
2377 | pid, FILE_ALLOCATION_INFORMATION, 1, &data, &size); | ||
2378 | else | ||
2379 | return send_set_info(xid, tcon, persistent_fid, volatile_fid, | ||
2380 | pid, FILE_END_OF_FILE_INFORMATION, 1, &data, &size); | ||
2341 | } | 2381 | } |
2342 | 2382 | ||
2343 | int | 2383 | int |
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 0ce48db20a65..67e8ce8055de 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h | |||
@@ -139,7 +139,7 @@ extern int SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon, | |||
139 | __le16 *target_file); | 139 | __le16 *target_file); |
140 | extern int SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, | 140 | extern int SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, |
141 | u64 persistent_fid, u64 volatile_fid, u32 pid, | 141 | u64 persistent_fid, u64 volatile_fid, u32 pid, |
142 | __le64 *eof); | 142 | __le64 *eof, bool is_fallocate); |
143 | extern int SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon, | 143 | extern int SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon, |
144 | u64 persistent_fid, u64 volatile_fid, | 144 | u64 persistent_fid, u64 volatile_fid, |
145 | FILE_BASIC_INFO *buf); | 145 | FILE_BASIC_INFO *buf); |
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c index 59c748ce872f..5111e7272db6 100644 --- a/fs/cifs/smb2transport.c +++ b/fs/cifs/smb2transport.c | |||
@@ -466,7 +466,12 @@ smb2_verify_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) | |||
466 | static inline void | 466 | static inline void |
467 | smb2_seq_num_into_buf(struct TCP_Server_Info *server, struct smb2_hdr *hdr) | 467 | smb2_seq_num_into_buf(struct TCP_Server_Info *server, struct smb2_hdr *hdr) |
468 | { | 468 | { |
469 | unsigned int i, num = le16_to_cpu(hdr->CreditCharge); | ||
470 | |||
469 | hdr->MessageId = get_next_mid64(server); | 471 | hdr->MessageId = get_next_mid64(server); |
472 | /* skip message numbers according to CreditCharge field */ | ||
473 | for (i = 1; i < num; i++) | ||
474 | get_next_mid(server); | ||
470 | } | 475 | } |
471 | 476 | ||
472 | static struct mid_q_entry * | 477 | static struct mid_q_entry * |
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 18cd5650a5fc..9d087f4e7d4e 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
@@ -448,6 +448,15 @@ wait_for_free_request(struct TCP_Server_Info *server, const int timeout, | |||
448 | return wait_for_free_credits(server, timeout, val); | 448 | return wait_for_free_credits(server, timeout, val); |
449 | } | 449 | } |
450 | 450 | ||
451 | int | ||
452 | cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size, | ||
453 | unsigned int *num, unsigned int *credits) | ||
454 | { | ||
455 | *num = size; | ||
456 | *credits = 0; | ||
457 | return 0; | ||
458 | } | ||
459 | |||
451 | static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf, | 460 | static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf, |
452 | struct mid_q_entry **ppmidQ) | 461 | struct mid_q_entry **ppmidQ) |
453 | { | 462 | { |
@@ -531,20 +540,23 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst, | |||
531 | { | 540 | { |
532 | int rc, timeout, optype; | 541 | int rc, timeout, optype; |
533 | struct mid_q_entry *mid; | 542 | struct mid_q_entry *mid; |
543 | unsigned int credits = 0; | ||
534 | 544 | ||
535 | timeout = flags & CIFS_TIMEOUT_MASK; | 545 | timeout = flags & CIFS_TIMEOUT_MASK; |
536 | optype = flags & CIFS_OP_MASK; | 546 | optype = flags & CIFS_OP_MASK; |
537 | 547 | ||
538 | rc = wait_for_free_request(server, timeout, optype); | 548 | if ((flags & CIFS_HAS_CREDITS) == 0) { |
539 | if (rc) | 549 | rc = wait_for_free_request(server, timeout, optype); |
540 | return rc; | 550 | if (rc) |
551 | return rc; | ||
552 | credits = 1; | ||
553 | } | ||
541 | 554 | ||
542 | mutex_lock(&server->srv_mutex); | 555 | mutex_lock(&server->srv_mutex); |
543 | mid = server->ops->setup_async_request(server, rqst); | 556 | mid = server->ops->setup_async_request(server, rqst); |
544 | if (IS_ERR(mid)) { | 557 | if (IS_ERR(mid)) { |
545 | mutex_unlock(&server->srv_mutex); | 558 | mutex_unlock(&server->srv_mutex); |
546 | add_credits(server, 1, optype); | 559 | add_credits_and_wake_if(server, credits, optype); |
547 | wake_up(&server->request_q); | ||
548 | return PTR_ERR(mid); | 560 | return PTR_ERR(mid); |
549 | } | 561 | } |
550 | 562 | ||
@@ -572,8 +584,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst, | |||
572 | return 0; | 584 | return 0; |
573 | 585 | ||
574 | cifs_delete_mid(mid); | 586 | cifs_delete_mid(mid); |
575 | add_credits(server, 1, optype); | 587 | add_credits_and_wake_if(server, credits, optype); |
576 | wake_up(&server->request_q); | ||
577 | return rc; | 588 | return rc; |
578 | } | 589 | } |
579 | 590 | ||
diff --git a/fs/namespace.c b/fs/namespace.c index 2a1447c946e7..0acabea58319 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -890,8 +890,21 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root, | |||
890 | 890 | ||
891 | mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED); | 891 | mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED); |
892 | /* Don't allow unprivileged users to change mount flags */ | 892 | /* Don't allow unprivileged users to change mount flags */ |
893 | if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY)) | 893 | if (flag & CL_UNPRIVILEGED) { |
894 | mnt->mnt.mnt_flags |= MNT_LOCK_READONLY; | 894 | mnt->mnt.mnt_flags |= MNT_LOCK_ATIME; |
895 | |||
896 | if (mnt->mnt.mnt_flags & MNT_READONLY) | ||
897 | mnt->mnt.mnt_flags |= MNT_LOCK_READONLY; | ||
898 | |||
899 | if (mnt->mnt.mnt_flags & MNT_NODEV) | ||
900 | mnt->mnt.mnt_flags |= MNT_LOCK_NODEV; | ||
901 | |||
902 | if (mnt->mnt.mnt_flags & MNT_NOSUID) | ||
903 | mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID; | ||
904 | |||
905 | if (mnt->mnt.mnt_flags & MNT_NOEXEC) | ||
906 | mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC; | ||
907 | } | ||
895 | 908 | ||
896 | /* Don't allow unprivileged users to reveal what is under a mount */ | 909 | /* Don't allow unprivileged users to reveal what is under a mount */ |
897 | if ((flag & CL_UNPRIVILEGED) && list_empty(&old->mnt_expire)) | 910 | if ((flag & CL_UNPRIVILEGED) && list_empty(&old->mnt_expire)) |
@@ -1896,9 +1909,6 @@ static int change_mount_flags(struct vfsmount *mnt, int ms_flags) | |||
1896 | if (readonly_request == __mnt_is_readonly(mnt)) | 1909 | if (readonly_request == __mnt_is_readonly(mnt)) |
1897 | return 0; | 1910 | return 0; |
1898 | 1911 | ||
1899 | if (mnt->mnt_flags & MNT_LOCK_READONLY) | ||
1900 | return -EPERM; | ||
1901 | |||
1902 | if (readonly_request) | 1912 | if (readonly_request) |
1903 | error = mnt_make_readonly(real_mount(mnt)); | 1913 | error = mnt_make_readonly(real_mount(mnt)); |
1904 | else | 1914 | else |
@@ -1924,6 +1934,33 @@ static int do_remount(struct path *path, int flags, int mnt_flags, | |||
1924 | if (path->dentry != path->mnt->mnt_root) | 1934 | if (path->dentry != path->mnt->mnt_root) |
1925 | return -EINVAL; | 1935 | return -EINVAL; |
1926 | 1936 | ||
1937 | /* Don't allow changing of locked mnt flags. | ||
1938 | * | ||
1939 | * No locks need to be held here while testing the various | ||
1940 | * MNT_LOCK flags because those flags can never be cleared | ||
1941 | * once they are set. | ||
1942 | */ | ||
1943 | if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) && | ||
1944 | !(mnt_flags & MNT_READONLY)) { | ||
1945 | return -EPERM; | ||
1946 | } | ||
1947 | if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) && | ||
1948 | !(mnt_flags & MNT_NODEV)) { | ||
1949 | return -EPERM; | ||
1950 | } | ||
1951 | if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) && | ||
1952 | !(mnt_flags & MNT_NOSUID)) { | ||
1953 | return -EPERM; | ||
1954 | } | ||
1955 | if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) && | ||
1956 | !(mnt_flags & MNT_NOEXEC)) { | ||
1957 | return -EPERM; | ||
1958 | } | ||
1959 | if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) && | ||
1960 | ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) { | ||
1961 | return -EPERM; | ||
1962 | } | ||
1963 | |||
1927 | err = security_sb_remount(sb, data); | 1964 | err = security_sb_remount(sb, data); |
1928 | if (err) | 1965 | if (err) |
1929 | return err; | 1966 | return err; |
@@ -1937,7 +1974,7 @@ static int do_remount(struct path *path, int flags, int mnt_flags, | |||
1937 | err = do_remount_sb(sb, flags, data, 0); | 1974 | err = do_remount_sb(sb, flags, data, 0); |
1938 | if (!err) { | 1975 | if (!err) { |
1939 | lock_mount_hash(); | 1976 | lock_mount_hash(); |
1940 | mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK; | 1977 | mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK; |
1941 | mnt->mnt.mnt_flags = mnt_flags; | 1978 | mnt->mnt.mnt_flags = mnt_flags; |
1942 | touch_mnt_namespace(mnt->mnt_ns); | 1979 | touch_mnt_namespace(mnt->mnt_ns); |
1943 | unlock_mount_hash(); | 1980 | unlock_mount_hash(); |
@@ -2122,7 +2159,7 @@ static int do_new_mount(struct path *path, const char *fstype, int flags, | |||
2122 | */ | 2159 | */ |
2123 | if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) { | 2160 | if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) { |
2124 | flags |= MS_NODEV; | 2161 | flags |= MS_NODEV; |
2125 | mnt_flags |= MNT_NODEV; | 2162 | mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV; |
2126 | } | 2163 | } |
2127 | } | 2164 | } |
2128 | 2165 | ||
@@ -2436,6 +2473,14 @@ long do_mount(const char *dev_name, const char *dir_name, | |||
2436 | if (flags & MS_RDONLY) | 2473 | if (flags & MS_RDONLY) |
2437 | mnt_flags |= MNT_READONLY; | 2474 | mnt_flags |= MNT_READONLY; |
2438 | 2475 | ||
2476 | /* The default atime for remount is preservation */ | ||
2477 | if ((flags & MS_REMOUNT) && | ||
2478 | ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME | | ||
2479 | MS_STRICTATIME)) == 0)) { | ||
2480 | mnt_flags &= ~MNT_ATIME_MASK; | ||
2481 | mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK; | ||
2482 | } | ||
2483 | |||
2439 | flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN | | 2484 | flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN | |
2440 | MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | | 2485 | MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | |
2441 | MS_STRICTATIME); | 2486 | MS_STRICTATIME); |
@@ -2972,13 +3017,13 @@ static void *mntns_get(struct task_struct *task) | |||
2972 | struct mnt_namespace *ns = NULL; | 3017 | struct mnt_namespace *ns = NULL; |
2973 | struct nsproxy *nsproxy; | 3018 | struct nsproxy *nsproxy; |
2974 | 3019 | ||
2975 | rcu_read_lock(); | 3020 | task_lock(task); |
2976 | nsproxy = task_nsproxy(task); | 3021 | nsproxy = task->nsproxy; |
2977 | if (nsproxy) { | 3022 | if (nsproxy) { |
2978 | ns = nsproxy->mnt_ns; | 3023 | ns = nsproxy->mnt_ns; |
2979 | get_mnt_ns(ns); | 3024 | get_mnt_ns(ns); |
2980 | } | 3025 | } |
2981 | rcu_read_unlock(); | 3026 | task_unlock(task); |
2982 | 3027 | ||
2983 | return ns; | 3028 | return ns; |
2984 | } | 3029 | } |
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 1d09289c8f0e..180d1ec9c32e 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c | |||
@@ -1205,7 +1205,7 @@ static const struct file_operations nfs_server_list_fops = { | |||
1205 | .open = nfs_server_list_open, | 1205 | .open = nfs_server_list_open, |
1206 | .read = seq_read, | 1206 | .read = seq_read, |
1207 | .llseek = seq_lseek, | 1207 | .llseek = seq_lseek, |
1208 | .release = seq_release, | 1208 | .release = seq_release_net, |
1209 | .owner = THIS_MODULE, | 1209 | .owner = THIS_MODULE, |
1210 | }; | 1210 | }; |
1211 | 1211 | ||
@@ -1226,7 +1226,7 @@ static const struct file_operations nfs_volume_list_fops = { | |||
1226 | .open = nfs_volume_list_open, | 1226 | .open = nfs_volume_list_open, |
1227 | .read = seq_read, | 1227 | .read = seq_read, |
1228 | .llseek = seq_lseek, | 1228 | .llseek = seq_lseek, |
1229 | .release = seq_release, | 1229 | .release = seq_release_net, |
1230 | .owner = THIS_MODULE, | 1230 | .owner = THIS_MODULE, |
1231 | }; | 1231 | }; |
1232 | 1232 | ||
@@ -1236,19 +1236,8 @@ static const struct file_operations nfs_volume_list_fops = { | |||
1236 | */ | 1236 | */ |
1237 | static int nfs_server_list_open(struct inode *inode, struct file *file) | 1237 | static int nfs_server_list_open(struct inode *inode, struct file *file) |
1238 | { | 1238 | { |
1239 | struct seq_file *m; | 1239 | return seq_open_net(inode, file, &nfs_server_list_ops, |
1240 | int ret; | 1240 | sizeof(struct seq_net_private)); |
1241 | struct pid_namespace *pid_ns = file->f_dentry->d_sb->s_fs_info; | ||
1242 | struct net *net = pid_ns->child_reaper->nsproxy->net_ns; | ||
1243 | |||
1244 | ret = seq_open(file, &nfs_server_list_ops); | ||
1245 | if (ret < 0) | ||
1246 | return ret; | ||
1247 | |||
1248 | m = file->private_data; | ||
1249 | m->private = net; | ||
1250 | |||
1251 | return 0; | ||
1252 | } | 1241 | } |
1253 | 1242 | ||
1254 | /* | 1243 | /* |
@@ -1256,7 +1245,7 @@ static int nfs_server_list_open(struct inode *inode, struct file *file) | |||
1256 | */ | 1245 | */ |
1257 | static void *nfs_server_list_start(struct seq_file *m, loff_t *_pos) | 1246 | static void *nfs_server_list_start(struct seq_file *m, loff_t *_pos) |
1258 | { | 1247 | { |
1259 | struct nfs_net *nn = net_generic(m->private, nfs_net_id); | 1248 | struct nfs_net *nn = net_generic(seq_file_net(m), nfs_net_id); |
1260 | 1249 | ||
1261 | /* lock the list against modification */ | 1250 | /* lock the list against modification */ |
1262 | spin_lock(&nn->nfs_client_lock); | 1251 | spin_lock(&nn->nfs_client_lock); |
@@ -1268,7 +1257,7 @@ static void *nfs_server_list_start(struct seq_file *m, loff_t *_pos) | |||
1268 | */ | 1257 | */ |
1269 | static void *nfs_server_list_next(struct seq_file *p, void *v, loff_t *pos) | 1258 | static void *nfs_server_list_next(struct seq_file *p, void *v, loff_t *pos) |
1270 | { | 1259 | { |
1271 | struct nfs_net *nn = net_generic(p->private, nfs_net_id); | 1260 | struct nfs_net *nn = net_generic(seq_file_net(p), nfs_net_id); |
1272 | 1261 | ||
1273 | return seq_list_next(v, &nn->nfs_client_list, pos); | 1262 | return seq_list_next(v, &nn->nfs_client_list, pos); |
1274 | } | 1263 | } |
@@ -1278,7 +1267,7 @@ static void *nfs_server_list_next(struct seq_file *p, void *v, loff_t *pos) | |||
1278 | */ | 1267 | */ |
1279 | static void nfs_server_list_stop(struct seq_file *p, void *v) | 1268 | static void nfs_server_list_stop(struct seq_file *p, void *v) |
1280 | { | 1269 | { |
1281 | struct nfs_net *nn = net_generic(p->private, nfs_net_id); | 1270 | struct nfs_net *nn = net_generic(seq_file_net(p), nfs_net_id); |
1282 | 1271 | ||
1283 | spin_unlock(&nn->nfs_client_lock); | 1272 | spin_unlock(&nn->nfs_client_lock); |
1284 | } | 1273 | } |
@@ -1289,7 +1278,7 @@ static void nfs_server_list_stop(struct seq_file *p, void *v) | |||
1289 | static int nfs_server_list_show(struct seq_file *m, void *v) | 1278 | static int nfs_server_list_show(struct seq_file *m, void *v) |
1290 | { | 1279 | { |
1291 | struct nfs_client *clp; | 1280 | struct nfs_client *clp; |
1292 | struct nfs_net *nn = net_generic(m->private, nfs_net_id); | 1281 | struct nfs_net *nn = net_generic(seq_file_net(m), nfs_net_id); |
1293 | 1282 | ||
1294 | /* display header on line 1 */ | 1283 | /* display header on line 1 */ |
1295 | if (v == &nn->nfs_client_list) { | 1284 | if (v == &nn->nfs_client_list) { |
@@ -1321,19 +1310,8 @@ static int nfs_server_list_show(struct seq_file *m, void *v) | |||
1321 | */ | 1310 | */ |
1322 | static int nfs_volume_list_open(struct inode *inode, struct file *file) | 1311 | static int nfs_volume_list_open(struct inode *inode, struct file *file) |
1323 | { | 1312 | { |
1324 | struct seq_file *m; | 1313 | return seq_open_net(inode, file, &nfs_server_list_ops, |
1325 | int ret; | 1314 | sizeof(struct seq_net_private)); |
1326 | struct pid_namespace *pid_ns = file->f_dentry->d_sb->s_fs_info; | ||
1327 | struct net *net = pid_ns->child_reaper->nsproxy->net_ns; | ||
1328 | |||
1329 | ret = seq_open(file, &nfs_volume_list_ops); | ||
1330 | if (ret < 0) | ||
1331 | return ret; | ||
1332 | |||
1333 | m = file->private_data; | ||
1334 | m->private = net; | ||
1335 | |||
1336 | return 0; | ||
1337 | } | 1315 | } |
1338 | 1316 | ||
1339 | /* | 1317 | /* |
@@ -1341,7 +1319,7 @@ static int nfs_volume_list_open(struct inode *inode, struct file *file) | |||
1341 | */ | 1319 | */ |
1342 | static void *nfs_volume_list_start(struct seq_file *m, loff_t *_pos) | 1320 | static void *nfs_volume_list_start(struct seq_file *m, loff_t *_pos) |
1343 | { | 1321 | { |
1344 | struct nfs_net *nn = net_generic(m->private, nfs_net_id); | 1322 | struct nfs_net *nn = net_generic(seq_file_net(m), nfs_net_id); |
1345 | 1323 | ||
1346 | /* lock the list against modification */ | 1324 | /* lock the list against modification */ |
1347 | spin_lock(&nn->nfs_client_lock); | 1325 | spin_lock(&nn->nfs_client_lock); |
@@ -1353,7 +1331,7 @@ static void *nfs_volume_list_start(struct seq_file *m, loff_t *_pos) | |||
1353 | */ | 1331 | */ |
1354 | static void *nfs_volume_list_next(struct seq_file *p, void *v, loff_t *pos) | 1332 | static void *nfs_volume_list_next(struct seq_file *p, void *v, loff_t *pos) |
1355 | { | 1333 | { |
1356 | struct nfs_net *nn = net_generic(p->private, nfs_net_id); | 1334 | struct nfs_net *nn = net_generic(seq_file_net(p), nfs_net_id); |
1357 | 1335 | ||
1358 | return seq_list_next(v, &nn->nfs_volume_list, pos); | 1336 | return seq_list_next(v, &nn->nfs_volume_list, pos); |
1359 | } | 1337 | } |
@@ -1363,7 +1341,7 @@ static void *nfs_volume_list_next(struct seq_file *p, void *v, loff_t *pos) | |||
1363 | */ | 1341 | */ |
1364 | static void nfs_volume_list_stop(struct seq_file *p, void *v) | 1342 | static void nfs_volume_list_stop(struct seq_file *p, void *v) |
1365 | { | 1343 | { |
1366 | struct nfs_net *nn = net_generic(p->private, nfs_net_id); | 1344 | struct nfs_net *nn = net_generic(seq_file_net(p), nfs_net_id); |
1367 | 1345 | ||
1368 | spin_unlock(&nn->nfs_client_lock); | 1346 | spin_unlock(&nn->nfs_client_lock); |
1369 | } | 1347 | } |
@@ -1376,7 +1354,7 @@ static int nfs_volume_list_show(struct seq_file *m, void *v) | |||
1376 | struct nfs_server *server; | 1354 | struct nfs_server *server; |
1377 | struct nfs_client *clp; | 1355 | struct nfs_client *clp; |
1378 | char dev[8], fsid[17]; | 1356 | char dev[8], fsid[17]; |
1379 | struct nfs_net *nn = net_generic(m->private, nfs_net_id); | 1357 | struct nfs_net *nn = net_generic(seq_file_net(m), nfs_net_id); |
1380 | 1358 | ||
1381 | /* display header on line 1 */ | 1359 | /* display header on line 1 */ |
1382 | if (v == &nn->nfs_volume_list) { | 1360 | if (v == &nn->nfs_volume_list) { |
@@ -1407,6 +1385,45 @@ static int nfs_volume_list_show(struct seq_file *m, void *v) | |||
1407 | return 0; | 1385 | return 0; |
1408 | } | 1386 | } |
1409 | 1387 | ||
1388 | int nfs_fs_proc_net_init(struct net *net) | ||
1389 | { | ||
1390 | struct nfs_net *nn = net_generic(net, nfs_net_id); | ||
1391 | struct proc_dir_entry *p; | ||
1392 | |||
1393 | nn->proc_nfsfs = proc_net_mkdir(net, "nfsfs", net->proc_net); | ||
1394 | if (!nn->proc_nfsfs) | ||
1395 | goto error_0; | ||
1396 | |||
1397 | /* a file of servers with which we're dealing */ | ||
1398 | p = proc_create("servers", S_IFREG|S_IRUGO, | ||
1399 | nn->proc_nfsfs, &nfs_server_list_fops); | ||
1400 | if (!p) | ||
1401 | goto error_1; | ||
1402 | |||
1403 | /* a file of volumes that we have mounted */ | ||
1404 | p = proc_create("volumes", S_IFREG|S_IRUGO, | ||
1405 | nn->proc_nfsfs, &nfs_volume_list_fops); | ||
1406 | if (!p) | ||
1407 | goto error_2; | ||
1408 | return 0; | ||
1409 | |||
1410 | error_2: | ||
1411 | remove_proc_entry("servers", nn->proc_nfsfs); | ||
1412 | error_1: | ||
1413 | remove_proc_entry("fs/nfsfs", NULL); | ||
1414 | error_0: | ||
1415 | return -ENOMEM; | ||
1416 | } | ||
1417 | |||
1418 | void nfs_fs_proc_net_exit(struct net *net) | ||
1419 | { | ||
1420 | struct nfs_net *nn = net_generic(net, nfs_net_id); | ||
1421 | |||
1422 | remove_proc_entry("volumes", nn->proc_nfsfs); | ||
1423 | remove_proc_entry("servers", nn->proc_nfsfs); | ||
1424 | remove_proc_entry("fs/nfsfs", NULL); | ||
1425 | } | ||
1426 | |||
1410 | /* | 1427 | /* |
1411 | * initialise the /proc/fs/nfsfs/ directory | 1428 | * initialise the /proc/fs/nfsfs/ directory |
1412 | */ | 1429 | */ |
@@ -1419,14 +1436,12 @@ int __init nfs_fs_proc_init(void) | |||
1419 | goto error_0; | 1436 | goto error_0; |
1420 | 1437 | ||
1421 | /* a file of servers with which we're dealing */ | 1438 | /* a file of servers with which we're dealing */ |
1422 | p = proc_create("servers", S_IFREG|S_IRUGO, | 1439 | p = proc_symlink("servers", proc_fs_nfs, "../../net/nfsfs/servers"); |
1423 | proc_fs_nfs, &nfs_server_list_fops); | ||
1424 | if (!p) | 1440 | if (!p) |
1425 | goto error_1; | 1441 | goto error_1; |
1426 | 1442 | ||
1427 | /* a file of volumes that we have mounted */ | 1443 | /* a file of volumes that we have mounted */ |
1428 | p = proc_create("volumes", S_IFREG|S_IRUGO, | 1444 | p = proc_symlink("volumes", proc_fs_nfs, "../../net/nfsfs/volumes"); |
1429 | proc_fs_nfs, &nfs_volume_list_fops); | ||
1430 | if (!p) | 1445 | if (!p) |
1431 | goto error_2; | 1446 | goto error_2; |
1432 | return 0; | 1447 | return 0; |
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index abd37a380535..68921b01b792 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c | |||
@@ -1840,11 +1840,12 @@ EXPORT_SYMBOL_GPL(nfs_net_id); | |||
1840 | static int nfs_net_init(struct net *net) | 1840 | static int nfs_net_init(struct net *net) |
1841 | { | 1841 | { |
1842 | nfs_clients_init(net); | 1842 | nfs_clients_init(net); |
1843 | return 0; | 1843 | return nfs_fs_proc_net_init(net); |
1844 | } | 1844 | } |
1845 | 1845 | ||
1846 | static void nfs_net_exit(struct net *net) | 1846 | static void nfs_net_exit(struct net *net) |
1847 | { | 1847 | { |
1848 | nfs_fs_proc_net_exit(net); | ||
1848 | nfs_cleanup_cb_ident_idr(net); | 1849 | nfs_cleanup_cb_ident_idr(net); |
1849 | } | 1850 | } |
1850 | 1851 | ||
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 617f36611d4a..e2a45ae5014e 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h | |||
@@ -195,7 +195,16 @@ extern struct rpc_clnt *nfs4_find_or_create_ds_client(struct nfs_client *, | |||
195 | #ifdef CONFIG_PROC_FS | 195 | #ifdef CONFIG_PROC_FS |
196 | extern int __init nfs_fs_proc_init(void); | 196 | extern int __init nfs_fs_proc_init(void); |
197 | extern void nfs_fs_proc_exit(void); | 197 | extern void nfs_fs_proc_exit(void); |
198 | extern int nfs_fs_proc_net_init(struct net *net); | ||
199 | extern void nfs_fs_proc_net_exit(struct net *net); | ||
198 | #else | 200 | #else |
201 | static inline int nfs_fs_proc_net_init(struct net *net) | ||
202 | { | ||
203 | return 0; | ||
204 | } | ||
205 | static inline void nfs_fs_proc_net_exit(struct net *net) | ||
206 | { | ||
207 | } | ||
199 | static inline int nfs_fs_proc_init(void) | 208 | static inline int nfs_fs_proc_init(void) |
200 | { | 209 | { |
201 | return 0; | 210 | return 0; |
diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h index 8ee1fab83268..ef221fb8a183 100644 --- a/fs/nfs/netns.h +++ b/fs/nfs/netns.h | |||
@@ -29,6 +29,9 @@ struct nfs_net { | |||
29 | #endif | 29 | #endif |
30 | spinlock_t nfs_client_lock; | 30 | spinlock_t nfs_client_lock; |
31 | struct timespec boot_time; | 31 | struct timespec boot_time; |
32 | #ifdef CONFIG_PROC_FS | ||
33 | struct proc_dir_entry *proc_nfsfs; | ||
34 | #endif | ||
32 | }; | 35 | }; |
33 | 36 | ||
34 | extern int nfs_net_id; | 37 | extern int nfs_net_id; |
diff --git a/fs/nfsd/acl.h b/fs/nfsd/acl.h index a986ceb6fd0d..4cd7c69a6cb9 100644 --- a/fs/nfsd/acl.h +++ b/fs/nfsd/acl.h | |||
@@ -47,7 +47,7 @@ struct svc_rqst; | |||
47 | #define NFS4_ACL_MAX ((PAGE_SIZE - sizeof(struct nfs4_acl)) \ | 47 | #define NFS4_ACL_MAX ((PAGE_SIZE - sizeof(struct nfs4_acl)) \ |
48 | / sizeof(struct nfs4_ace)) | 48 | / sizeof(struct nfs4_ace)) |
49 | 49 | ||
50 | struct nfs4_acl *nfs4_acl_new(int); | 50 | int nfs4_acl_bytes(int entries); |
51 | int nfs4_acl_get_whotype(char *, u32); | 51 | int nfs4_acl_get_whotype(char *, u32); |
52 | __be32 nfs4_acl_write_who(struct xdr_stream *xdr, int who); | 52 | __be32 nfs4_acl_write_who(struct xdr_stream *xdr, int who); |
53 | 53 | ||
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c index 72f44823adbb..9d46a0bdd9f9 100644 --- a/fs/nfsd/auth.c +++ b/fs/nfsd/auth.c | |||
@@ -28,7 +28,7 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp) | |||
28 | validate_process_creds(); | 28 | validate_process_creds(); |
29 | 29 | ||
30 | /* discard any old override before preparing the new set */ | 30 | /* discard any old override before preparing the new set */ |
31 | revert_creds(get_cred(current->real_cred)); | 31 | revert_creds(get_cred(current_real_cred())); |
32 | new = prepare_creds(); | 32 | new = prepare_creds(); |
33 | if (!new) | 33 | if (!new) |
34 | return -ENOMEM; | 34 | return -ENOMEM; |
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index 13b85f94d9e2..72ffd7cce3c3 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c | |||
@@ -698,8 +698,8 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem) | |||
698 | 698 | ||
699 | kref_get(&item->ex_client->ref); | 699 | kref_get(&item->ex_client->ref); |
700 | new->ex_client = item->ex_client; | 700 | new->ex_client = item->ex_client; |
701 | new->ex_path.dentry = dget(item->ex_path.dentry); | 701 | new->ex_path = item->ex_path; |
702 | new->ex_path.mnt = mntget(item->ex_path.mnt); | 702 | path_get(&item->ex_path); |
703 | new->ex_fslocs.locations = NULL; | 703 | new->ex_fslocs.locations = NULL; |
704 | new->ex_fslocs.locations_count = 0; | 704 | new->ex_fslocs.locations_count = 0; |
705 | new->ex_fslocs.migrated = 0; | 705 | new->ex_fslocs.migrated = 0; |
@@ -1253,7 +1253,7 @@ static int e_show(struct seq_file *m, void *p) | |||
1253 | return 0; | 1253 | return 0; |
1254 | } | 1254 | } |
1255 | 1255 | ||
1256 | cache_get(&exp->h); | 1256 | exp_get(exp); |
1257 | if (cache_check(cd, &exp->h, NULL)) | 1257 | if (cache_check(cd, &exp->h, NULL)) |
1258 | return 0; | 1258 | return 0; |
1259 | exp_put(exp); | 1259 | exp_put(exp); |
diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h index cfeea85c5bed..04dc8c167b0c 100644 --- a/fs/nfsd/export.h +++ b/fs/nfsd/export.h | |||
@@ -101,9 +101,10 @@ static inline void exp_put(struct svc_export *exp) | |||
101 | cache_put(&exp->h, exp->cd); | 101 | cache_put(&exp->h, exp->cd); |
102 | } | 102 | } |
103 | 103 | ||
104 | static inline void exp_get(struct svc_export *exp) | 104 | static inline struct svc_export *exp_get(struct svc_export *exp) |
105 | { | 105 | { |
106 | cache_get(&exp->h); | 106 | cache_get(&exp->h); |
107 | return exp; | ||
107 | } | 108 | } |
108 | struct svc_export * rqst_exp_find(struct svc_rqst *, int, u32 *); | 109 | struct svc_export * rqst_exp_find(struct svc_rqst *, int, u32 *); |
109 | 110 | ||
diff --git a/fs/nfsd/fault_inject.c b/fs/nfsd/fault_inject.c index 2ed05c3cd43d..c16bf5af6831 100644 --- a/fs/nfsd/fault_inject.c +++ b/fs/nfsd/fault_inject.c | |||
@@ -17,81 +17,13 @@ | |||
17 | 17 | ||
18 | struct nfsd_fault_inject_op { | 18 | struct nfsd_fault_inject_op { |
19 | char *file; | 19 | char *file; |
20 | u64 (*forget)(struct nfs4_client *, u64); | 20 | u64 (*get)(void); |
21 | u64 (*print)(struct nfs4_client *, u64); | 21 | u64 (*set_val)(u64); |
22 | u64 (*set_clnt)(struct sockaddr_storage *, size_t); | ||
22 | }; | 23 | }; |
23 | 24 | ||
24 | static struct nfsd_fault_inject_op inject_ops[] = { | ||
25 | { | ||
26 | .file = "forget_clients", | ||
27 | .forget = nfsd_forget_client, | ||
28 | .print = nfsd_print_client, | ||
29 | }, | ||
30 | { | ||
31 | .file = "forget_locks", | ||
32 | .forget = nfsd_forget_client_locks, | ||
33 | .print = nfsd_print_client_locks, | ||
34 | }, | ||
35 | { | ||
36 | .file = "forget_openowners", | ||
37 | .forget = nfsd_forget_client_openowners, | ||
38 | .print = nfsd_print_client_openowners, | ||
39 | }, | ||
40 | { | ||
41 | .file = "forget_delegations", | ||
42 | .forget = nfsd_forget_client_delegations, | ||
43 | .print = nfsd_print_client_delegations, | ||
44 | }, | ||
45 | { | ||
46 | .file = "recall_delegations", | ||
47 | .forget = nfsd_recall_client_delegations, | ||
48 | .print = nfsd_print_client_delegations, | ||
49 | }, | ||
50 | }; | ||
51 | |||
52 | static long int NUM_INJECT_OPS = sizeof(inject_ops) / sizeof(struct nfsd_fault_inject_op); | ||
53 | static struct dentry *debug_dir; | 25 | static struct dentry *debug_dir; |
54 | 26 | ||
55 | static void nfsd_inject_set(struct nfsd_fault_inject_op *op, u64 val) | ||
56 | { | ||
57 | u64 count = 0; | ||
58 | |||
59 | if (val == 0) | ||
60 | printk(KERN_INFO "NFSD Fault Injection: %s (all)", op->file); | ||
61 | else | ||
62 | printk(KERN_INFO "NFSD Fault Injection: %s (n = %llu)", op->file, val); | ||
63 | |||
64 | nfs4_lock_state(); | ||
65 | count = nfsd_for_n_state(val, op->forget); | ||
66 | nfs4_unlock_state(); | ||
67 | printk(KERN_INFO "NFSD: %s: found %llu", op->file, count); | ||
68 | } | ||
69 | |||
70 | static void nfsd_inject_set_client(struct nfsd_fault_inject_op *op, | ||
71 | struct sockaddr_storage *addr, | ||
72 | size_t addr_size) | ||
73 | { | ||
74 | char buf[INET6_ADDRSTRLEN]; | ||
75 | struct nfs4_client *clp; | ||
76 | u64 count; | ||
77 | |||
78 | nfs4_lock_state(); | ||
79 | clp = nfsd_find_client(addr, addr_size); | ||
80 | if (clp) { | ||
81 | count = op->forget(clp, 0); | ||
82 | rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf)); | ||
83 | printk(KERN_INFO "NFSD [%s]: Client %s had %llu state object(s)\n", op->file, buf, count); | ||
84 | } | ||
85 | nfs4_unlock_state(); | ||
86 | } | ||
87 | |||
88 | static void nfsd_inject_get(struct nfsd_fault_inject_op *op, u64 *val) | ||
89 | { | ||
90 | nfs4_lock_state(); | ||
91 | *val = nfsd_for_n_state(0, op->print); | ||
92 | nfs4_unlock_state(); | ||
93 | } | ||
94 | |||
95 | static ssize_t fault_inject_read(struct file *file, char __user *buf, | 27 | static ssize_t fault_inject_read(struct file *file, char __user *buf, |
96 | size_t len, loff_t *ppos) | 28 | size_t len, loff_t *ppos) |
97 | { | 29 | { |
@@ -99,9 +31,10 @@ static ssize_t fault_inject_read(struct file *file, char __user *buf, | |||
99 | char read_buf[25]; | 31 | char read_buf[25]; |
100 | size_t size; | 32 | size_t size; |
101 | loff_t pos = *ppos; | 33 | loff_t pos = *ppos; |
34 | struct nfsd_fault_inject_op *op = file_inode(file)->i_private; | ||
102 | 35 | ||
103 | if (!pos) | 36 | if (!pos) |
104 | nfsd_inject_get(file_inode(file)->i_private, &val); | 37 | val = op->get(); |
105 | size = scnprintf(read_buf, sizeof(read_buf), "%llu\n", val); | 38 | size = scnprintf(read_buf, sizeof(read_buf), "%llu\n", val); |
106 | 39 | ||
107 | return simple_read_from_buffer(buf, len, ppos, read_buf, size); | 40 | return simple_read_from_buffer(buf, len, ppos, read_buf, size); |
@@ -114,18 +47,36 @@ static ssize_t fault_inject_write(struct file *file, const char __user *buf, | |||
114 | size_t size = min(sizeof(write_buf) - 1, len); | 47 | size_t size = min(sizeof(write_buf) - 1, len); |
115 | struct net *net = current->nsproxy->net_ns; | 48 | struct net *net = current->nsproxy->net_ns; |
116 | struct sockaddr_storage sa; | 49 | struct sockaddr_storage sa; |
50 | struct nfsd_fault_inject_op *op = file_inode(file)->i_private; | ||
117 | u64 val; | 51 | u64 val; |
52 | char *nl; | ||
118 | 53 | ||
119 | if (copy_from_user(write_buf, buf, size)) | 54 | if (copy_from_user(write_buf, buf, size)) |
120 | return -EFAULT; | 55 | return -EFAULT; |
121 | write_buf[size] = '\0'; | 56 | write_buf[size] = '\0'; |
122 | 57 | ||
58 | /* Deal with any embedded newlines in the string */ | ||
59 | nl = strchr(write_buf, '\n'); | ||
60 | if (nl) { | ||
61 | size = nl - write_buf; | ||
62 | *nl = '\0'; | ||
63 | } | ||
64 | |||
123 | size = rpc_pton(net, write_buf, size, (struct sockaddr *)&sa, sizeof(sa)); | 65 | size = rpc_pton(net, write_buf, size, (struct sockaddr *)&sa, sizeof(sa)); |
124 | if (size > 0) | 66 | if (size > 0) { |
125 | nfsd_inject_set_client(file_inode(file)->i_private, &sa, size); | 67 | val = op->set_clnt(&sa, size); |
126 | else { | 68 | if (val) |
69 | pr_info("NFSD [%s]: Client %s had %llu state object(s)\n", | ||
70 | op->file, write_buf, val); | ||
71 | } else { | ||
127 | val = simple_strtoll(write_buf, NULL, 0); | 72 | val = simple_strtoll(write_buf, NULL, 0); |
128 | nfsd_inject_set(file_inode(file)->i_private, val); | 73 | if (val == 0) |
74 | pr_info("NFSD Fault Injection: %s (all)", op->file); | ||
75 | else | ||
76 | pr_info("NFSD Fault Injection: %s (n = %llu)", | ||
77 | op->file, val); | ||
78 | val = op->set_val(val); | ||
79 | pr_info("NFSD: %s: found %llu", op->file, val); | ||
129 | } | 80 | } |
130 | return len; /* on success, claim we got the whole input */ | 81 | return len; /* on success, claim we got the whole input */ |
131 | } | 82 | } |
@@ -141,6 +92,41 @@ void nfsd_fault_inject_cleanup(void) | |||
141 | debugfs_remove_recursive(debug_dir); | 92 | debugfs_remove_recursive(debug_dir); |
142 | } | 93 | } |
143 | 94 | ||
95 | static struct nfsd_fault_inject_op inject_ops[] = { | ||
96 | { | ||
97 | .file = "forget_clients", | ||
98 | .get = nfsd_inject_print_clients, | ||
99 | .set_val = nfsd_inject_forget_clients, | ||
100 | .set_clnt = nfsd_inject_forget_client, | ||
101 | }, | ||
102 | { | ||
103 | .file = "forget_locks", | ||
104 | .get = nfsd_inject_print_locks, | ||
105 | .set_val = nfsd_inject_forget_locks, | ||
106 | .set_clnt = nfsd_inject_forget_client_locks, | ||
107 | }, | ||
108 | { | ||
109 | .file = "forget_openowners", | ||
110 | .get = nfsd_inject_print_openowners, | ||
111 | .set_val = nfsd_inject_forget_openowners, | ||
112 | .set_clnt = nfsd_inject_forget_client_openowners, | ||
113 | }, | ||
114 | { | ||
115 | .file = "forget_delegations", | ||
116 | .get = nfsd_inject_print_delegations, | ||
117 | .set_val = nfsd_inject_forget_delegations, | ||
118 | .set_clnt = nfsd_inject_forget_client_delegations, | ||
119 | }, | ||
120 | { | ||
121 | .file = "recall_delegations", | ||
122 | .get = nfsd_inject_print_delegations, | ||
123 | .set_val = nfsd_inject_recall_delegations, | ||
124 | .set_clnt = nfsd_inject_recall_client_delegations, | ||
125 | }, | ||
126 | }; | ||
127 | |||
128 | #define NUM_INJECT_OPS (sizeof(inject_ops)/sizeof(struct nfsd_fault_inject_op)) | ||
129 | |||
144 | int nfsd_fault_inject_init(void) | 130 | int nfsd_fault_inject_init(void) |
145 | { | 131 | { |
146 | unsigned int i; | 132 | unsigned int i; |
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h index d32b3aa6600d..ea6749a32760 100644 --- a/fs/nfsd/netns.h +++ b/fs/nfsd/netns.h | |||
@@ -29,14 +29,19 @@ | |||
29 | #define CLIENT_HASH_SIZE (1 << CLIENT_HASH_BITS) | 29 | #define CLIENT_HASH_SIZE (1 << CLIENT_HASH_BITS) |
30 | #define CLIENT_HASH_MASK (CLIENT_HASH_SIZE - 1) | 30 | #define CLIENT_HASH_MASK (CLIENT_HASH_SIZE - 1) |
31 | 31 | ||
32 | #define LOCKOWNER_INO_HASH_BITS 8 | ||
33 | #define LOCKOWNER_INO_HASH_SIZE (1 << LOCKOWNER_INO_HASH_BITS) | ||
34 | |||
35 | #define SESSION_HASH_SIZE 512 | 32 | #define SESSION_HASH_SIZE 512 |
36 | 33 | ||
37 | struct cld_net; | 34 | struct cld_net; |
38 | struct nfsd4_client_tracking_ops; | 35 | struct nfsd4_client_tracking_ops; |
39 | 36 | ||
37 | /* | ||
38 | * Represents a nfsd "container". With respect to nfsv4 state tracking, the | ||
39 | * fields of interest are the *_id_hashtbls and the *_name_tree. These track | ||
40 | * the nfs4_client objects by either short or long form clientid. | ||
41 | * | ||
42 | * Each nfsd_net runs a nfs4_laundromat workqueue job when necessary to clean | ||
43 | * up expired clients and delegations within the container. | ||
44 | */ | ||
40 | struct nfsd_net { | 45 | struct nfsd_net { |
41 | struct cld_net *cld_net; | 46 | struct cld_net *cld_net; |
42 | 47 | ||
@@ -66,8 +71,6 @@ struct nfsd_net { | |||
66 | struct rb_root conf_name_tree; | 71 | struct rb_root conf_name_tree; |
67 | struct list_head *unconf_id_hashtbl; | 72 | struct list_head *unconf_id_hashtbl; |
68 | struct rb_root unconf_name_tree; | 73 | struct rb_root unconf_name_tree; |
69 | struct list_head *ownerstr_hashtbl; | ||
70 | struct list_head *lockowner_ino_hashtbl; | ||
71 | struct list_head *sessionid_hashtbl; | 74 | struct list_head *sessionid_hashtbl; |
72 | /* | 75 | /* |
73 | * client_lru holds client queue ordered by nfs4_client.cl_time | 76 | * client_lru holds client queue ordered by nfs4_client.cl_time |
@@ -97,10 +100,16 @@ struct nfsd_net { | |||
97 | bool nfsd_net_up; | 100 | bool nfsd_net_up; |
98 | bool lockd_up; | 101 | bool lockd_up; |
99 | 102 | ||
103 | /* Time of server startup */ | ||
104 | struct timeval nfssvc_boot; | ||
105 | |||
100 | /* | 106 | /* |
101 | * Time of server startup | 107 | * Max number of connections this nfsd container will allow. Defaults |
108 | * to '0' which is means that it bases this on the number of threads. | ||
102 | */ | 109 | */ |
103 | struct timeval nfssvc_boot; | 110 | unsigned int max_connections; |
111 | |||
112 | u32 clientid_counter; | ||
104 | 113 | ||
105 | struct svc_serv *nfsd_serv; | 114 | struct svc_serv *nfsd_serv; |
106 | }; | 115 | }; |
diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c index 12b023a7ab7d..ac54ea60b3f6 100644 --- a/fs/nfsd/nfs2acl.c +++ b/fs/nfsd/nfs2acl.c | |||
@@ -54,14 +54,14 @@ static __be32 nfsacld_proc_getacl(struct svc_rqst * rqstp, | |||
54 | 54 | ||
55 | if (resp->mask & (NFS_ACL|NFS_ACLCNT)) { | 55 | if (resp->mask & (NFS_ACL|NFS_ACLCNT)) { |
56 | acl = get_acl(inode, ACL_TYPE_ACCESS); | 56 | acl = get_acl(inode, ACL_TYPE_ACCESS); |
57 | if (IS_ERR(acl)) { | ||
58 | nfserr = nfserrno(PTR_ERR(acl)); | ||
59 | goto fail; | ||
60 | } | ||
61 | if (acl == NULL) { | 57 | if (acl == NULL) { |
62 | /* Solaris returns the inode's minimum ACL. */ | 58 | /* Solaris returns the inode's minimum ACL. */ |
63 | acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL); | 59 | acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL); |
64 | } | 60 | } |
61 | if (IS_ERR(acl)) { | ||
62 | nfserr = nfserrno(PTR_ERR(acl)); | ||
63 | goto fail; | ||
64 | } | ||
65 | resp->acl_access = acl; | 65 | resp->acl_access = acl; |
66 | } | 66 | } |
67 | if (resp->mask & (NFS_DFACL|NFS_DFACLCNT)) { | 67 | if (resp->mask & (NFS_DFACL|NFS_DFACLCNT)) { |
diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c index 2a514e21dc74..34cbbab6abd7 100644 --- a/fs/nfsd/nfs3acl.c +++ b/fs/nfsd/nfs3acl.c | |||
@@ -47,14 +47,14 @@ static __be32 nfsd3_proc_getacl(struct svc_rqst * rqstp, | |||
47 | 47 | ||
48 | if (resp->mask & (NFS_ACL|NFS_ACLCNT)) { | 48 | if (resp->mask & (NFS_ACL|NFS_ACLCNT)) { |
49 | acl = get_acl(inode, ACL_TYPE_ACCESS); | 49 | acl = get_acl(inode, ACL_TYPE_ACCESS); |
50 | if (IS_ERR(acl)) { | ||
51 | nfserr = nfserrno(PTR_ERR(acl)); | ||
52 | goto fail; | ||
53 | } | ||
54 | if (acl == NULL) { | 50 | if (acl == NULL) { |
55 | /* Solaris returns the inode's minimum ACL. */ | 51 | /* Solaris returns the inode's minimum ACL. */ |
56 | acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL); | 52 | acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL); |
57 | } | 53 | } |
54 | if (IS_ERR(acl)) { | ||
55 | nfserr = nfserrno(PTR_ERR(acl)); | ||
56 | goto fail; | ||
57 | } | ||
58 | resp->acl_access = acl; | 58 | resp->acl_access = acl; |
59 | } | 59 | } |
60 | if (resp->mask & (NFS_DFACL|NFS_DFACLCNT)) { | 60 | if (resp->mask & (NFS_DFACL|NFS_DFACLCNT)) { |
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c index 401289913130..fa2525b2e9d7 100644 --- a/fs/nfsd/nfs3proc.c +++ b/fs/nfsd/nfs3proc.c | |||
@@ -157,11 +157,7 @@ nfsd3_proc_read(struct svc_rqst *rqstp, struct nfsd3_readargs *argp, | |||
157 | * 1 (status) + 22 (post_op_attr) + 1 (count) + 1 (eof) | 157 | * 1 (status) + 22 (post_op_attr) + 1 (count) + 1 (eof) |
158 | * + 1 (xdr opaque byte count) = 26 | 158 | * + 1 (xdr opaque byte count) = 26 |
159 | */ | 159 | */ |
160 | 160 | resp->count = min(argp->count, max_blocksize); | |
161 | resp->count = argp->count; | ||
162 | if (max_blocksize < resp->count) | ||
163 | resp->count = max_blocksize; | ||
164 | |||
165 | svc_reserve_auth(rqstp, ((1 + NFS3_POST_OP_ATTR_WORDS + 3)<<2) + resp->count +4); | 161 | svc_reserve_auth(rqstp, ((1 + NFS3_POST_OP_ATTR_WORDS + 3)<<2) + resp->count +4); |
166 | 162 | ||
167 | fh_copy(&resp->fh, &argp->fh); | 163 | fh_copy(&resp->fh, &argp->fh); |
@@ -286,8 +282,7 @@ nfsd3_proc_symlink(struct svc_rqst *rqstp, struct nfsd3_symlinkargs *argp, | |||
286 | fh_copy(&resp->dirfh, &argp->ffh); | 282 | fh_copy(&resp->dirfh, &argp->ffh); |
287 | fh_init(&resp->fh, NFS3_FHSIZE); | 283 | fh_init(&resp->fh, NFS3_FHSIZE); |
288 | nfserr = nfsd_symlink(rqstp, &resp->dirfh, argp->fname, argp->flen, | 284 | nfserr = nfsd_symlink(rqstp, &resp->dirfh, argp->fname, argp->flen, |
289 | argp->tname, argp->tlen, | 285 | argp->tname, &resp->fh); |
290 | &resp->fh, &argp->attrs); | ||
291 | RETURN_STATUS(nfserr); | 286 | RETURN_STATUS(nfserr); |
292 | } | 287 | } |
293 | 288 | ||
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c index e6c01e80325e..39c5eb3ad33a 100644 --- a/fs/nfsd/nfs3xdr.c +++ b/fs/nfsd/nfs3xdr.c | |||
@@ -120,10 +120,7 @@ decode_sattr3(__be32 *p, struct iattr *iap) | |||
120 | 120 | ||
121 | iap->ia_valid |= ATTR_SIZE; | 121 | iap->ia_valid |= ATTR_SIZE; |
122 | p = xdr_decode_hyper(p, &newsize); | 122 | p = xdr_decode_hyper(p, &newsize); |
123 | if (newsize <= NFS_OFFSET_MAX) | 123 | iap->ia_size = min_t(u64, newsize, NFS_OFFSET_MAX); |
124 | iap->ia_size = newsize; | ||
125 | else | ||
126 | iap->ia_size = NFS_OFFSET_MAX; | ||
127 | } | 124 | } |
128 | if ((tmp = ntohl(*p++)) == 1) { /* set to server time */ | 125 | if ((tmp = ntohl(*p++)) == 1) { /* set to server time */ |
129 | iap->ia_valid |= ATTR_ATIME; | 126 | iap->ia_valid |= ATTR_ATIME; |
@@ -338,10 +335,8 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, | |||
338 | return 0; | 335 | return 0; |
339 | p = xdr_decode_hyper(p, &args->offset); | 336 | p = xdr_decode_hyper(p, &args->offset); |
340 | 337 | ||
341 | len = args->count = ntohl(*p++); | 338 | args->count = ntohl(*p++); |
342 | 339 | len = min(args->count, max_blocksize); | |
343 | if (len > max_blocksize) | ||
344 | len = max_blocksize; | ||
345 | 340 | ||
346 | /* set up the kvec */ | 341 | /* set up the kvec */ |
347 | v=0; | 342 | v=0; |
@@ -349,7 +344,7 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, | |||
349 | struct page *p = *(rqstp->rq_next_page++); | 344 | struct page *p = *(rqstp->rq_next_page++); |
350 | 345 | ||
351 | rqstp->rq_vec[v].iov_base = page_address(p); | 346 | rqstp->rq_vec[v].iov_base = page_address(p); |
352 | rqstp->rq_vec[v].iov_len = len < PAGE_SIZE? len : PAGE_SIZE; | 347 | rqstp->rq_vec[v].iov_len = min_t(unsigned int, len, PAGE_SIZE); |
353 | len -= rqstp->rq_vec[v].iov_len; | 348 | len -= rqstp->rq_vec[v].iov_len; |
354 | v++; | 349 | v++; |
355 | } | 350 | } |
@@ -484,9 +479,7 @@ nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p, | |||
484 | } | 479 | } |
485 | /* now copy next page if there is one */ | 480 | /* now copy next page if there is one */ |
486 | if (len && !avail && rqstp->rq_arg.page_len) { | 481 | if (len && !avail && rqstp->rq_arg.page_len) { |
487 | avail = rqstp->rq_arg.page_len; | 482 | avail = min_t(unsigned int, rqstp->rq_arg.page_len, PAGE_SIZE); |
488 | if (avail > PAGE_SIZE) | ||
489 | avail = PAGE_SIZE; | ||
490 | old = page_address(rqstp->rq_arg.pages[0]); | 483 | old = page_address(rqstp->rq_arg.pages[0]); |
491 | } | 484 | } |
492 | while (len && avail && *old) { | 485 | while (len && avail && *old) { |
@@ -571,10 +564,7 @@ nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p, | |||
571 | args->verf = p; p += 2; | 564 | args->verf = p; p += 2; |
572 | args->dircount = ~0; | 565 | args->dircount = ~0; |
573 | args->count = ntohl(*p++); | 566 | args->count = ntohl(*p++); |
574 | 567 | args->count = min_t(u32, args->count, PAGE_SIZE); | |
575 | if (args->count > PAGE_SIZE) | ||
576 | args->count = PAGE_SIZE; | ||
577 | |||
578 | args->buffer = page_address(*(rqstp->rq_next_page++)); | 568 | args->buffer = page_address(*(rqstp->rq_next_page++)); |
579 | 569 | ||
580 | return xdr_argsize_check(rqstp, p); | 570 | return xdr_argsize_check(rqstp, p); |
@@ -595,10 +585,7 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p, | |||
595 | args->dircount = ntohl(*p++); | 585 | args->dircount = ntohl(*p++); |
596 | args->count = ntohl(*p++); | 586 | args->count = ntohl(*p++); |
597 | 587 | ||
598 | len = (args->count > max_blocksize) ? max_blocksize : | 588 | len = args->count = min(args->count, max_blocksize); |
599 | args->count; | ||
600 | args->count = len; | ||
601 | |||
602 | while (len > 0) { | 589 | while (len > 0) { |
603 | struct page *p = *(rqstp->rq_next_page++); | 590 | struct page *p = *(rqstp->rq_next_page++); |
604 | if (!args->buffer) | 591 | if (!args->buffer) |
@@ -913,8 +900,7 @@ encode_entry(struct readdir_cd *ccd, const char *name, int namlen, | |||
913 | */ | 900 | */ |
914 | 901 | ||
915 | /* truncate filename if too long */ | 902 | /* truncate filename if too long */ |
916 | if (namlen > NFS3_MAXNAMLEN) | 903 | namlen = min(namlen, NFS3_MAXNAMLEN); |
917 | namlen = NFS3_MAXNAMLEN; | ||
918 | 904 | ||
919 | slen = XDR_QUADLEN(namlen); | 905 | slen = XDR_QUADLEN(namlen); |
920 | elen = slen + NFS3_ENTRY_BAGGAGE | 906 | elen = slen + NFS3_ENTRY_BAGGAGE |
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c index d714156a19fd..59fd76651781 100644 --- a/fs/nfsd/nfs4acl.c +++ b/fs/nfsd/nfs4acl.c | |||
@@ -146,35 +146,43 @@ nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry, | |||
146 | int size = 0; | 146 | int size = 0; |
147 | 147 | ||
148 | pacl = get_acl(inode, ACL_TYPE_ACCESS); | 148 | pacl = get_acl(inode, ACL_TYPE_ACCESS); |
149 | if (!pacl) { | 149 | if (!pacl) |
150 | pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL); | 150 | pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL); |
151 | if (IS_ERR(pacl)) | 151 | |
152 | return PTR_ERR(pacl); | 152 | if (IS_ERR(pacl)) |
153 | } | 153 | return PTR_ERR(pacl); |
154 | |||
154 | /* allocate for worst case: one (deny, allow) pair each: */ | 155 | /* allocate for worst case: one (deny, allow) pair each: */ |
155 | size += 2 * pacl->a_count; | 156 | size += 2 * pacl->a_count; |
156 | 157 | ||
157 | if (S_ISDIR(inode->i_mode)) { | 158 | if (S_ISDIR(inode->i_mode)) { |
158 | flags = NFS4_ACL_DIR; | 159 | flags = NFS4_ACL_DIR; |
159 | dpacl = get_acl(inode, ACL_TYPE_DEFAULT); | 160 | dpacl = get_acl(inode, ACL_TYPE_DEFAULT); |
161 | if (IS_ERR(dpacl)) { | ||
162 | error = PTR_ERR(dpacl); | ||
163 | goto rel_pacl; | ||
164 | } | ||
165 | |||
160 | if (dpacl) | 166 | if (dpacl) |
161 | size += 2 * dpacl->a_count; | 167 | size += 2 * dpacl->a_count; |
162 | } | 168 | } |
163 | 169 | ||
164 | *acl = nfs4_acl_new(size); | 170 | *acl = kmalloc(nfs4_acl_bytes(size), GFP_KERNEL); |
165 | if (*acl == NULL) { | 171 | if (*acl == NULL) { |
166 | error = -ENOMEM; | 172 | error = -ENOMEM; |
167 | goto out; | 173 | goto out; |
168 | } | 174 | } |
175 | (*acl)->naces = 0; | ||
169 | 176 | ||
170 | _posix_to_nfsv4_one(pacl, *acl, flags & ~NFS4_ACL_TYPE_DEFAULT); | 177 | _posix_to_nfsv4_one(pacl, *acl, flags & ~NFS4_ACL_TYPE_DEFAULT); |
171 | 178 | ||
172 | if (dpacl) | 179 | if (dpacl) |
173 | _posix_to_nfsv4_one(dpacl, *acl, flags | NFS4_ACL_TYPE_DEFAULT); | 180 | _posix_to_nfsv4_one(dpacl, *acl, flags | NFS4_ACL_TYPE_DEFAULT); |
174 | 181 | ||
175 | out: | 182 | out: |
176 | posix_acl_release(pacl); | ||
177 | posix_acl_release(dpacl); | 183 | posix_acl_release(dpacl); |
184 | rel_pacl: | ||
185 | posix_acl_release(pacl); | ||
178 | return error; | 186 | return error; |
179 | } | 187 | } |
180 | 188 | ||
@@ -872,16 +880,13 @@ ace2type(struct nfs4_ace *ace) | |||
872 | return -1; | 880 | return -1; |
873 | } | 881 | } |
874 | 882 | ||
875 | struct nfs4_acl * | 883 | /* |
876 | nfs4_acl_new(int n) | 884 | * return the size of the struct nfs4_acl required to represent an acl |
885 | * with @entries entries. | ||
886 | */ | ||
887 | int nfs4_acl_bytes(int entries) | ||
877 | { | 888 | { |
878 | struct nfs4_acl *acl; | 889 | return sizeof(struct nfs4_acl) + entries * sizeof(struct nfs4_ace); |
879 | |||
880 | acl = kmalloc(sizeof(*acl) + n*sizeof(struct nfs4_ace), GFP_KERNEL); | ||
881 | if (acl == NULL) | ||
882 | return NULL; | ||
883 | acl->naces = 0; | ||
884 | return acl; | ||
885 | } | 890 | } |
886 | 891 | ||
887 | static struct { | 892 | static struct { |
@@ -935,5 +940,5 @@ __be32 nfs4_acl_write_who(struct xdr_stream *xdr, int who) | |||
935 | return 0; | 940 | return 0; |
936 | } | 941 | } |
937 | WARN_ON_ONCE(1); | 942 | WARN_ON_ONCE(1); |
938 | return -1; | 943 | return nfserr_serverfault; |
939 | } | 944 | } |
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 2c73cae9899d..e0be57b0f79b 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c | |||
@@ -337,7 +337,7 @@ static void encode_cb_recall4args(struct xdr_stream *xdr, | |||
337 | p = xdr_reserve_space(xdr, 4); | 337 | p = xdr_reserve_space(xdr, 4); |
338 | *p++ = xdr_zero; /* truncate */ | 338 | *p++ = xdr_zero; /* truncate */ |
339 | 339 | ||
340 | encode_nfs_fh4(xdr, &dp->dl_fh); | 340 | encode_nfs_fh4(xdr, &dp->dl_stid.sc_file->fi_fhandle); |
341 | 341 | ||
342 | hdr->nops++; | 342 | hdr->nops++; |
343 | } | 343 | } |
@@ -678,7 +678,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c | |||
678 | (clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5)) | 678 | (clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5)) |
679 | return -EINVAL; | 679 | return -EINVAL; |
680 | args.client_name = clp->cl_cred.cr_principal; | 680 | args.client_name = clp->cl_cred.cr_principal; |
681 | args.prognumber = conn->cb_prog, | 681 | args.prognumber = conn->cb_prog; |
682 | args.protocol = XPRT_TRANSPORT_TCP; | 682 | args.protocol = XPRT_TRANSPORT_TCP; |
683 | args.authflavor = clp->cl_cred.cr_flavor; | 683 | args.authflavor = clp->cl_cred.cr_flavor; |
684 | clp->cl_cb_ident = conn->cb_ident; | 684 | clp->cl_cb_ident = conn->cb_ident; |
@@ -689,7 +689,8 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c | |||
689 | clp->cl_cb_session = ses; | 689 | clp->cl_cb_session = ses; |
690 | args.bc_xprt = conn->cb_xprt; | 690 | args.bc_xprt = conn->cb_xprt; |
691 | args.prognumber = clp->cl_cb_session->se_cb_prog; | 691 | args.prognumber = clp->cl_cb_session->se_cb_prog; |
692 | args.protocol = XPRT_TRANSPORT_BC_TCP; | 692 | args.protocol = conn->cb_xprt->xpt_class->xcl_ident | |
693 | XPRT_TRANSPORT_BC; | ||
693 | args.authflavor = ses->se_cb_sec.flavor; | 694 | args.authflavor = ses->se_cb_sec.flavor; |
694 | } | 695 | } |
695 | /* Create RPC client */ | 696 | /* Create RPC client */ |
@@ -904,7 +905,7 @@ static void nfsd4_cb_recall_release(void *calldata) | |||
904 | spin_lock(&clp->cl_lock); | 905 | spin_lock(&clp->cl_lock); |
905 | list_del(&cb->cb_per_client); | 906 | list_del(&cb->cb_per_client); |
906 | spin_unlock(&clp->cl_lock); | 907 | spin_unlock(&clp->cl_lock); |
907 | nfs4_put_delegation(dp); | 908 | nfs4_put_stid(&dp->dl_stid); |
908 | } | 909 | } |
909 | } | 910 | } |
910 | 911 | ||
@@ -933,7 +934,7 @@ void nfsd4_shutdown_callback(struct nfs4_client *clp) | |||
933 | set_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags); | 934 | set_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags); |
934 | /* | 935 | /* |
935 | * Note this won't actually result in a null callback; | 936 | * Note this won't actually result in a null callback; |
936 | * instead, nfsd4_do_callback_rpc() will detect the killed | 937 | * instead, nfsd4_run_cb_null() will detect the killed |
937 | * client, destroy the rpc client, and stop: | 938 | * client, destroy the rpc client, and stop: |
938 | */ | 939 | */ |
939 | do_probe_callback(clp); | 940 | do_probe_callback(clp); |
@@ -1011,9 +1012,9 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb) | |||
1011 | run_nfsd4_cb(cb); | 1012 | run_nfsd4_cb(cb); |
1012 | } | 1013 | } |
1013 | 1014 | ||
1014 | static void nfsd4_do_callback_rpc(struct work_struct *w) | 1015 | static void |
1016 | nfsd4_run_callback_rpc(struct nfsd4_callback *cb) | ||
1015 | { | 1017 | { |
1016 | struct nfsd4_callback *cb = container_of(w, struct nfsd4_callback, cb_work); | ||
1017 | struct nfs4_client *clp = cb->cb_clp; | 1018 | struct nfs4_client *clp = cb->cb_clp; |
1018 | struct rpc_clnt *clnt; | 1019 | struct rpc_clnt *clnt; |
1019 | 1020 | ||
@@ -1031,9 +1032,22 @@ static void nfsd4_do_callback_rpc(struct work_struct *w) | |||
1031 | cb->cb_ops, cb); | 1032 | cb->cb_ops, cb); |
1032 | } | 1033 | } |
1033 | 1034 | ||
1034 | void nfsd4_init_callback(struct nfsd4_callback *cb) | 1035 | void |
1036 | nfsd4_run_cb_null(struct work_struct *w) | ||
1035 | { | 1037 | { |
1036 | INIT_WORK(&cb->cb_work, nfsd4_do_callback_rpc); | 1038 | struct nfsd4_callback *cb = container_of(w, struct nfsd4_callback, |
1039 | cb_work); | ||
1040 | nfsd4_run_callback_rpc(cb); | ||
1041 | } | ||
1042 | |||
1043 | void | ||
1044 | nfsd4_run_cb_recall(struct work_struct *w) | ||
1045 | { | ||
1046 | struct nfsd4_callback *cb = container_of(w, struct nfsd4_callback, | ||
1047 | cb_work); | ||
1048 | |||
1049 | nfsd4_prepare_cb_recall(cb->cb_op); | ||
1050 | nfsd4_run_callback_rpc(cb); | ||
1037 | } | 1051 | } |
1038 | 1052 | ||
1039 | void nfsd4_cb_recall(struct nfs4_delegation *dp) | 1053 | void nfsd4_cb_recall(struct nfs4_delegation *dp) |
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 8f029db5d271..5e0dc528a0e8 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c | |||
@@ -177,7 +177,7 @@ fh_dup2(struct svc_fh *dst, struct svc_fh *src) | |||
177 | fh_put(dst); | 177 | fh_put(dst); |
178 | dget(src->fh_dentry); | 178 | dget(src->fh_dentry); |
179 | if (src->fh_export) | 179 | if (src->fh_export) |
180 | cache_get(&src->fh_export->h); | 180 | exp_get(src->fh_export); |
181 | *dst = *src; | 181 | *dst = *src; |
182 | } | 182 | } |
183 | 183 | ||
@@ -385,8 +385,6 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
385 | if (nfsd4_has_session(cstate)) | 385 | if (nfsd4_has_session(cstate)) |
386 | copy_clientid(&open->op_clientid, cstate->session); | 386 | copy_clientid(&open->op_clientid, cstate->session); |
387 | 387 | ||
388 | nfs4_lock_state(); | ||
389 | |||
390 | /* check seqid for replay. set nfs4_owner */ | 388 | /* check seqid for replay. set nfs4_owner */ |
391 | resp = rqstp->rq_resp; | 389 | resp = rqstp->rq_resp; |
392 | status = nfsd4_process_open1(&resp->cstate, open, nn); | 390 | status = nfsd4_process_open1(&resp->cstate, open, nn); |
@@ -431,8 +429,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
431 | break; | 429 | break; |
432 | case NFS4_OPEN_CLAIM_PREVIOUS: | 430 | case NFS4_OPEN_CLAIM_PREVIOUS: |
433 | status = nfs4_check_open_reclaim(&open->op_clientid, | 431 | status = nfs4_check_open_reclaim(&open->op_clientid, |
434 | cstate->minorversion, | 432 | cstate, nn); |
435 | nn); | ||
436 | if (status) | 433 | if (status) |
437 | goto out; | 434 | goto out; |
438 | open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; | 435 | open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; |
@@ -461,19 +458,17 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
461 | * set, (2) sets open->op_stateid, (3) sets open->op_delegation. | 458 | * set, (2) sets open->op_stateid, (3) sets open->op_delegation. |
462 | */ | 459 | */ |
463 | status = nfsd4_process_open2(rqstp, resfh, open); | 460 | status = nfsd4_process_open2(rqstp, resfh, open); |
464 | WARN_ON(status && open->op_created); | 461 | WARN(status && open->op_created, |
462 | "nfsd4_process_open2 failed to open newly-created file! status=%u\n", | ||
463 | be32_to_cpu(status)); | ||
465 | out: | 464 | out: |
466 | if (resfh && resfh != &cstate->current_fh) { | 465 | if (resfh && resfh != &cstate->current_fh) { |
467 | fh_dup2(&cstate->current_fh, resfh); | 466 | fh_dup2(&cstate->current_fh, resfh); |
468 | fh_put(resfh); | 467 | fh_put(resfh); |
469 | kfree(resfh); | 468 | kfree(resfh); |
470 | } | 469 | } |
471 | nfsd4_cleanup_open_state(open, status); | 470 | nfsd4_cleanup_open_state(cstate, open, status); |
472 | if (open->op_openowner && !nfsd4_has_session(cstate)) | ||
473 | cstate->replay_owner = &open->op_openowner->oo_owner; | ||
474 | nfsd4_bump_seqid(cstate, status); | 471 | nfsd4_bump_seqid(cstate, status); |
475 | if (!cstate->replay_owner) | ||
476 | nfs4_unlock_state(); | ||
477 | return status; | 472 | return status; |
478 | } | 473 | } |
479 | 474 | ||
@@ -581,8 +576,12 @@ static void gen_boot_verifier(nfs4_verifier *verifier, struct net *net) | |||
581 | __be32 verf[2]; | 576 | __be32 verf[2]; |
582 | struct nfsd_net *nn = net_generic(net, nfsd_net_id); | 577 | struct nfsd_net *nn = net_generic(net, nfsd_net_id); |
583 | 578 | ||
584 | verf[0] = (__be32)nn->nfssvc_boot.tv_sec; | 579 | /* |
585 | verf[1] = (__be32)nn->nfssvc_boot.tv_usec; | 580 | * This is opaque to client, so no need to byte-swap. Use |
581 | * __force to keep sparse happy | ||
582 | */ | ||
583 | verf[0] = (__force __be32)nn->nfssvc_boot.tv_sec; | ||
584 | verf[1] = (__force __be32)nn->nfssvc_boot.tv_usec; | ||
586 | memcpy(verifier->data, verf, sizeof(verifier->data)); | 585 | memcpy(verifier->data, verf, sizeof(verifier->data)); |
587 | } | 586 | } |
588 | 587 | ||
@@ -619,8 +618,7 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
619 | case NF4LNK: | 618 | case NF4LNK: |
620 | status = nfsd_symlink(rqstp, &cstate->current_fh, | 619 | status = nfsd_symlink(rqstp, &cstate->current_fh, |
621 | create->cr_name, create->cr_namelen, | 620 | create->cr_name, create->cr_namelen, |
622 | create->cr_linkname, create->cr_linklen, | 621 | create->cr_data, &resfh); |
623 | &resfh, &create->cr_iattr); | ||
624 | break; | 622 | break; |
625 | 623 | ||
626 | case NF4BLK: | 624 | case NF4BLK: |
@@ -909,8 +907,8 @@ nfsd4_secinfo_no_name(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstat | |||
909 | default: | 907 | default: |
910 | return nfserr_inval; | 908 | return nfserr_inval; |
911 | } | 909 | } |
912 | exp_get(cstate->current_fh.fh_export); | 910 | |
913 | sin->sin_exp = cstate->current_fh.fh_export; | 911 | sin->sin_exp = exp_get(cstate->current_fh.fh_export); |
914 | fh_put(&cstate->current_fh); | 912 | fh_put(&cstate->current_fh); |
915 | return nfs_ok; | 913 | return nfs_ok; |
916 | } | 914 | } |
@@ -1289,7 +1287,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp, | |||
1289 | * Don't use the deferral mechanism for NFSv4; compounds make it | 1287 | * Don't use the deferral mechanism for NFSv4; compounds make it |
1290 | * too hard to avoid non-idempotency problems. | 1288 | * too hard to avoid non-idempotency problems. |
1291 | */ | 1289 | */ |
1292 | rqstp->rq_usedeferral = 0; | 1290 | rqstp->rq_usedeferral = false; |
1293 | 1291 | ||
1294 | /* | 1292 | /* |
1295 | * According to RFC3010, this takes precedence over all other errors. | 1293 | * According to RFC3010, this takes precedence over all other errors. |
@@ -1391,10 +1389,7 @@ encode_op: | |||
1391 | args->ops, args->opcnt, resp->opcnt, op->opnum, | 1389 | args->ops, args->opcnt, resp->opcnt, op->opnum, |
1392 | be32_to_cpu(status)); | 1390 | be32_to_cpu(status)); |
1393 | 1391 | ||
1394 | if (cstate->replay_owner) { | 1392 | nfsd4_cstate_clear_replay(cstate); |
1395 | nfs4_unlock_state(); | ||
1396 | cstate->replay_owner = NULL; | ||
1397 | } | ||
1398 | /* XXX Ugh, we need to get rid of this kind of special case: */ | 1393 | /* XXX Ugh, we need to get rid of this kind of special case: */ |
1399 | if (op->opnum == OP_READ && op->u.read.rd_filp) | 1394 | if (op->opnum == OP_READ && op->u.read.rd_filp) |
1400 | fput(op->u.read.rd_filp); | 1395 | fput(op->u.read.rd_filp); |
@@ -1408,7 +1403,7 @@ encode_op: | |||
1408 | BUG_ON(cstate->replay_owner); | 1403 | BUG_ON(cstate->replay_owner); |
1409 | out: | 1404 | out: |
1410 | /* Reset deferral mechanism for RPC deferrals */ | 1405 | /* Reset deferral mechanism for RPC deferrals */ |
1411 | rqstp->rq_usedeferral = 1; | 1406 | rqstp->rq_usedeferral = true; |
1412 | dprintk("nfsv4 compound returned %d\n", ntohl(status)); | 1407 | dprintk("nfsv4 compound returned %d\n", ntohl(status)); |
1413 | return status; | 1408 | return status; |
1414 | } | 1409 | } |
@@ -1520,21 +1515,17 @@ static inline u32 nfsd4_read_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) | |||
1520 | u32 maxcount = 0, rlen = 0; | 1515 | u32 maxcount = 0, rlen = 0; |
1521 | 1516 | ||
1522 | maxcount = svc_max_payload(rqstp); | 1517 | maxcount = svc_max_payload(rqstp); |
1523 | rlen = op->u.read.rd_length; | 1518 | rlen = min(op->u.read.rd_length, maxcount); |
1524 | |||
1525 | if (rlen > maxcount) | ||
1526 | rlen = maxcount; | ||
1527 | 1519 | ||
1528 | return (op_encode_hdr_size + 2 + XDR_QUADLEN(rlen)) * sizeof(__be32); | 1520 | return (op_encode_hdr_size + 2 + XDR_QUADLEN(rlen)) * sizeof(__be32); |
1529 | } | 1521 | } |
1530 | 1522 | ||
1531 | static inline u32 nfsd4_readdir_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) | 1523 | static inline u32 nfsd4_readdir_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) |
1532 | { | 1524 | { |
1533 | u32 maxcount = svc_max_payload(rqstp); | 1525 | u32 maxcount = 0, rlen = 0; |
1534 | u32 rlen = op->u.readdir.rd_maxcount; | ||
1535 | 1526 | ||
1536 | if (rlen > maxcount) | 1527 | maxcount = svc_max_payload(rqstp); |
1537 | rlen = maxcount; | 1528 | rlen = min(op->u.readdir.rd_maxcount, maxcount); |
1538 | 1529 | ||
1539 | return (op_encode_hdr_size + op_encode_verifier_maxsz + | 1530 | return (op_encode_hdr_size + op_encode_verifier_maxsz + |
1540 | XDR_QUADLEN(rlen)) * sizeof(__be32); | 1531 | XDR_QUADLEN(rlen)) * sizeof(__be32); |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 2204e1fe5725..2e80a59e7e91 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
@@ -70,13 +70,11 @@ static u64 current_sessionid = 1; | |||
70 | #define CURRENT_STATEID(stateid) (!memcmp((stateid), ¤tstateid, sizeof(stateid_t))) | 70 | #define CURRENT_STATEID(stateid) (!memcmp((stateid), ¤tstateid, sizeof(stateid_t))) |
71 | 71 | ||
72 | /* forward declarations */ | 72 | /* forward declarations */ |
73 | static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner); | 73 | static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner); |
74 | static void nfs4_free_ol_stateid(struct nfs4_stid *stid); | ||
74 | 75 | ||
75 | /* Locking: */ | 76 | /* Locking: */ |
76 | 77 | ||
77 | /* Currently used for almost all code touching nfsv4 state: */ | ||
78 | static DEFINE_MUTEX(client_mutex); | ||
79 | |||
80 | /* | 78 | /* |
81 | * Currently used for the del_recall_lru and file hash table. In an | 79 | * Currently used for the del_recall_lru and file hash table. In an |
82 | * effort to decrease the scope of the client_mutex, this spinlock may | 80 | * effort to decrease the scope of the client_mutex, this spinlock may |
@@ -84,18 +82,18 @@ static DEFINE_MUTEX(client_mutex); | |||
84 | */ | 82 | */ |
85 | static DEFINE_SPINLOCK(state_lock); | 83 | static DEFINE_SPINLOCK(state_lock); |
86 | 84 | ||
85 | /* | ||
86 | * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for | ||
87 | * the refcount on the open stateid to drop. | ||
88 | */ | ||
89 | static DECLARE_WAIT_QUEUE_HEAD(close_wq); | ||
90 | |||
87 | static struct kmem_cache *openowner_slab; | 91 | static struct kmem_cache *openowner_slab; |
88 | static struct kmem_cache *lockowner_slab; | 92 | static struct kmem_cache *lockowner_slab; |
89 | static struct kmem_cache *file_slab; | 93 | static struct kmem_cache *file_slab; |
90 | static struct kmem_cache *stateid_slab; | 94 | static struct kmem_cache *stateid_slab; |
91 | static struct kmem_cache *deleg_slab; | 95 | static struct kmem_cache *deleg_slab; |
92 | 96 | ||
93 | void | ||
94 | nfs4_lock_state(void) | ||
95 | { | ||
96 | mutex_lock(&client_mutex); | ||
97 | } | ||
98 | |||
99 | static void free_session(struct nfsd4_session *); | 97 | static void free_session(struct nfsd4_session *); |
100 | 98 | ||
101 | static bool is_session_dead(struct nfsd4_session *ses) | 99 | static bool is_session_dead(struct nfsd4_session *ses) |
@@ -103,12 +101,6 @@ static bool is_session_dead(struct nfsd4_session *ses) | |||
103 | return ses->se_flags & NFS4_SESSION_DEAD; | 101 | return ses->se_flags & NFS4_SESSION_DEAD; |
104 | } | 102 | } |
105 | 103 | ||
106 | void nfsd4_put_session(struct nfsd4_session *ses) | ||
107 | { | ||
108 | if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses)) | ||
109 | free_session(ses); | ||
110 | } | ||
111 | |||
112 | static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me) | 104 | static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me) |
113 | { | 105 | { |
114 | if (atomic_read(&ses->se_ref) > ref_held_by_me) | 106 | if (atomic_read(&ses->se_ref) > ref_held_by_me) |
@@ -117,46 +109,17 @@ static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_b | |||
117 | return nfs_ok; | 109 | return nfs_ok; |
118 | } | 110 | } |
119 | 111 | ||
120 | static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses) | ||
121 | { | ||
122 | if (is_session_dead(ses)) | ||
123 | return nfserr_badsession; | ||
124 | atomic_inc(&ses->se_ref); | ||
125 | return nfs_ok; | ||
126 | } | ||
127 | |||
128 | void | ||
129 | nfs4_unlock_state(void) | ||
130 | { | ||
131 | mutex_unlock(&client_mutex); | ||
132 | } | ||
133 | |||
134 | static bool is_client_expired(struct nfs4_client *clp) | 112 | static bool is_client_expired(struct nfs4_client *clp) |
135 | { | 113 | { |
136 | return clp->cl_time == 0; | 114 | return clp->cl_time == 0; |
137 | } | 115 | } |
138 | 116 | ||
139 | static __be32 mark_client_expired_locked(struct nfs4_client *clp) | 117 | static __be32 get_client_locked(struct nfs4_client *clp) |
140 | { | ||
141 | if (atomic_read(&clp->cl_refcount)) | ||
142 | return nfserr_jukebox; | ||
143 | clp->cl_time = 0; | ||
144 | return nfs_ok; | ||
145 | } | ||
146 | |||
147 | static __be32 mark_client_expired(struct nfs4_client *clp) | ||
148 | { | 118 | { |
149 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); | 119 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); |
150 | __be32 ret; | ||
151 | 120 | ||
152 | spin_lock(&nn->client_lock); | 121 | lockdep_assert_held(&nn->client_lock); |
153 | ret = mark_client_expired_locked(clp); | ||
154 | spin_unlock(&nn->client_lock); | ||
155 | return ret; | ||
156 | } | ||
157 | 122 | ||
158 | static __be32 get_client_locked(struct nfs4_client *clp) | ||
159 | { | ||
160 | if (is_client_expired(clp)) | 123 | if (is_client_expired(clp)) |
161 | return nfserr_expired; | 124 | return nfserr_expired; |
162 | atomic_inc(&clp->cl_refcount); | 125 | atomic_inc(&clp->cl_refcount); |
@@ -197,13 +160,17 @@ renew_client(struct nfs4_client *clp) | |||
197 | 160 | ||
198 | static void put_client_renew_locked(struct nfs4_client *clp) | 161 | static void put_client_renew_locked(struct nfs4_client *clp) |
199 | { | 162 | { |
163 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); | ||
164 | |||
165 | lockdep_assert_held(&nn->client_lock); | ||
166 | |||
200 | if (!atomic_dec_and_test(&clp->cl_refcount)) | 167 | if (!atomic_dec_and_test(&clp->cl_refcount)) |
201 | return; | 168 | return; |
202 | if (!is_client_expired(clp)) | 169 | if (!is_client_expired(clp)) |
203 | renew_client_locked(clp); | 170 | renew_client_locked(clp); |
204 | } | 171 | } |
205 | 172 | ||
206 | void put_client_renew(struct nfs4_client *clp) | 173 | static void put_client_renew(struct nfs4_client *clp) |
207 | { | 174 | { |
208 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); | 175 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); |
209 | 176 | ||
@@ -214,6 +181,79 @@ void put_client_renew(struct nfs4_client *clp) | |||
214 | spin_unlock(&nn->client_lock); | 181 | spin_unlock(&nn->client_lock); |
215 | } | 182 | } |
216 | 183 | ||
184 | static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses) | ||
185 | { | ||
186 | __be32 status; | ||
187 | |||
188 | if (is_session_dead(ses)) | ||
189 | return nfserr_badsession; | ||
190 | status = get_client_locked(ses->se_client); | ||
191 | if (status) | ||
192 | return status; | ||
193 | atomic_inc(&ses->se_ref); | ||
194 | return nfs_ok; | ||
195 | } | ||
196 | |||
197 | static void nfsd4_put_session_locked(struct nfsd4_session *ses) | ||
198 | { | ||
199 | struct nfs4_client *clp = ses->se_client; | ||
200 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); | ||
201 | |||
202 | lockdep_assert_held(&nn->client_lock); | ||
203 | |||
204 | if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses)) | ||
205 | free_session(ses); | ||
206 | put_client_renew_locked(clp); | ||
207 | } | ||
208 | |||
209 | static void nfsd4_put_session(struct nfsd4_session *ses) | ||
210 | { | ||
211 | struct nfs4_client *clp = ses->se_client; | ||
212 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); | ||
213 | |||
214 | spin_lock(&nn->client_lock); | ||
215 | nfsd4_put_session_locked(ses); | ||
216 | spin_unlock(&nn->client_lock); | ||
217 | } | ||
218 | |||
219 | static int | ||
220 | same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner) | ||
221 | { | ||
222 | return (sop->so_owner.len == owner->len) && | ||
223 | 0 == memcmp(sop->so_owner.data, owner->data, owner->len); | ||
224 | } | ||
225 | |||
226 | static struct nfs4_openowner * | ||
227 | find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open, | ||
228 | struct nfs4_client *clp) | ||
229 | { | ||
230 | struct nfs4_stateowner *so; | ||
231 | |||
232 | lockdep_assert_held(&clp->cl_lock); | ||
233 | |||
234 | list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval], | ||
235 | so_strhash) { | ||
236 | if (!so->so_is_open_owner) | ||
237 | continue; | ||
238 | if (same_owner_str(so, &open->op_owner)) { | ||
239 | atomic_inc(&so->so_count); | ||
240 | return openowner(so); | ||
241 | } | ||
242 | } | ||
243 | return NULL; | ||
244 | } | ||
245 | |||
246 | static struct nfs4_openowner * | ||
247 | find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open, | ||
248 | struct nfs4_client *clp) | ||
249 | { | ||
250 | struct nfs4_openowner *oo; | ||
251 | |||
252 | spin_lock(&clp->cl_lock); | ||
253 | oo = find_openstateowner_str_locked(hashval, open, clp); | ||
254 | spin_unlock(&clp->cl_lock); | ||
255 | return oo; | ||
256 | } | ||
217 | 257 | ||
218 | static inline u32 | 258 | static inline u32 |
219 | opaque_hashval(const void *ptr, int nbytes) | 259 | opaque_hashval(const void *ptr, int nbytes) |
@@ -236,10 +276,11 @@ static void nfsd4_free_file(struct nfs4_file *f) | |||
236 | static inline void | 276 | static inline void |
237 | put_nfs4_file(struct nfs4_file *fi) | 277 | put_nfs4_file(struct nfs4_file *fi) |
238 | { | 278 | { |
279 | might_lock(&state_lock); | ||
280 | |||
239 | if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) { | 281 | if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) { |
240 | hlist_del(&fi->fi_hash); | 282 | hlist_del(&fi->fi_hash); |
241 | spin_unlock(&state_lock); | 283 | spin_unlock(&state_lock); |
242 | iput(fi->fi_inode); | ||
243 | nfsd4_free_file(fi); | 284 | nfsd4_free_file(fi); |
244 | } | 285 | } |
245 | } | 286 | } |
@@ -250,7 +291,80 @@ get_nfs4_file(struct nfs4_file *fi) | |||
250 | atomic_inc(&fi->fi_ref); | 291 | atomic_inc(&fi->fi_ref); |
251 | } | 292 | } |
252 | 293 | ||
253 | static int num_delegations; | 294 | static struct file * |
295 | __nfs4_get_fd(struct nfs4_file *f, int oflag) | ||
296 | { | ||
297 | if (f->fi_fds[oflag]) | ||
298 | return get_file(f->fi_fds[oflag]); | ||
299 | return NULL; | ||
300 | } | ||
301 | |||
302 | static struct file * | ||
303 | find_writeable_file_locked(struct nfs4_file *f) | ||
304 | { | ||
305 | struct file *ret; | ||
306 | |||
307 | lockdep_assert_held(&f->fi_lock); | ||
308 | |||
309 | ret = __nfs4_get_fd(f, O_WRONLY); | ||
310 | if (!ret) | ||
311 | ret = __nfs4_get_fd(f, O_RDWR); | ||
312 | return ret; | ||
313 | } | ||
314 | |||
315 | static struct file * | ||
316 | find_writeable_file(struct nfs4_file *f) | ||
317 | { | ||
318 | struct file *ret; | ||
319 | |||
320 | spin_lock(&f->fi_lock); | ||
321 | ret = find_writeable_file_locked(f); | ||
322 | spin_unlock(&f->fi_lock); | ||
323 | |||
324 | return ret; | ||
325 | } | ||
326 | |||
327 | static struct file *find_readable_file_locked(struct nfs4_file *f) | ||
328 | { | ||
329 | struct file *ret; | ||
330 | |||
331 | lockdep_assert_held(&f->fi_lock); | ||
332 | |||
333 | ret = __nfs4_get_fd(f, O_RDONLY); | ||
334 | if (!ret) | ||
335 | ret = __nfs4_get_fd(f, O_RDWR); | ||
336 | return ret; | ||
337 | } | ||
338 | |||
339 | static struct file * | ||
340 | find_readable_file(struct nfs4_file *f) | ||
341 | { | ||
342 | struct file *ret; | ||
343 | |||
344 | spin_lock(&f->fi_lock); | ||
345 | ret = find_readable_file_locked(f); | ||
346 | spin_unlock(&f->fi_lock); | ||
347 | |||
348 | return ret; | ||
349 | } | ||
350 | |||
351 | static struct file * | ||
352 | find_any_file(struct nfs4_file *f) | ||
353 | { | ||
354 | struct file *ret; | ||
355 | |||
356 | spin_lock(&f->fi_lock); | ||
357 | ret = __nfs4_get_fd(f, O_RDWR); | ||
358 | if (!ret) { | ||
359 | ret = __nfs4_get_fd(f, O_WRONLY); | ||
360 | if (!ret) | ||
361 | ret = __nfs4_get_fd(f, O_RDONLY); | ||
362 | } | ||
363 | spin_unlock(&f->fi_lock); | ||
364 | return ret; | ||
365 | } | ||
366 | |||
367 | static atomic_long_t num_delegations; | ||
254 | unsigned long max_delegations; | 368 | unsigned long max_delegations; |
255 | 369 | ||
256 | /* | 370 | /* |
@@ -262,12 +376,11 @@ unsigned long max_delegations; | |||
262 | #define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS) | 376 | #define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS) |
263 | #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1) | 377 | #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1) |
264 | 378 | ||
265 | static unsigned int ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername) | 379 | static unsigned int ownerstr_hashval(struct xdr_netobj *ownername) |
266 | { | 380 | { |
267 | unsigned int ret; | 381 | unsigned int ret; |
268 | 382 | ||
269 | ret = opaque_hashval(ownername->data, ownername->len); | 383 | ret = opaque_hashval(ownername->data, ownername->len); |
270 | ret += clientid; | ||
271 | return ret & OWNER_HASH_MASK; | 384 | return ret & OWNER_HASH_MASK; |
272 | } | 385 | } |
273 | 386 | ||
@@ -275,75 +388,124 @@ static unsigned int ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername) | |||
275 | #define FILE_HASH_BITS 8 | 388 | #define FILE_HASH_BITS 8 |
276 | #define FILE_HASH_SIZE (1 << FILE_HASH_BITS) | 389 | #define FILE_HASH_SIZE (1 << FILE_HASH_BITS) |
277 | 390 | ||
278 | static unsigned int file_hashval(struct inode *ino) | 391 | static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh) |
392 | { | ||
393 | return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0); | ||
394 | } | ||
395 | |||
396 | static unsigned int file_hashval(struct knfsd_fh *fh) | ||
397 | { | ||
398 | return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1); | ||
399 | } | ||
400 | |||
401 | static bool nfsd_fh_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2) | ||
279 | { | 402 | { |
280 | /* XXX: why are we hashing on inode pointer, anyway? */ | 403 | return fh1->fh_size == fh2->fh_size && |
281 | return hash_ptr(ino, FILE_HASH_BITS); | 404 | !memcmp(fh1->fh_base.fh_pad, |
405 | fh2->fh_base.fh_pad, | ||
406 | fh1->fh_size); | ||
282 | } | 407 | } |
283 | 408 | ||
284 | static struct hlist_head file_hashtbl[FILE_HASH_SIZE]; | 409 | static struct hlist_head file_hashtbl[FILE_HASH_SIZE]; |
285 | 410 | ||
286 | static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag) | 411 | static void |
412 | __nfs4_file_get_access(struct nfs4_file *fp, u32 access) | ||
287 | { | 413 | { |
288 | WARN_ON_ONCE(!(fp->fi_fds[oflag] || fp->fi_fds[O_RDWR])); | 414 | lockdep_assert_held(&fp->fi_lock); |
289 | atomic_inc(&fp->fi_access[oflag]); | 415 | |
416 | if (access & NFS4_SHARE_ACCESS_WRITE) | ||
417 | atomic_inc(&fp->fi_access[O_WRONLY]); | ||
418 | if (access & NFS4_SHARE_ACCESS_READ) | ||
419 | atomic_inc(&fp->fi_access[O_RDONLY]); | ||
290 | } | 420 | } |
291 | 421 | ||
292 | static void nfs4_file_get_access(struct nfs4_file *fp, int oflag) | 422 | static __be32 |
423 | nfs4_file_get_access(struct nfs4_file *fp, u32 access) | ||
293 | { | 424 | { |
294 | if (oflag == O_RDWR) { | 425 | lockdep_assert_held(&fp->fi_lock); |
295 | __nfs4_file_get_access(fp, O_RDONLY); | 426 | |
296 | __nfs4_file_get_access(fp, O_WRONLY); | 427 | /* Does this access mode make sense? */ |
297 | } else | 428 | if (access & ~NFS4_SHARE_ACCESS_BOTH) |
298 | __nfs4_file_get_access(fp, oflag); | 429 | return nfserr_inval; |
430 | |||
431 | /* Does it conflict with a deny mode already set? */ | ||
432 | if ((access & fp->fi_share_deny) != 0) | ||
433 | return nfserr_share_denied; | ||
434 | |||
435 | __nfs4_file_get_access(fp, access); | ||
436 | return nfs_ok; | ||
299 | } | 437 | } |
300 | 438 | ||
301 | static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag) | 439 | static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny) |
302 | { | 440 | { |
303 | if (fp->fi_fds[oflag]) { | 441 | /* Common case is that there is no deny mode. */ |
304 | fput(fp->fi_fds[oflag]); | 442 | if (deny) { |
305 | fp->fi_fds[oflag] = NULL; | 443 | /* Does this deny mode make sense? */ |
444 | if (deny & ~NFS4_SHARE_DENY_BOTH) | ||
445 | return nfserr_inval; | ||
446 | |||
447 | if ((deny & NFS4_SHARE_DENY_READ) && | ||
448 | atomic_read(&fp->fi_access[O_RDONLY])) | ||
449 | return nfserr_share_denied; | ||
450 | |||
451 | if ((deny & NFS4_SHARE_DENY_WRITE) && | ||
452 | atomic_read(&fp->fi_access[O_WRONLY])) | ||
453 | return nfserr_share_denied; | ||
306 | } | 454 | } |
455 | return nfs_ok; | ||
307 | } | 456 | } |
308 | 457 | ||
309 | static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag) | 458 | static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag) |
310 | { | 459 | { |
311 | if (atomic_dec_and_test(&fp->fi_access[oflag])) { | 460 | might_lock(&fp->fi_lock); |
312 | nfs4_file_put_fd(fp, oflag); | 461 | |
462 | if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) { | ||
463 | struct file *f1 = NULL; | ||
464 | struct file *f2 = NULL; | ||
465 | |||
466 | swap(f1, fp->fi_fds[oflag]); | ||
313 | if (atomic_read(&fp->fi_access[1 - oflag]) == 0) | 467 | if (atomic_read(&fp->fi_access[1 - oflag]) == 0) |
314 | nfs4_file_put_fd(fp, O_RDWR); | 468 | swap(f2, fp->fi_fds[O_RDWR]); |
469 | spin_unlock(&fp->fi_lock); | ||
470 | if (f1) | ||
471 | fput(f1); | ||
472 | if (f2) | ||
473 | fput(f2); | ||
315 | } | 474 | } |
316 | } | 475 | } |
317 | 476 | ||
318 | static void nfs4_file_put_access(struct nfs4_file *fp, int oflag) | 477 | static void nfs4_file_put_access(struct nfs4_file *fp, u32 access) |
319 | { | 478 | { |
320 | if (oflag == O_RDWR) { | 479 | WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH); |
321 | __nfs4_file_put_access(fp, O_RDONLY); | 480 | |
481 | if (access & NFS4_SHARE_ACCESS_WRITE) | ||
322 | __nfs4_file_put_access(fp, O_WRONLY); | 482 | __nfs4_file_put_access(fp, O_WRONLY); |
323 | } else | 483 | if (access & NFS4_SHARE_ACCESS_READ) |
324 | __nfs4_file_put_access(fp, oflag); | 484 | __nfs4_file_put_access(fp, O_RDONLY); |
325 | } | 485 | } |
326 | 486 | ||
327 | static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct | 487 | static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, |
328 | kmem_cache *slab) | 488 | struct kmem_cache *slab) |
329 | { | 489 | { |
330 | struct idr *stateids = &cl->cl_stateids; | ||
331 | struct nfs4_stid *stid; | 490 | struct nfs4_stid *stid; |
332 | int new_id; | 491 | int new_id; |
333 | 492 | ||
334 | stid = kmem_cache_alloc(slab, GFP_KERNEL); | 493 | stid = kmem_cache_zalloc(slab, GFP_KERNEL); |
335 | if (!stid) | 494 | if (!stid) |
336 | return NULL; | 495 | return NULL; |
337 | 496 | ||
338 | new_id = idr_alloc_cyclic(stateids, stid, 0, 0, GFP_KERNEL); | 497 | idr_preload(GFP_KERNEL); |
498 | spin_lock(&cl->cl_lock); | ||
499 | new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT); | ||
500 | spin_unlock(&cl->cl_lock); | ||
501 | idr_preload_end(); | ||
339 | if (new_id < 0) | 502 | if (new_id < 0) |
340 | goto out_free; | 503 | goto out_free; |
341 | stid->sc_client = cl; | 504 | stid->sc_client = cl; |
342 | stid->sc_type = 0; | ||
343 | stid->sc_stateid.si_opaque.so_id = new_id; | 505 | stid->sc_stateid.si_opaque.so_id = new_id; |
344 | stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid; | 506 | stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid; |
345 | /* Will be incremented before return to client: */ | 507 | /* Will be incremented before return to client: */ |
346 | stid->sc_stateid.si_generation = 0; | 508 | atomic_set(&stid->sc_count, 1); |
347 | 509 | ||
348 | /* | 510 | /* |
349 | * It shouldn't be a problem to reuse an opaque stateid value. | 511 | * It shouldn't be a problem to reuse an opaque stateid value. |
@@ -360,9 +522,24 @@ out_free: | |||
360 | return NULL; | 522 | return NULL; |
361 | } | 523 | } |
362 | 524 | ||
363 | static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp) | 525 | static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp) |
364 | { | 526 | { |
365 | return openlockstateid(nfs4_alloc_stid(clp, stateid_slab)); | 527 | struct nfs4_stid *stid; |
528 | struct nfs4_ol_stateid *stp; | ||
529 | |||
530 | stid = nfs4_alloc_stid(clp, stateid_slab); | ||
531 | if (!stid) | ||
532 | return NULL; | ||
533 | |||
534 | stp = openlockstateid(stid); | ||
535 | stp->st_stid.sc_free = nfs4_free_ol_stateid; | ||
536 | return stp; | ||
537 | } | ||
538 | |||
539 | static void nfs4_free_deleg(struct nfs4_stid *stid) | ||
540 | { | ||
541 | kmem_cache_free(deleg_slab, stid); | ||
542 | atomic_long_dec(&num_delegations); | ||
366 | } | 543 | } |
367 | 544 | ||
368 | /* | 545 | /* |
@@ -379,10 +556,11 @@ static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp) | |||
379 | * Each filter is 256 bits. We hash the filehandle to 32bit and use the | 556 | * Each filter is 256 bits. We hash the filehandle to 32bit and use the |
380 | * low 3 bytes as hash-table indices. | 557 | * low 3 bytes as hash-table indices. |
381 | * | 558 | * |
382 | * 'state_lock', which is always held when block_delegations() is called, | 559 | * 'blocked_delegations_lock', which is always taken in block_delegations(), |
383 | * is used to manage concurrent access. Testing does not need the lock | 560 | * is used to manage concurrent access. Testing does not need the lock |
384 | * except when swapping the two filters. | 561 | * except when swapping the two filters. |
385 | */ | 562 | */ |
563 | static DEFINE_SPINLOCK(blocked_delegations_lock); | ||
386 | static struct bloom_pair { | 564 | static struct bloom_pair { |
387 | int entries, old_entries; | 565 | int entries, old_entries; |
388 | time_t swap_time; | 566 | time_t swap_time; |
@@ -398,7 +576,7 @@ static int delegation_blocked(struct knfsd_fh *fh) | |||
398 | if (bd->entries == 0) | 576 | if (bd->entries == 0) |
399 | return 0; | 577 | return 0; |
400 | if (seconds_since_boot() - bd->swap_time > 30) { | 578 | if (seconds_since_boot() - bd->swap_time > 30) { |
401 | spin_lock(&state_lock); | 579 | spin_lock(&blocked_delegations_lock); |
402 | if (seconds_since_boot() - bd->swap_time > 30) { | 580 | if (seconds_since_boot() - bd->swap_time > 30) { |
403 | bd->entries -= bd->old_entries; | 581 | bd->entries -= bd->old_entries; |
404 | bd->old_entries = bd->entries; | 582 | bd->old_entries = bd->entries; |
@@ -407,7 +585,7 @@ static int delegation_blocked(struct knfsd_fh *fh) | |||
407 | bd->new = 1-bd->new; | 585 | bd->new = 1-bd->new; |
408 | bd->swap_time = seconds_since_boot(); | 586 | bd->swap_time = seconds_since_boot(); |
409 | } | 587 | } |
410 | spin_unlock(&state_lock); | 588 | spin_unlock(&blocked_delegations_lock); |
411 | } | 589 | } |
412 | hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0); | 590 | hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0); |
413 | if (test_bit(hash&255, bd->set[0]) && | 591 | if (test_bit(hash&255, bd->set[0]) && |
@@ -430,69 +608,73 @@ static void block_delegations(struct knfsd_fh *fh) | |||
430 | 608 | ||
431 | hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0); | 609 | hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0); |
432 | 610 | ||
611 | spin_lock(&blocked_delegations_lock); | ||
433 | __set_bit(hash&255, bd->set[bd->new]); | 612 | __set_bit(hash&255, bd->set[bd->new]); |
434 | __set_bit((hash>>8)&255, bd->set[bd->new]); | 613 | __set_bit((hash>>8)&255, bd->set[bd->new]); |
435 | __set_bit((hash>>16)&255, bd->set[bd->new]); | 614 | __set_bit((hash>>16)&255, bd->set[bd->new]); |
436 | if (bd->entries == 0) | 615 | if (bd->entries == 0) |
437 | bd->swap_time = seconds_since_boot(); | 616 | bd->swap_time = seconds_since_boot(); |
438 | bd->entries += 1; | 617 | bd->entries += 1; |
618 | spin_unlock(&blocked_delegations_lock); | ||
439 | } | 619 | } |
440 | 620 | ||
441 | static struct nfs4_delegation * | 621 | static struct nfs4_delegation * |
442 | alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh) | 622 | alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh) |
443 | { | 623 | { |
444 | struct nfs4_delegation *dp; | 624 | struct nfs4_delegation *dp; |
625 | long n; | ||
445 | 626 | ||
446 | dprintk("NFSD alloc_init_deleg\n"); | 627 | dprintk("NFSD alloc_init_deleg\n"); |
447 | if (num_delegations > max_delegations) | 628 | n = atomic_long_inc_return(&num_delegations); |
448 | return NULL; | 629 | if (n < 0 || n > max_delegations) |
630 | goto out_dec; | ||
449 | if (delegation_blocked(¤t_fh->fh_handle)) | 631 | if (delegation_blocked(¤t_fh->fh_handle)) |
450 | return NULL; | 632 | goto out_dec; |
451 | dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab)); | 633 | dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab)); |
452 | if (dp == NULL) | 634 | if (dp == NULL) |
453 | return dp; | 635 | goto out_dec; |
636 | |||
637 | dp->dl_stid.sc_free = nfs4_free_deleg; | ||
454 | /* | 638 | /* |
455 | * delegation seqid's are never incremented. The 4.1 special | 639 | * delegation seqid's are never incremented. The 4.1 special |
456 | * meaning of seqid 0 isn't meaningful, really, but let's avoid | 640 | * meaning of seqid 0 isn't meaningful, really, but let's avoid |
457 | * 0 anyway just for consistency and use 1: | 641 | * 0 anyway just for consistency and use 1: |
458 | */ | 642 | */ |
459 | dp->dl_stid.sc_stateid.si_generation = 1; | 643 | dp->dl_stid.sc_stateid.si_generation = 1; |
460 | num_delegations++; | ||
461 | INIT_LIST_HEAD(&dp->dl_perfile); | 644 | INIT_LIST_HEAD(&dp->dl_perfile); |
462 | INIT_LIST_HEAD(&dp->dl_perclnt); | 645 | INIT_LIST_HEAD(&dp->dl_perclnt); |
463 | INIT_LIST_HEAD(&dp->dl_recall_lru); | 646 | INIT_LIST_HEAD(&dp->dl_recall_lru); |
464 | dp->dl_file = NULL; | ||
465 | dp->dl_type = NFS4_OPEN_DELEGATE_READ; | 647 | dp->dl_type = NFS4_OPEN_DELEGATE_READ; |
466 | fh_copy_shallow(&dp->dl_fh, ¤t_fh->fh_handle); | 648 | INIT_WORK(&dp->dl_recall.cb_work, nfsd4_run_cb_recall); |
467 | dp->dl_time = 0; | ||
468 | atomic_set(&dp->dl_count, 1); | ||
469 | nfsd4_init_callback(&dp->dl_recall); | ||
470 | return dp; | 649 | return dp; |
650 | out_dec: | ||
651 | atomic_long_dec(&num_delegations); | ||
652 | return NULL; | ||
471 | } | 653 | } |
472 | 654 | ||
473 | static void remove_stid(struct nfs4_stid *s) | 655 | void |
656 | nfs4_put_stid(struct nfs4_stid *s) | ||
474 | { | 657 | { |
475 | struct idr *stateids = &s->sc_client->cl_stateids; | 658 | struct nfs4_file *fp = s->sc_file; |
659 | struct nfs4_client *clp = s->sc_client; | ||
476 | 660 | ||
477 | idr_remove(stateids, s->sc_stateid.si_opaque.so_id); | 661 | might_lock(&clp->cl_lock); |
478 | } | ||
479 | 662 | ||
480 | static void nfs4_free_stid(struct kmem_cache *slab, struct nfs4_stid *s) | 663 | if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) { |
481 | { | 664 | wake_up_all(&close_wq); |
482 | kmem_cache_free(slab, s); | 665 | return; |
483 | } | ||
484 | |||
485 | void | ||
486 | nfs4_put_delegation(struct nfs4_delegation *dp) | ||
487 | { | ||
488 | if (atomic_dec_and_test(&dp->dl_count)) { | ||
489 | nfs4_free_stid(deleg_slab, &dp->dl_stid); | ||
490 | num_delegations--; | ||
491 | } | 666 | } |
667 | idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id); | ||
668 | spin_unlock(&clp->cl_lock); | ||
669 | s->sc_free(s); | ||
670 | if (fp) | ||
671 | put_nfs4_file(fp); | ||
492 | } | 672 | } |
493 | 673 | ||
494 | static void nfs4_put_deleg_lease(struct nfs4_file *fp) | 674 | static void nfs4_put_deleg_lease(struct nfs4_file *fp) |
495 | { | 675 | { |
676 | lockdep_assert_held(&state_lock); | ||
677 | |||
496 | if (!fp->fi_lease) | 678 | if (!fp->fi_lease) |
497 | return; | 679 | return; |
498 | if (atomic_dec_and_test(&fp->fi_delegees)) { | 680 | if (atomic_dec_and_test(&fp->fi_delegees)) { |
@@ -512,54 +694,54 @@ static void | |||
512 | hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp) | 694 | hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp) |
513 | { | 695 | { |
514 | lockdep_assert_held(&state_lock); | 696 | lockdep_assert_held(&state_lock); |
697 | lockdep_assert_held(&fp->fi_lock); | ||
515 | 698 | ||
699 | atomic_inc(&dp->dl_stid.sc_count); | ||
516 | dp->dl_stid.sc_type = NFS4_DELEG_STID; | 700 | dp->dl_stid.sc_type = NFS4_DELEG_STID; |
517 | list_add(&dp->dl_perfile, &fp->fi_delegations); | 701 | list_add(&dp->dl_perfile, &fp->fi_delegations); |
518 | list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations); | 702 | list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations); |
519 | } | 703 | } |
520 | 704 | ||
521 | /* Called under the state lock. */ | ||
522 | static void | 705 | static void |
523 | unhash_delegation(struct nfs4_delegation *dp) | 706 | unhash_delegation_locked(struct nfs4_delegation *dp) |
524 | { | 707 | { |
525 | spin_lock(&state_lock); | 708 | struct nfs4_file *fp = dp->dl_stid.sc_file; |
526 | list_del_init(&dp->dl_perclnt); | ||
527 | list_del_init(&dp->dl_perfile); | ||
528 | list_del_init(&dp->dl_recall_lru); | ||
529 | spin_unlock(&state_lock); | ||
530 | if (dp->dl_file) { | ||
531 | nfs4_put_deleg_lease(dp->dl_file); | ||
532 | put_nfs4_file(dp->dl_file); | ||
533 | dp->dl_file = NULL; | ||
534 | } | ||
535 | } | ||
536 | |||
537 | 709 | ||
710 | lockdep_assert_held(&state_lock); | ||
538 | 711 | ||
539 | static void destroy_revoked_delegation(struct nfs4_delegation *dp) | 712 | dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID; |
540 | { | 713 | /* Ensure that deleg break won't try to requeue it */ |
714 | ++dp->dl_time; | ||
715 | spin_lock(&fp->fi_lock); | ||
716 | list_del_init(&dp->dl_perclnt); | ||
541 | list_del_init(&dp->dl_recall_lru); | 717 | list_del_init(&dp->dl_recall_lru); |
542 | remove_stid(&dp->dl_stid); | 718 | list_del_init(&dp->dl_perfile); |
543 | nfs4_put_delegation(dp); | 719 | spin_unlock(&fp->fi_lock); |
720 | if (fp) | ||
721 | nfs4_put_deleg_lease(fp); | ||
544 | } | 722 | } |
545 | 723 | ||
546 | static void destroy_delegation(struct nfs4_delegation *dp) | 724 | static void destroy_delegation(struct nfs4_delegation *dp) |
547 | { | 725 | { |
548 | unhash_delegation(dp); | 726 | spin_lock(&state_lock); |
549 | remove_stid(&dp->dl_stid); | 727 | unhash_delegation_locked(dp); |
550 | nfs4_put_delegation(dp); | 728 | spin_unlock(&state_lock); |
729 | nfs4_put_stid(&dp->dl_stid); | ||
551 | } | 730 | } |
552 | 731 | ||
553 | static void revoke_delegation(struct nfs4_delegation *dp) | 732 | static void revoke_delegation(struct nfs4_delegation *dp) |
554 | { | 733 | { |
555 | struct nfs4_client *clp = dp->dl_stid.sc_client; | 734 | struct nfs4_client *clp = dp->dl_stid.sc_client; |
556 | 735 | ||
736 | WARN_ON(!list_empty(&dp->dl_recall_lru)); | ||
737 | |||
557 | if (clp->cl_minorversion == 0) | 738 | if (clp->cl_minorversion == 0) |
558 | destroy_delegation(dp); | 739 | nfs4_put_stid(&dp->dl_stid); |
559 | else { | 740 | else { |
560 | unhash_delegation(dp); | ||
561 | dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID; | 741 | dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID; |
742 | spin_lock(&clp->cl_lock); | ||
562 | list_add(&dp->dl_recall_lru, &clp->cl_revoked); | 743 | list_add(&dp->dl_recall_lru, &clp->cl_revoked); |
744 | spin_unlock(&clp->cl_lock); | ||
563 | } | 745 | } |
564 | } | 746 | } |
565 | 747 | ||
@@ -607,57 +789,62 @@ bmap_to_share_mode(unsigned long bmap) { | |||
607 | return access; | 789 | return access; |
608 | } | 790 | } |
609 | 791 | ||
610 | static bool | ||
611 | test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) { | ||
612 | unsigned int access, deny; | ||
613 | |||
614 | access = bmap_to_share_mode(stp->st_access_bmap); | ||
615 | deny = bmap_to_share_mode(stp->st_deny_bmap); | ||
616 | if ((access & open->op_share_deny) || (deny & open->op_share_access)) | ||
617 | return false; | ||
618 | return true; | ||
619 | } | ||
620 | |||
621 | /* set share access for a given stateid */ | 792 | /* set share access for a given stateid */ |
622 | static inline void | 793 | static inline void |
623 | set_access(u32 access, struct nfs4_ol_stateid *stp) | 794 | set_access(u32 access, struct nfs4_ol_stateid *stp) |
624 | { | 795 | { |
625 | __set_bit(access, &stp->st_access_bmap); | 796 | unsigned char mask = 1 << access; |
797 | |||
798 | WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH); | ||
799 | stp->st_access_bmap |= mask; | ||
626 | } | 800 | } |
627 | 801 | ||
628 | /* clear share access for a given stateid */ | 802 | /* clear share access for a given stateid */ |
629 | static inline void | 803 | static inline void |
630 | clear_access(u32 access, struct nfs4_ol_stateid *stp) | 804 | clear_access(u32 access, struct nfs4_ol_stateid *stp) |
631 | { | 805 | { |
632 | __clear_bit(access, &stp->st_access_bmap); | 806 | unsigned char mask = 1 << access; |
807 | |||
808 | WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH); | ||
809 | stp->st_access_bmap &= ~mask; | ||
633 | } | 810 | } |
634 | 811 | ||
635 | /* test whether a given stateid has access */ | 812 | /* test whether a given stateid has access */ |
636 | static inline bool | 813 | static inline bool |
637 | test_access(u32 access, struct nfs4_ol_stateid *stp) | 814 | test_access(u32 access, struct nfs4_ol_stateid *stp) |
638 | { | 815 | { |
639 | return test_bit(access, &stp->st_access_bmap); | 816 | unsigned char mask = 1 << access; |
817 | |||
818 | return (bool)(stp->st_access_bmap & mask); | ||
640 | } | 819 | } |
641 | 820 | ||
642 | /* set share deny for a given stateid */ | 821 | /* set share deny for a given stateid */ |
643 | static inline void | 822 | static inline void |
644 | set_deny(u32 access, struct nfs4_ol_stateid *stp) | 823 | set_deny(u32 deny, struct nfs4_ol_stateid *stp) |
645 | { | 824 | { |
646 | __set_bit(access, &stp->st_deny_bmap); | 825 | unsigned char mask = 1 << deny; |
826 | |||
827 | WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH); | ||
828 | stp->st_deny_bmap |= mask; | ||
647 | } | 829 | } |
648 | 830 | ||
649 | /* clear share deny for a given stateid */ | 831 | /* clear share deny for a given stateid */ |
650 | static inline void | 832 | static inline void |
651 | clear_deny(u32 access, struct nfs4_ol_stateid *stp) | 833 | clear_deny(u32 deny, struct nfs4_ol_stateid *stp) |
652 | { | 834 | { |
653 | __clear_bit(access, &stp->st_deny_bmap); | 835 | unsigned char mask = 1 << deny; |
836 | |||
837 | WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH); | ||
838 | stp->st_deny_bmap &= ~mask; | ||
654 | } | 839 | } |
655 | 840 | ||
656 | /* test whether a given stateid is denying specific access */ | 841 | /* test whether a given stateid is denying specific access */ |
657 | static inline bool | 842 | static inline bool |
658 | test_deny(u32 access, struct nfs4_ol_stateid *stp) | 843 | test_deny(u32 deny, struct nfs4_ol_stateid *stp) |
659 | { | 844 | { |
660 | return test_bit(access, &stp->st_deny_bmap); | 845 | unsigned char mask = 1 << deny; |
846 | |||
847 | return (bool)(stp->st_deny_bmap & mask); | ||
661 | } | 848 | } |
662 | 849 | ||
663 | static int nfs4_access_to_omode(u32 access) | 850 | static int nfs4_access_to_omode(u32 access) |
@@ -674,138 +861,283 @@ static int nfs4_access_to_omode(u32 access) | |||
674 | return O_RDONLY; | 861 | return O_RDONLY; |
675 | } | 862 | } |
676 | 863 | ||
864 | /* | ||
865 | * A stateid that had a deny mode associated with it is being released | ||
866 | * or downgraded. Recalculate the deny mode on the file. | ||
867 | */ | ||
868 | static void | ||
869 | recalculate_deny_mode(struct nfs4_file *fp) | ||
870 | { | ||
871 | struct nfs4_ol_stateid *stp; | ||
872 | |||
873 | spin_lock(&fp->fi_lock); | ||
874 | fp->fi_share_deny = 0; | ||
875 | list_for_each_entry(stp, &fp->fi_stateids, st_perfile) | ||
876 | fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap); | ||
877 | spin_unlock(&fp->fi_lock); | ||
878 | } | ||
879 | |||
880 | static void | ||
881 | reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp) | ||
882 | { | ||
883 | int i; | ||
884 | bool change = false; | ||
885 | |||
886 | for (i = 1; i < 4; i++) { | ||
887 | if ((i & deny) != i) { | ||
888 | change = true; | ||
889 | clear_deny(i, stp); | ||
890 | } | ||
891 | } | ||
892 | |||
893 | /* Recalculate per-file deny mode if there was a change */ | ||
894 | if (change) | ||
895 | recalculate_deny_mode(stp->st_stid.sc_file); | ||
896 | } | ||
897 | |||
677 | /* release all access and file references for a given stateid */ | 898 | /* release all access and file references for a given stateid */ |
678 | static void | 899 | static void |
679 | release_all_access(struct nfs4_ol_stateid *stp) | 900 | release_all_access(struct nfs4_ol_stateid *stp) |
680 | { | 901 | { |
681 | int i; | 902 | int i; |
903 | struct nfs4_file *fp = stp->st_stid.sc_file; | ||
904 | |||
905 | if (fp && stp->st_deny_bmap != 0) | ||
906 | recalculate_deny_mode(fp); | ||
682 | 907 | ||
683 | for (i = 1; i < 4; i++) { | 908 | for (i = 1; i < 4; i++) { |
684 | if (test_access(i, stp)) | 909 | if (test_access(i, stp)) |
685 | nfs4_file_put_access(stp->st_file, | 910 | nfs4_file_put_access(stp->st_stid.sc_file, i); |
686 | nfs4_access_to_omode(i)); | ||
687 | clear_access(i, stp); | 911 | clear_access(i, stp); |
688 | } | 912 | } |
689 | } | 913 | } |
690 | 914 | ||
691 | static void unhash_generic_stateid(struct nfs4_ol_stateid *stp) | 915 | static void nfs4_put_stateowner(struct nfs4_stateowner *sop) |
692 | { | 916 | { |
917 | struct nfs4_client *clp = sop->so_client; | ||
918 | |||
919 | might_lock(&clp->cl_lock); | ||
920 | |||
921 | if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock)) | ||
922 | return; | ||
923 | sop->so_ops->so_unhash(sop); | ||
924 | spin_unlock(&clp->cl_lock); | ||
925 | kfree(sop->so_owner.data); | ||
926 | sop->so_ops->so_free(sop); | ||
927 | } | ||
928 | |||
929 | static void unhash_ol_stateid(struct nfs4_ol_stateid *stp) | ||
930 | { | ||
931 | struct nfs4_file *fp = stp->st_stid.sc_file; | ||
932 | |||
933 | lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock); | ||
934 | |||
935 | spin_lock(&fp->fi_lock); | ||
693 | list_del(&stp->st_perfile); | 936 | list_del(&stp->st_perfile); |
937 | spin_unlock(&fp->fi_lock); | ||
694 | list_del(&stp->st_perstateowner); | 938 | list_del(&stp->st_perstateowner); |
695 | } | 939 | } |
696 | 940 | ||
697 | static void close_generic_stateid(struct nfs4_ol_stateid *stp) | 941 | static void nfs4_free_ol_stateid(struct nfs4_stid *stid) |
698 | { | 942 | { |
943 | struct nfs4_ol_stateid *stp = openlockstateid(stid); | ||
944 | |||
699 | release_all_access(stp); | 945 | release_all_access(stp); |
700 | put_nfs4_file(stp->st_file); | 946 | if (stp->st_stateowner) |
701 | stp->st_file = NULL; | 947 | nfs4_put_stateowner(stp->st_stateowner); |
948 | kmem_cache_free(stateid_slab, stid); | ||
702 | } | 949 | } |
703 | 950 | ||
704 | static void free_generic_stateid(struct nfs4_ol_stateid *stp) | 951 | static void nfs4_free_lock_stateid(struct nfs4_stid *stid) |
705 | { | 952 | { |
706 | remove_stid(&stp->st_stid); | 953 | struct nfs4_ol_stateid *stp = openlockstateid(stid); |
707 | nfs4_free_stid(stateid_slab, &stp->st_stid); | 954 | struct nfs4_lockowner *lo = lockowner(stp->st_stateowner); |
955 | struct file *file; | ||
956 | |||
957 | file = find_any_file(stp->st_stid.sc_file); | ||
958 | if (file) | ||
959 | filp_close(file, (fl_owner_t)lo); | ||
960 | nfs4_free_ol_stateid(stid); | ||
708 | } | 961 | } |
709 | 962 | ||
710 | static void release_lock_stateid(struct nfs4_ol_stateid *stp) | 963 | /* |
964 | * Put the persistent reference to an already unhashed generic stateid, while | ||
965 | * holding the cl_lock. If it's the last reference, then put it onto the | ||
966 | * reaplist for later destruction. | ||
967 | */ | ||
968 | static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp, | ||
969 | struct list_head *reaplist) | ||
711 | { | 970 | { |
712 | struct file *file; | 971 | struct nfs4_stid *s = &stp->st_stid; |
972 | struct nfs4_client *clp = s->sc_client; | ||
973 | |||
974 | lockdep_assert_held(&clp->cl_lock); | ||
713 | 975 | ||
714 | unhash_generic_stateid(stp); | 976 | WARN_ON_ONCE(!list_empty(&stp->st_locks)); |
977 | |||
978 | if (!atomic_dec_and_test(&s->sc_count)) { | ||
979 | wake_up_all(&close_wq); | ||
980 | return; | ||
981 | } | ||
982 | |||
983 | idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id); | ||
984 | list_add(&stp->st_locks, reaplist); | ||
985 | } | ||
986 | |||
987 | static void unhash_lock_stateid(struct nfs4_ol_stateid *stp) | ||
988 | { | ||
989 | struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner); | ||
990 | |||
991 | lockdep_assert_held(&oo->oo_owner.so_client->cl_lock); | ||
992 | |||
993 | list_del_init(&stp->st_locks); | ||
994 | unhash_ol_stateid(stp); | ||
715 | unhash_stid(&stp->st_stid); | 995 | unhash_stid(&stp->st_stid); |
716 | file = find_any_file(stp->st_file); | ||
717 | if (file) | ||
718 | locks_remove_posix(file, (fl_owner_t)lockowner(stp->st_stateowner)); | ||
719 | close_generic_stateid(stp); | ||
720 | free_generic_stateid(stp); | ||
721 | } | 996 | } |
722 | 997 | ||
723 | static void unhash_lockowner(struct nfs4_lockowner *lo) | 998 | static void release_lock_stateid(struct nfs4_ol_stateid *stp) |
724 | { | 999 | { |
725 | struct nfs4_ol_stateid *stp; | 1000 | struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner); |
726 | 1001 | ||
727 | list_del(&lo->lo_owner.so_strhash); | 1002 | spin_lock(&oo->oo_owner.so_client->cl_lock); |
728 | list_del(&lo->lo_perstateid); | 1003 | unhash_lock_stateid(stp); |
729 | list_del(&lo->lo_owner_ino_hash); | 1004 | spin_unlock(&oo->oo_owner.so_client->cl_lock); |
730 | while (!list_empty(&lo->lo_owner.so_stateids)) { | 1005 | nfs4_put_stid(&stp->st_stid); |
731 | stp = list_first_entry(&lo->lo_owner.so_stateids, | ||
732 | struct nfs4_ol_stateid, st_perstateowner); | ||
733 | release_lock_stateid(stp); | ||
734 | } | ||
735 | } | 1006 | } |
736 | 1007 | ||
737 | static void nfs4_free_lockowner(struct nfs4_lockowner *lo) | 1008 | static void unhash_lockowner_locked(struct nfs4_lockowner *lo) |
738 | { | 1009 | { |
739 | kfree(lo->lo_owner.so_owner.data); | 1010 | struct nfs4_client *clp = lo->lo_owner.so_client; |
740 | kmem_cache_free(lockowner_slab, lo); | 1011 | |
1012 | lockdep_assert_held(&clp->cl_lock); | ||
1013 | |||
1014 | list_del_init(&lo->lo_owner.so_strhash); | ||
1015 | } | ||
1016 | |||
1017 | /* | ||
1018 | * Free a list of generic stateids that were collected earlier after being | ||
1019 | * fully unhashed. | ||
1020 | */ | ||
1021 | static void | ||
1022 | free_ol_stateid_reaplist(struct list_head *reaplist) | ||
1023 | { | ||
1024 | struct nfs4_ol_stateid *stp; | ||
1025 | struct nfs4_file *fp; | ||
1026 | |||
1027 | might_sleep(); | ||
1028 | |||
1029 | while (!list_empty(reaplist)) { | ||
1030 | stp = list_first_entry(reaplist, struct nfs4_ol_stateid, | ||
1031 | st_locks); | ||
1032 | list_del(&stp->st_locks); | ||
1033 | fp = stp->st_stid.sc_file; | ||
1034 | stp->st_stid.sc_free(&stp->st_stid); | ||
1035 | if (fp) | ||
1036 | put_nfs4_file(fp); | ||
1037 | } | ||
741 | } | 1038 | } |
742 | 1039 | ||
743 | static void release_lockowner(struct nfs4_lockowner *lo) | 1040 | static void release_lockowner(struct nfs4_lockowner *lo) |
744 | { | 1041 | { |
745 | unhash_lockowner(lo); | 1042 | struct nfs4_client *clp = lo->lo_owner.so_client; |
746 | nfs4_free_lockowner(lo); | 1043 | struct nfs4_ol_stateid *stp; |
1044 | struct list_head reaplist; | ||
1045 | |||
1046 | INIT_LIST_HEAD(&reaplist); | ||
1047 | |||
1048 | spin_lock(&clp->cl_lock); | ||
1049 | unhash_lockowner_locked(lo); | ||
1050 | while (!list_empty(&lo->lo_owner.so_stateids)) { | ||
1051 | stp = list_first_entry(&lo->lo_owner.so_stateids, | ||
1052 | struct nfs4_ol_stateid, st_perstateowner); | ||
1053 | unhash_lock_stateid(stp); | ||
1054 | put_ol_stateid_locked(stp, &reaplist); | ||
1055 | } | ||
1056 | spin_unlock(&clp->cl_lock); | ||
1057 | free_ol_stateid_reaplist(&reaplist); | ||
1058 | nfs4_put_stateowner(&lo->lo_owner); | ||
747 | } | 1059 | } |
748 | 1060 | ||
749 | static void | 1061 | static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp, |
750 | release_stateid_lockowners(struct nfs4_ol_stateid *open_stp) | 1062 | struct list_head *reaplist) |
751 | { | 1063 | { |
752 | struct nfs4_lockowner *lo; | 1064 | struct nfs4_ol_stateid *stp; |
753 | 1065 | ||
754 | while (!list_empty(&open_stp->st_lockowners)) { | 1066 | while (!list_empty(&open_stp->st_locks)) { |
755 | lo = list_entry(open_stp->st_lockowners.next, | 1067 | stp = list_entry(open_stp->st_locks.next, |
756 | struct nfs4_lockowner, lo_perstateid); | 1068 | struct nfs4_ol_stateid, st_locks); |
757 | release_lockowner(lo); | 1069 | unhash_lock_stateid(stp); |
1070 | put_ol_stateid_locked(stp, reaplist); | ||
758 | } | 1071 | } |
759 | } | 1072 | } |
760 | 1073 | ||
761 | static void unhash_open_stateid(struct nfs4_ol_stateid *stp) | 1074 | static void unhash_open_stateid(struct nfs4_ol_stateid *stp, |
1075 | struct list_head *reaplist) | ||
762 | { | 1076 | { |
763 | unhash_generic_stateid(stp); | 1077 | lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); |
764 | release_stateid_lockowners(stp); | 1078 | |
765 | close_generic_stateid(stp); | 1079 | unhash_ol_stateid(stp); |
1080 | release_open_stateid_locks(stp, reaplist); | ||
766 | } | 1081 | } |
767 | 1082 | ||
768 | static void release_open_stateid(struct nfs4_ol_stateid *stp) | 1083 | static void release_open_stateid(struct nfs4_ol_stateid *stp) |
769 | { | 1084 | { |
770 | unhash_open_stateid(stp); | 1085 | LIST_HEAD(reaplist); |
771 | free_generic_stateid(stp); | 1086 | |
1087 | spin_lock(&stp->st_stid.sc_client->cl_lock); | ||
1088 | unhash_open_stateid(stp, &reaplist); | ||
1089 | put_ol_stateid_locked(stp, &reaplist); | ||
1090 | spin_unlock(&stp->st_stid.sc_client->cl_lock); | ||
1091 | free_ol_stateid_reaplist(&reaplist); | ||
772 | } | 1092 | } |
773 | 1093 | ||
774 | static void unhash_openowner(struct nfs4_openowner *oo) | 1094 | static void unhash_openowner_locked(struct nfs4_openowner *oo) |
775 | { | 1095 | { |
776 | struct nfs4_ol_stateid *stp; | 1096 | struct nfs4_client *clp = oo->oo_owner.so_client; |
777 | 1097 | ||
778 | list_del(&oo->oo_owner.so_strhash); | 1098 | lockdep_assert_held(&clp->cl_lock); |
779 | list_del(&oo->oo_perclient); | 1099 | |
780 | while (!list_empty(&oo->oo_owner.so_stateids)) { | 1100 | list_del_init(&oo->oo_owner.so_strhash); |
781 | stp = list_first_entry(&oo->oo_owner.so_stateids, | 1101 | list_del_init(&oo->oo_perclient); |
782 | struct nfs4_ol_stateid, st_perstateowner); | ||
783 | release_open_stateid(stp); | ||
784 | } | ||
785 | } | 1102 | } |
786 | 1103 | ||
787 | static void release_last_closed_stateid(struct nfs4_openowner *oo) | 1104 | static void release_last_closed_stateid(struct nfs4_openowner *oo) |
788 | { | 1105 | { |
789 | struct nfs4_ol_stateid *s = oo->oo_last_closed_stid; | 1106 | struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net, |
1107 | nfsd_net_id); | ||
1108 | struct nfs4_ol_stateid *s; | ||
790 | 1109 | ||
1110 | spin_lock(&nn->client_lock); | ||
1111 | s = oo->oo_last_closed_stid; | ||
791 | if (s) { | 1112 | if (s) { |
792 | free_generic_stateid(s); | 1113 | list_del_init(&oo->oo_close_lru); |
793 | oo->oo_last_closed_stid = NULL; | 1114 | oo->oo_last_closed_stid = NULL; |
794 | } | 1115 | } |
795 | } | 1116 | spin_unlock(&nn->client_lock); |
796 | 1117 | if (s) | |
797 | static void nfs4_free_openowner(struct nfs4_openowner *oo) | 1118 | nfs4_put_stid(&s->st_stid); |
798 | { | ||
799 | kfree(oo->oo_owner.so_owner.data); | ||
800 | kmem_cache_free(openowner_slab, oo); | ||
801 | } | 1119 | } |
802 | 1120 | ||
803 | static void release_openowner(struct nfs4_openowner *oo) | 1121 | static void release_openowner(struct nfs4_openowner *oo) |
804 | { | 1122 | { |
805 | unhash_openowner(oo); | 1123 | struct nfs4_ol_stateid *stp; |
806 | list_del(&oo->oo_close_lru); | 1124 | struct nfs4_client *clp = oo->oo_owner.so_client; |
1125 | struct list_head reaplist; | ||
1126 | |||
1127 | INIT_LIST_HEAD(&reaplist); | ||
1128 | |||
1129 | spin_lock(&clp->cl_lock); | ||
1130 | unhash_openowner_locked(oo); | ||
1131 | while (!list_empty(&oo->oo_owner.so_stateids)) { | ||
1132 | stp = list_first_entry(&oo->oo_owner.so_stateids, | ||
1133 | struct nfs4_ol_stateid, st_perstateowner); | ||
1134 | unhash_open_stateid(stp, &reaplist); | ||
1135 | put_ol_stateid_locked(stp, &reaplist); | ||
1136 | } | ||
1137 | spin_unlock(&clp->cl_lock); | ||
1138 | free_ol_stateid_reaplist(&reaplist); | ||
807 | release_last_closed_stateid(oo); | 1139 | release_last_closed_stateid(oo); |
808 | nfs4_free_openowner(oo); | 1140 | nfs4_put_stateowner(&oo->oo_owner); |
809 | } | 1141 | } |
810 | 1142 | ||
811 | static inline int | 1143 | static inline int |
@@ -842,7 +1174,7 @@ void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr) | |||
842 | return; | 1174 | return; |
843 | 1175 | ||
844 | if (!seqid_mutating_err(ntohl(nfserr))) { | 1176 | if (!seqid_mutating_err(ntohl(nfserr))) { |
845 | cstate->replay_owner = NULL; | 1177 | nfsd4_cstate_clear_replay(cstate); |
846 | return; | 1178 | return; |
847 | } | 1179 | } |
848 | if (!so) | 1180 | if (!so) |
@@ -1030,10 +1362,8 @@ static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, str | |||
1030 | if (ret) | 1362 | if (ret) |
1031 | /* oops; xprt is already down: */ | 1363 | /* oops; xprt is already down: */ |
1032 | nfsd4_conn_lost(&conn->cn_xpt_user); | 1364 | nfsd4_conn_lost(&conn->cn_xpt_user); |
1033 | if (conn->cn_flags & NFS4_CDFC4_BACK) { | 1365 | /* We may have gained or lost a callback channel: */ |
1034 | /* callback channel may be back up */ | 1366 | nfsd4_probe_callback_sync(ses->se_client); |
1035 | nfsd4_probe_callback(ses->se_client); | ||
1036 | } | ||
1037 | } | 1367 | } |
1038 | 1368 | ||
1039 | static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses) | 1369 | static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses) |
@@ -1073,9 +1403,6 @@ static void __free_session(struct nfsd4_session *ses) | |||
1073 | 1403 | ||
1074 | static void free_session(struct nfsd4_session *ses) | 1404 | static void free_session(struct nfsd4_session *ses) |
1075 | { | 1405 | { |
1076 | struct nfsd_net *nn = net_generic(ses->se_client->net, nfsd_net_id); | ||
1077 | |||
1078 | lockdep_assert_held(&nn->client_lock); | ||
1079 | nfsd4_del_conns(ses); | 1406 | nfsd4_del_conns(ses); |
1080 | nfsd4_put_drc_mem(&ses->se_fchannel); | 1407 | nfsd4_put_drc_mem(&ses->se_fchannel); |
1081 | __free_session(ses); | 1408 | __free_session(ses); |
@@ -1097,12 +1424,10 @@ static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, stru | |||
1097 | new->se_cb_sec = cses->cb_sec; | 1424 | new->se_cb_sec = cses->cb_sec; |
1098 | atomic_set(&new->se_ref, 0); | 1425 | atomic_set(&new->se_ref, 0); |
1099 | idx = hash_sessionid(&new->se_sessionid); | 1426 | idx = hash_sessionid(&new->se_sessionid); |
1100 | spin_lock(&nn->client_lock); | ||
1101 | list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]); | 1427 | list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]); |
1102 | spin_lock(&clp->cl_lock); | 1428 | spin_lock(&clp->cl_lock); |
1103 | list_add(&new->se_perclnt, &clp->cl_sessions); | 1429 | list_add(&new->se_perclnt, &clp->cl_sessions); |
1104 | spin_unlock(&clp->cl_lock); | 1430 | spin_unlock(&clp->cl_lock); |
1105 | spin_unlock(&nn->client_lock); | ||
1106 | 1431 | ||
1107 | if (cses->flags & SESSION4_BACK_CHAN) { | 1432 | if (cses->flags & SESSION4_BACK_CHAN) { |
1108 | struct sockaddr *sa = svc_addr(rqstp); | 1433 | struct sockaddr *sa = svc_addr(rqstp); |
@@ -1120,12 +1445,14 @@ static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, stru | |||
1120 | 1445 | ||
1121 | /* caller must hold client_lock */ | 1446 | /* caller must hold client_lock */ |
1122 | static struct nfsd4_session * | 1447 | static struct nfsd4_session * |
1123 | find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net) | 1448 | __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net) |
1124 | { | 1449 | { |
1125 | struct nfsd4_session *elem; | 1450 | struct nfsd4_session *elem; |
1126 | int idx; | 1451 | int idx; |
1127 | struct nfsd_net *nn = net_generic(net, nfsd_net_id); | 1452 | struct nfsd_net *nn = net_generic(net, nfsd_net_id); |
1128 | 1453 | ||
1454 | lockdep_assert_held(&nn->client_lock); | ||
1455 | |||
1129 | dump_sessionid(__func__, sessionid); | 1456 | dump_sessionid(__func__, sessionid); |
1130 | idx = hash_sessionid(sessionid); | 1457 | idx = hash_sessionid(sessionid); |
1131 | /* Search in the appropriate list */ | 1458 | /* Search in the appropriate list */ |
@@ -1140,10 +1467,33 @@ find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net) | |||
1140 | return NULL; | 1467 | return NULL; |
1141 | } | 1468 | } |
1142 | 1469 | ||
1470 | static struct nfsd4_session * | ||
1471 | find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net, | ||
1472 | __be32 *ret) | ||
1473 | { | ||
1474 | struct nfsd4_session *session; | ||
1475 | __be32 status = nfserr_badsession; | ||
1476 | |||
1477 | session = __find_in_sessionid_hashtbl(sessionid, net); | ||
1478 | if (!session) | ||
1479 | goto out; | ||
1480 | status = nfsd4_get_session_locked(session); | ||
1481 | if (status) | ||
1482 | session = NULL; | ||
1483 | out: | ||
1484 | *ret = status; | ||
1485 | return session; | ||
1486 | } | ||
1487 | |||
1143 | /* caller must hold client_lock */ | 1488 | /* caller must hold client_lock */ |
1144 | static void | 1489 | static void |
1145 | unhash_session(struct nfsd4_session *ses) | 1490 | unhash_session(struct nfsd4_session *ses) |
1146 | { | 1491 | { |
1492 | struct nfs4_client *clp = ses->se_client; | ||
1493 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); | ||
1494 | |||
1495 | lockdep_assert_held(&nn->client_lock); | ||
1496 | |||
1147 | list_del(&ses->se_hash); | 1497 | list_del(&ses->se_hash); |
1148 | spin_lock(&ses->se_client->cl_lock); | 1498 | spin_lock(&ses->se_client->cl_lock); |
1149 | list_del(&ses->se_perclnt); | 1499 | list_del(&ses->se_perclnt); |
@@ -1169,15 +1519,20 @@ STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn) | |||
1169 | static struct nfs4_client *alloc_client(struct xdr_netobj name) | 1519 | static struct nfs4_client *alloc_client(struct xdr_netobj name) |
1170 | { | 1520 | { |
1171 | struct nfs4_client *clp; | 1521 | struct nfs4_client *clp; |
1522 | int i; | ||
1172 | 1523 | ||
1173 | clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL); | 1524 | clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL); |
1174 | if (clp == NULL) | 1525 | if (clp == NULL) |
1175 | return NULL; | 1526 | return NULL; |
1176 | clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL); | 1527 | clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL); |
1177 | if (clp->cl_name.data == NULL) { | 1528 | if (clp->cl_name.data == NULL) |
1178 | kfree(clp); | 1529 | goto err_no_name; |
1179 | return NULL; | 1530 | clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) * |
1180 | } | 1531 | OWNER_HASH_SIZE, GFP_KERNEL); |
1532 | if (!clp->cl_ownerstr_hashtbl) | ||
1533 | goto err_no_hashtbl; | ||
1534 | for (i = 0; i < OWNER_HASH_SIZE; i++) | ||
1535 | INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]); | ||
1181 | clp->cl_name.len = name.len; | 1536 | clp->cl_name.len = name.len; |
1182 | INIT_LIST_HEAD(&clp->cl_sessions); | 1537 | INIT_LIST_HEAD(&clp->cl_sessions); |
1183 | idr_init(&clp->cl_stateids); | 1538 | idr_init(&clp->cl_stateids); |
@@ -1192,14 +1547,16 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name) | |||
1192 | spin_lock_init(&clp->cl_lock); | 1547 | spin_lock_init(&clp->cl_lock); |
1193 | rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); | 1548 | rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); |
1194 | return clp; | 1549 | return clp; |
1550 | err_no_hashtbl: | ||
1551 | kfree(clp->cl_name.data); | ||
1552 | err_no_name: | ||
1553 | kfree(clp); | ||
1554 | return NULL; | ||
1195 | } | 1555 | } |
1196 | 1556 | ||
1197 | static void | 1557 | static void |
1198 | free_client(struct nfs4_client *clp) | 1558 | free_client(struct nfs4_client *clp) |
1199 | { | 1559 | { |
1200 | struct nfsd_net __maybe_unused *nn = net_generic(clp->net, nfsd_net_id); | ||
1201 | |||
1202 | lockdep_assert_held(&nn->client_lock); | ||
1203 | while (!list_empty(&clp->cl_sessions)) { | 1560 | while (!list_empty(&clp->cl_sessions)) { |
1204 | struct nfsd4_session *ses; | 1561 | struct nfsd4_session *ses; |
1205 | ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, | 1562 | ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, |
@@ -1210,18 +1567,32 @@ free_client(struct nfs4_client *clp) | |||
1210 | } | 1567 | } |
1211 | rpc_destroy_wait_queue(&clp->cl_cb_waitq); | 1568 | rpc_destroy_wait_queue(&clp->cl_cb_waitq); |
1212 | free_svc_cred(&clp->cl_cred); | 1569 | free_svc_cred(&clp->cl_cred); |
1570 | kfree(clp->cl_ownerstr_hashtbl); | ||
1213 | kfree(clp->cl_name.data); | 1571 | kfree(clp->cl_name.data); |
1214 | idr_destroy(&clp->cl_stateids); | 1572 | idr_destroy(&clp->cl_stateids); |
1215 | kfree(clp); | 1573 | kfree(clp); |
1216 | } | 1574 | } |
1217 | 1575 | ||
1218 | /* must be called under the client_lock */ | 1576 | /* must be called under the client_lock */ |
1219 | static inline void | 1577 | static void |
1220 | unhash_client_locked(struct nfs4_client *clp) | 1578 | unhash_client_locked(struct nfs4_client *clp) |
1221 | { | 1579 | { |
1580 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); | ||
1222 | struct nfsd4_session *ses; | 1581 | struct nfsd4_session *ses; |
1223 | 1582 | ||
1224 | list_del(&clp->cl_lru); | 1583 | lockdep_assert_held(&nn->client_lock); |
1584 | |||
1585 | /* Mark the client as expired! */ | ||
1586 | clp->cl_time = 0; | ||
1587 | /* Make it invisible */ | ||
1588 | if (!list_empty(&clp->cl_idhash)) { | ||
1589 | list_del_init(&clp->cl_idhash); | ||
1590 | if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags)) | ||
1591 | rb_erase(&clp->cl_namenode, &nn->conf_name_tree); | ||
1592 | else | ||
1593 | rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); | ||
1594 | } | ||
1595 | list_del_init(&clp->cl_lru); | ||
1225 | spin_lock(&clp->cl_lock); | 1596 | spin_lock(&clp->cl_lock); |
1226 | list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) | 1597 | list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) |
1227 | list_del_init(&ses->se_hash); | 1598 | list_del_init(&ses->se_hash); |
@@ -1229,53 +1600,71 @@ unhash_client_locked(struct nfs4_client *clp) | |||
1229 | } | 1600 | } |
1230 | 1601 | ||
1231 | static void | 1602 | static void |
1232 | destroy_client(struct nfs4_client *clp) | 1603 | unhash_client(struct nfs4_client *clp) |
1604 | { | ||
1605 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); | ||
1606 | |||
1607 | spin_lock(&nn->client_lock); | ||
1608 | unhash_client_locked(clp); | ||
1609 | spin_unlock(&nn->client_lock); | ||
1610 | } | ||
1611 | |||
1612 | static __be32 mark_client_expired_locked(struct nfs4_client *clp) | ||
1613 | { | ||
1614 | if (atomic_read(&clp->cl_refcount)) | ||
1615 | return nfserr_jukebox; | ||
1616 | unhash_client_locked(clp); | ||
1617 | return nfs_ok; | ||
1618 | } | ||
1619 | |||
1620 | static void | ||
1621 | __destroy_client(struct nfs4_client *clp) | ||
1233 | { | 1622 | { |
1234 | struct nfs4_openowner *oo; | 1623 | struct nfs4_openowner *oo; |
1235 | struct nfs4_delegation *dp; | 1624 | struct nfs4_delegation *dp; |
1236 | struct list_head reaplist; | 1625 | struct list_head reaplist; |
1237 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); | ||
1238 | 1626 | ||
1239 | INIT_LIST_HEAD(&reaplist); | 1627 | INIT_LIST_HEAD(&reaplist); |
1240 | spin_lock(&state_lock); | 1628 | spin_lock(&state_lock); |
1241 | while (!list_empty(&clp->cl_delegations)) { | 1629 | while (!list_empty(&clp->cl_delegations)) { |
1242 | dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); | 1630 | dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); |
1243 | list_del_init(&dp->dl_perclnt); | 1631 | unhash_delegation_locked(dp); |
1244 | list_move(&dp->dl_recall_lru, &reaplist); | 1632 | list_add(&dp->dl_recall_lru, &reaplist); |
1245 | } | 1633 | } |
1246 | spin_unlock(&state_lock); | 1634 | spin_unlock(&state_lock); |
1247 | while (!list_empty(&reaplist)) { | 1635 | while (!list_empty(&reaplist)) { |
1248 | dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); | 1636 | dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); |
1249 | destroy_delegation(dp); | 1637 | list_del_init(&dp->dl_recall_lru); |
1638 | nfs4_put_stid(&dp->dl_stid); | ||
1250 | } | 1639 | } |
1251 | list_splice_init(&clp->cl_revoked, &reaplist); | 1640 | while (!list_empty(&clp->cl_revoked)) { |
1252 | while (!list_empty(&reaplist)) { | ||
1253 | dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); | 1641 | dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); |
1254 | destroy_revoked_delegation(dp); | 1642 | list_del_init(&dp->dl_recall_lru); |
1643 | nfs4_put_stid(&dp->dl_stid); | ||
1255 | } | 1644 | } |
1256 | while (!list_empty(&clp->cl_openowners)) { | 1645 | while (!list_empty(&clp->cl_openowners)) { |
1257 | oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient); | 1646 | oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient); |
1647 | atomic_inc(&oo->oo_owner.so_count); | ||
1258 | release_openowner(oo); | 1648 | release_openowner(oo); |
1259 | } | 1649 | } |
1260 | nfsd4_shutdown_callback(clp); | 1650 | nfsd4_shutdown_callback(clp); |
1261 | if (clp->cl_cb_conn.cb_xprt) | 1651 | if (clp->cl_cb_conn.cb_xprt) |
1262 | svc_xprt_put(clp->cl_cb_conn.cb_xprt); | 1652 | svc_xprt_put(clp->cl_cb_conn.cb_xprt); |
1263 | list_del(&clp->cl_idhash); | ||
1264 | if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags)) | ||
1265 | rb_erase(&clp->cl_namenode, &nn->conf_name_tree); | ||
1266 | else | ||
1267 | rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); | ||
1268 | spin_lock(&nn->client_lock); | ||
1269 | unhash_client_locked(clp); | ||
1270 | WARN_ON_ONCE(atomic_read(&clp->cl_refcount)); | ||
1271 | free_client(clp); | 1653 | free_client(clp); |
1272 | spin_unlock(&nn->client_lock); | 1654 | } |
1655 | |||
1656 | static void | ||
1657 | destroy_client(struct nfs4_client *clp) | ||
1658 | { | ||
1659 | unhash_client(clp); | ||
1660 | __destroy_client(clp); | ||
1273 | } | 1661 | } |
1274 | 1662 | ||
1275 | static void expire_client(struct nfs4_client *clp) | 1663 | static void expire_client(struct nfs4_client *clp) |
1276 | { | 1664 | { |
1665 | unhash_client(clp); | ||
1277 | nfsd4_client_record_remove(clp); | 1666 | nfsd4_client_record_remove(clp); |
1278 | destroy_client(clp); | 1667 | __destroy_client(clp); |
1279 | } | 1668 | } |
1280 | 1669 | ||
1281 | static void copy_verf(struct nfs4_client *target, nfs4_verifier *source) | 1670 | static void copy_verf(struct nfs4_client *target, nfs4_verifier *source) |
@@ -1408,25 +1797,28 @@ static bool mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp) | |||
1408 | return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal); | 1797 | return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal); |
1409 | } | 1798 | } |
1410 | 1799 | ||
1411 | static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn) | 1800 | static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn) |
1412 | { | 1801 | { |
1413 | static u32 current_clientid = 1; | 1802 | __be32 verf[2]; |
1414 | 1803 | ||
1415 | clp->cl_clientid.cl_boot = nn->boot_time; | 1804 | /* |
1416 | clp->cl_clientid.cl_id = current_clientid++; | 1805 | * This is opaque to client, so no need to byte-swap. Use |
1806 | * __force to keep sparse happy | ||
1807 | */ | ||
1808 | verf[0] = (__force __be32)get_seconds(); | ||
1809 | verf[1] = (__force __be32)nn->clientid_counter; | ||
1810 | memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data)); | ||
1417 | } | 1811 | } |
1418 | 1812 | ||
1419 | static void gen_confirm(struct nfs4_client *clp) | 1813 | static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn) |
1420 | { | 1814 | { |
1421 | __be32 verf[2]; | 1815 | clp->cl_clientid.cl_boot = nn->boot_time; |
1422 | static u32 i; | 1816 | clp->cl_clientid.cl_id = nn->clientid_counter++; |
1423 | 1817 | gen_confirm(clp, nn); | |
1424 | verf[0] = (__be32)get_seconds(); | ||
1425 | verf[1] = (__be32)i++; | ||
1426 | memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data)); | ||
1427 | } | 1818 | } |
1428 | 1819 | ||
1429 | static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t) | 1820 | static struct nfs4_stid * |
1821 | find_stateid_locked(struct nfs4_client *cl, stateid_t *t) | ||
1430 | { | 1822 | { |
1431 | struct nfs4_stid *ret; | 1823 | struct nfs4_stid *ret; |
1432 | 1824 | ||
@@ -1436,16 +1828,21 @@ static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t) | |||
1436 | return ret; | 1828 | return ret; |
1437 | } | 1829 | } |
1438 | 1830 | ||
1439 | static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask) | 1831 | static struct nfs4_stid * |
1832 | find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask) | ||
1440 | { | 1833 | { |
1441 | struct nfs4_stid *s; | 1834 | struct nfs4_stid *s; |
1442 | 1835 | ||
1443 | s = find_stateid(cl, t); | 1836 | spin_lock(&cl->cl_lock); |
1444 | if (!s) | 1837 | s = find_stateid_locked(cl, t); |
1445 | return NULL; | 1838 | if (s != NULL) { |
1446 | if (typemask & s->sc_type) | 1839 | if (typemask & s->sc_type) |
1447 | return s; | 1840 | atomic_inc(&s->sc_count); |
1448 | return NULL; | 1841 | else |
1842 | s = NULL; | ||
1843 | } | ||
1844 | spin_unlock(&cl->cl_lock); | ||
1845 | return s; | ||
1449 | } | 1846 | } |
1450 | 1847 | ||
1451 | static struct nfs4_client *create_client(struct xdr_netobj name, | 1848 | static struct nfs4_client *create_client(struct xdr_netobj name, |
@@ -1455,7 +1852,6 @@ static struct nfs4_client *create_client(struct xdr_netobj name, | |||
1455 | struct sockaddr *sa = svc_addr(rqstp); | 1852 | struct sockaddr *sa = svc_addr(rqstp); |
1456 | int ret; | 1853 | int ret; |
1457 | struct net *net = SVC_NET(rqstp); | 1854 | struct net *net = SVC_NET(rqstp); |
1458 | struct nfsd_net *nn = net_generic(net, nfsd_net_id); | ||
1459 | 1855 | ||
1460 | clp = alloc_client(name); | 1856 | clp = alloc_client(name); |
1461 | if (clp == NULL) | 1857 | if (clp == NULL) |
@@ -1463,17 +1859,14 @@ static struct nfs4_client *create_client(struct xdr_netobj name, | |||
1463 | 1859 | ||
1464 | ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred); | 1860 | ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred); |
1465 | if (ret) { | 1861 | if (ret) { |
1466 | spin_lock(&nn->client_lock); | ||
1467 | free_client(clp); | 1862 | free_client(clp); |
1468 | spin_unlock(&nn->client_lock); | ||
1469 | return NULL; | 1863 | return NULL; |
1470 | } | 1864 | } |
1471 | nfsd4_init_callback(&clp->cl_cb_null); | 1865 | INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_run_cb_null); |
1472 | clp->cl_time = get_seconds(); | 1866 | clp->cl_time = get_seconds(); |
1473 | clear_bit(0, &clp->cl_cb_slot_busy); | 1867 | clear_bit(0, &clp->cl_cb_slot_busy); |
1474 | copy_verf(clp, verf); | 1868 | copy_verf(clp, verf); |
1475 | rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa); | 1869 | rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa); |
1476 | gen_confirm(clp); | ||
1477 | clp->cl_cb_session = NULL; | 1870 | clp->cl_cb_session = NULL; |
1478 | clp->net = net; | 1871 | clp->net = net; |
1479 | return clp; | 1872 | return clp; |
@@ -1525,11 +1918,13 @@ add_to_unconfirmed(struct nfs4_client *clp) | |||
1525 | unsigned int idhashval; | 1918 | unsigned int idhashval; |
1526 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); | 1919 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); |
1527 | 1920 | ||
1921 | lockdep_assert_held(&nn->client_lock); | ||
1922 | |||
1528 | clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); | 1923 | clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); |
1529 | add_clp_to_name_tree(clp, &nn->unconf_name_tree); | 1924 | add_clp_to_name_tree(clp, &nn->unconf_name_tree); |
1530 | idhashval = clientid_hashval(clp->cl_clientid.cl_id); | 1925 | idhashval = clientid_hashval(clp->cl_clientid.cl_id); |
1531 | list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]); | 1926 | list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]); |
1532 | renew_client(clp); | 1927 | renew_client_locked(clp); |
1533 | } | 1928 | } |
1534 | 1929 | ||
1535 | static void | 1930 | static void |
@@ -1538,12 +1933,14 @@ move_to_confirmed(struct nfs4_client *clp) | |||
1538 | unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id); | 1933 | unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id); |
1539 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); | 1934 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); |
1540 | 1935 | ||
1936 | lockdep_assert_held(&nn->client_lock); | ||
1937 | |||
1541 | dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp); | 1938 | dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp); |
1542 | list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]); | 1939 | list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]); |
1543 | rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); | 1940 | rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); |
1544 | add_clp_to_name_tree(clp, &nn->conf_name_tree); | 1941 | add_clp_to_name_tree(clp, &nn->conf_name_tree); |
1545 | set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); | 1942 | set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); |
1546 | renew_client(clp); | 1943 | renew_client_locked(clp); |
1547 | } | 1944 | } |
1548 | 1945 | ||
1549 | static struct nfs4_client * | 1946 | static struct nfs4_client * |
@@ -1556,7 +1953,7 @@ find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions) | |||
1556 | if (same_clid(&clp->cl_clientid, clid)) { | 1953 | if (same_clid(&clp->cl_clientid, clid)) { |
1557 | if ((bool)clp->cl_minorversion != sessions) | 1954 | if ((bool)clp->cl_minorversion != sessions) |
1558 | return NULL; | 1955 | return NULL; |
1559 | renew_client(clp); | 1956 | renew_client_locked(clp); |
1560 | return clp; | 1957 | return clp; |
1561 | } | 1958 | } |
1562 | } | 1959 | } |
@@ -1568,6 +1965,7 @@ find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) | |||
1568 | { | 1965 | { |
1569 | struct list_head *tbl = nn->conf_id_hashtbl; | 1966 | struct list_head *tbl = nn->conf_id_hashtbl; |
1570 | 1967 | ||
1968 | lockdep_assert_held(&nn->client_lock); | ||
1571 | return find_client_in_id_table(tbl, clid, sessions); | 1969 | return find_client_in_id_table(tbl, clid, sessions); |
1572 | } | 1970 | } |
1573 | 1971 | ||
@@ -1576,6 +1974,7 @@ find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) | |||
1576 | { | 1974 | { |
1577 | struct list_head *tbl = nn->unconf_id_hashtbl; | 1975 | struct list_head *tbl = nn->unconf_id_hashtbl; |
1578 | 1976 | ||
1977 | lockdep_assert_held(&nn->client_lock); | ||
1579 | return find_client_in_id_table(tbl, clid, sessions); | 1978 | return find_client_in_id_table(tbl, clid, sessions); |
1580 | } | 1979 | } |
1581 | 1980 | ||
@@ -1587,12 +1986,14 @@ static bool clp_used_exchangeid(struct nfs4_client *clp) | |||
1587 | static struct nfs4_client * | 1986 | static struct nfs4_client * |
1588 | find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) | 1987 | find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) |
1589 | { | 1988 | { |
1989 | lockdep_assert_held(&nn->client_lock); | ||
1590 | return find_clp_in_name_tree(name, &nn->conf_name_tree); | 1990 | return find_clp_in_name_tree(name, &nn->conf_name_tree); |
1591 | } | 1991 | } |
1592 | 1992 | ||
1593 | static struct nfs4_client * | 1993 | static struct nfs4_client * |
1594 | find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) | 1994 | find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) |
1595 | { | 1995 | { |
1996 | lockdep_assert_held(&nn->client_lock); | ||
1596 | return find_clp_in_name_tree(name, &nn->unconf_name_tree); | 1997 | return find_clp_in_name_tree(name, &nn->unconf_name_tree); |
1597 | } | 1998 | } |
1598 | 1999 | ||
@@ -1642,7 +2043,7 @@ out_err: | |||
1642 | /* | 2043 | /* |
1643 | * Cache a reply. nfsd4_check_resp_size() has bounded the cache size. | 2044 | * Cache a reply. nfsd4_check_resp_size() has bounded the cache size. |
1644 | */ | 2045 | */ |
1645 | void | 2046 | static void |
1646 | nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) | 2047 | nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) |
1647 | { | 2048 | { |
1648 | struct xdr_buf *buf = resp->xdr.buf; | 2049 | struct xdr_buf *buf = resp->xdr.buf; |
@@ -1758,7 +2159,8 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, | |||
1758 | struct nfsd4_compound_state *cstate, | 2159 | struct nfsd4_compound_state *cstate, |
1759 | struct nfsd4_exchange_id *exid) | 2160 | struct nfsd4_exchange_id *exid) |
1760 | { | 2161 | { |
1761 | struct nfs4_client *unconf, *conf, *new; | 2162 | struct nfs4_client *conf, *new; |
2163 | struct nfs4_client *unconf = NULL; | ||
1762 | __be32 status; | 2164 | __be32 status; |
1763 | char addr_str[INET6_ADDRSTRLEN]; | 2165 | char addr_str[INET6_ADDRSTRLEN]; |
1764 | nfs4_verifier verf = exid->verifier; | 2166 | nfs4_verifier verf = exid->verifier; |
@@ -1787,8 +2189,12 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, | |||
1787 | return nfserr_encr_alg_unsupp; | 2189 | return nfserr_encr_alg_unsupp; |
1788 | } | 2190 | } |
1789 | 2191 | ||
2192 | new = create_client(exid->clname, rqstp, &verf); | ||
2193 | if (new == NULL) | ||
2194 | return nfserr_jukebox; | ||
2195 | |||
1790 | /* Cases below refer to rfc 5661 section 18.35.4: */ | 2196 | /* Cases below refer to rfc 5661 section 18.35.4: */ |
1791 | nfs4_lock_state(); | 2197 | spin_lock(&nn->client_lock); |
1792 | conf = find_confirmed_client_by_name(&exid->clname, nn); | 2198 | conf = find_confirmed_client_by_name(&exid->clname, nn); |
1793 | if (conf) { | 2199 | if (conf) { |
1794 | bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred); | 2200 | bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred); |
@@ -1813,7 +2219,6 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, | |||
1813 | } | 2219 | } |
1814 | /* case 6 */ | 2220 | /* case 6 */ |
1815 | exid->flags |= EXCHGID4_FLAG_CONFIRMED_R; | 2221 | exid->flags |= EXCHGID4_FLAG_CONFIRMED_R; |
1816 | new = conf; | ||
1817 | goto out_copy; | 2222 | goto out_copy; |
1818 | } | 2223 | } |
1819 | if (!creds_match) { /* case 3 */ | 2224 | if (!creds_match) { /* case 3 */ |
@@ -1821,15 +2226,14 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, | |||
1821 | status = nfserr_clid_inuse; | 2226 | status = nfserr_clid_inuse; |
1822 | goto out; | 2227 | goto out; |
1823 | } | 2228 | } |
1824 | expire_client(conf); | ||
1825 | goto out_new; | 2229 | goto out_new; |
1826 | } | 2230 | } |
1827 | if (verfs_match) { /* case 2 */ | 2231 | if (verfs_match) { /* case 2 */ |
1828 | conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; | 2232 | conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; |
1829 | new = conf; | ||
1830 | goto out_copy; | 2233 | goto out_copy; |
1831 | } | 2234 | } |
1832 | /* case 5, client reboot */ | 2235 | /* case 5, client reboot */ |
2236 | conf = NULL; | ||
1833 | goto out_new; | 2237 | goto out_new; |
1834 | } | 2238 | } |
1835 | 2239 | ||
@@ -1840,33 +2244,38 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, | |||
1840 | 2244 | ||
1841 | unconf = find_unconfirmed_client_by_name(&exid->clname, nn); | 2245 | unconf = find_unconfirmed_client_by_name(&exid->clname, nn); |
1842 | if (unconf) /* case 4, possible retry or client restart */ | 2246 | if (unconf) /* case 4, possible retry or client restart */ |
1843 | expire_client(unconf); | 2247 | unhash_client_locked(unconf); |
1844 | 2248 | ||
1845 | /* case 1 (normal case) */ | 2249 | /* case 1 (normal case) */ |
1846 | out_new: | 2250 | out_new: |
1847 | new = create_client(exid->clname, rqstp, &verf); | 2251 | if (conf) { |
1848 | if (new == NULL) { | 2252 | status = mark_client_expired_locked(conf); |
1849 | status = nfserr_jukebox; | 2253 | if (status) |
1850 | goto out; | 2254 | goto out; |
1851 | } | 2255 | } |
1852 | new->cl_minorversion = cstate->minorversion; | 2256 | new->cl_minorversion = cstate->minorversion; |
1853 | new->cl_mach_cred = (exid->spa_how == SP4_MACH_CRED); | 2257 | new->cl_mach_cred = (exid->spa_how == SP4_MACH_CRED); |
1854 | 2258 | ||
1855 | gen_clid(new, nn); | 2259 | gen_clid(new, nn); |
1856 | add_to_unconfirmed(new); | 2260 | add_to_unconfirmed(new); |
2261 | swap(new, conf); | ||
1857 | out_copy: | 2262 | out_copy: |
1858 | exid->clientid.cl_boot = new->cl_clientid.cl_boot; | 2263 | exid->clientid.cl_boot = conf->cl_clientid.cl_boot; |
1859 | exid->clientid.cl_id = new->cl_clientid.cl_id; | 2264 | exid->clientid.cl_id = conf->cl_clientid.cl_id; |
1860 | 2265 | ||
1861 | exid->seqid = new->cl_cs_slot.sl_seqid + 1; | 2266 | exid->seqid = conf->cl_cs_slot.sl_seqid + 1; |
1862 | nfsd4_set_ex_flags(new, exid); | 2267 | nfsd4_set_ex_flags(conf, exid); |
1863 | 2268 | ||
1864 | dprintk("nfsd4_exchange_id seqid %d flags %x\n", | 2269 | dprintk("nfsd4_exchange_id seqid %d flags %x\n", |
1865 | new->cl_cs_slot.sl_seqid, new->cl_exchange_flags); | 2270 | conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags); |
1866 | status = nfs_ok; | 2271 | status = nfs_ok; |
1867 | 2272 | ||
1868 | out: | 2273 | out: |
1869 | nfs4_unlock_state(); | 2274 | spin_unlock(&nn->client_lock); |
2275 | if (new) | ||
2276 | expire_client(new); | ||
2277 | if (unconf) | ||
2278 | expire_client(unconf); | ||
1870 | return status; | 2279 | return status; |
1871 | } | 2280 | } |
1872 | 2281 | ||
@@ -2010,6 +2419,7 @@ nfsd4_create_session(struct svc_rqst *rqstp, | |||
2010 | { | 2419 | { |
2011 | struct sockaddr *sa = svc_addr(rqstp); | 2420 | struct sockaddr *sa = svc_addr(rqstp); |
2012 | struct nfs4_client *conf, *unconf; | 2421 | struct nfs4_client *conf, *unconf; |
2422 | struct nfs4_client *old = NULL; | ||
2013 | struct nfsd4_session *new; | 2423 | struct nfsd4_session *new; |
2014 | struct nfsd4_conn *conn; | 2424 | struct nfsd4_conn *conn; |
2015 | struct nfsd4_clid_slot *cs_slot = NULL; | 2425 | struct nfsd4_clid_slot *cs_slot = NULL; |
@@ -2035,7 +2445,7 @@ nfsd4_create_session(struct svc_rqst *rqstp, | |||
2035 | if (!conn) | 2445 | if (!conn) |
2036 | goto out_free_session; | 2446 | goto out_free_session; |
2037 | 2447 | ||
2038 | nfs4_lock_state(); | 2448 | spin_lock(&nn->client_lock); |
2039 | unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn); | 2449 | unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn); |
2040 | conf = find_confirmed_client(&cr_ses->clientid, true, nn); | 2450 | conf = find_confirmed_client(&cr_ses->clientid, true, nn); |
2041 | WARN_ON_ONCE(conf && unconf); | 2451 | WARN_ON_ONCE(conf && unconf); |
@@ -2054,7 +2464,6 @@ nfsd4_create_session(struct svc_rqst *rqstp, | |||
2054 | goto out_free_conn; | 2464 | goto out_free_conn; |
2055 | } | 2465 | } |
2056 | } else if (unconf) { | 2466 | } else if (unconf) { |
2057 | struct nfs4_client *old; | ||
2058 | if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) || | 2467 | if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) || |
2059 | !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) { | 2468 | !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) { |
2060 | status = nfserr_clid_inuse; | 2469 | status = nfserr_clid_inuse; |
@@ -2072,10 +2481,11 @@ nfsd4_create_session(struct svc_rqst *rqstp, | |||
2072 | } | 2481 | } |
2073 | old = find_confirmed_client_by_name(&unconf->cl_name, nn); | 2482 | old = find_confirmed_client_by_name(&unconf->cl_name, nn); |
2074 | if (old) { | 2483 | if (old) { |
2075 | status = mark_client_expired(old); | 2484 | status = mark_client_expired_locked(old); |
2076 | if (status) | 2485 | if (status) { |
2486 | old = NULL; | ||
2077 | goto out_free_conn; | 2487 | goto out_free_conn; |
2078 | expire_client(old); | 2488 | } |
2079 | } | 2489 | } |
2080 | move_to_confirmed(unconf); | 2490 | move_to_confirmed(unconf); |
2081 | conf = unconf; | 2491 | conf = unconf; |
@@ -2091,20 +2501,27 @@ nfsd4_create_session(struct svc_rqst *rqstp, | |||
2091 | cr_ses->flags &= ~SESSION4_RDMA; | 2501 | cr_ses->flags &= ~SESSION4_RDMA; |
2092 | 2502 | ||
2093 | init_session(rqstp, new, conf, cr_ses); | 2503 | init_session(rqstp, new, conf, cr_ses); |
2094 | nfsd4_init_conn(rqstp, conn, new); | 2504 | nfsd4_get_session_locked(new); |
2095 | 2505 | ||
2096 | memcpy(cr_ses->sessionid.data, new->se_sessionid.data, | 2506 | memcpy(cr_ses->sessionid.data, new->se_sessionid.data, |
2097 | NFS4_MAX_SESSIONID_LEN); | 2507 | NFS4_MAX_SESSIONID_LEN); |
2098 | cs_slot->sl_seqid++; | 2508 | cs_slot->sl_seqid++; |
2099 | cr_ses->seqid = cs_slot->sl_seqid; | 2509 | cr_ses->seqid = cs_slot->sl_seqid; |
2100 | 2510 | ||
2101 | /* cache solo and embedded create sessions under the state lock */ | 2511 | /* cache solo and embedded create sessions under the client_lock */ |
2102 | nfsd4_cache_create_session(cr_ses, cs_slot, status); | 2512 | nfsd4_cache_create_session(cr_ses, cs_slot, status); |
2103 | nfs4_unlock_state(); | 2513 | spin_unlock(&nn->client_lock); |
2514 | /* init connection and backchannel */ | ||
2515 | nfsd4_init_conn(rqstp, conn, new); | ||
2516 | nfsd4_put_session(new); | ||
2517 | if (old) | ||
2518 | expire_client(old); | ||
2104 | return status; | 2519 | return status; |
2105 | out_free_conn: | 2520 | out_free_conn: |
2106 | nfs4_unlock_state(); | 2521 | spin_unlock(&nn->client_lock); |
2107 | free_conn(conn); | 2522 | free_conn(conn); |
2523 | if (old) | ||
2524 | expire_client(old); | ||
2108 | out_free_session: | 2525 | out_free_session: |
2109 | __free_session(new); | 2526 | __free_session(new); |
2110 | out_release_drc_mem: | 2527 | out_release_drc_mem: |
@@ -2152,17 +2569,16 @@ __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp, | |||
2152 | __be32 status; | 2569 | __be32 status; |
2153 | struct nfsd4_conn *conn; | 2570 | struct nfsd4_conn *conn; |
2154 | struct nfsd4_session *session; | 2571 | struct nfsd4_session *session; |
2155 | struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); | 2572 | struct net *net = SVC_NET(rqstp); |
2573 | struct nfsd_net *nn = net_generic(net, nfsd_net_id); | ||
2156 | 2574 | ||
2157 | if (!nfsd4_last_compound_op(rqstp)) | 2575 | if (!nfsd4_last_compound_op(rqstp)) |
2158 | return nfserr_not_only_op; | 2576 | return nfserr_not_only_op; |
2159 | nfs4_lock_state(); | ||
2160 | spin_lock(&nn->client_lock); | 2577 | spin_lock(&nn->client_lock); |
2161 | session = find_in_sessionid_hashtbl(&bcts->sessionid, SVC_NET(rqstp)); | 2578 | session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status); |
2162 | spin_unlock(&nn->client_lock); | 2579 | spin_unlock(&nn->client_lock); |
2163 | status = nfserr_badsession; | ||
2164 | if (!session) | 2580 | if (!session) |
2165 | goto out; | 2581 | goto out_no_session; |
2166 | status = nfserr_wrong_cred; | 2582 | status = nfserr_wrong_cred; |
2167 | if (!mach_creds_match(session->se_client, rqstp)) | 2583 | if (!mach_creds_match(session->se_client, rqstp)) |
2168 | goto out; | 2584 | goto out; |
@@ -2176,7 +2592,8 @@ __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp, | |||
2176 | nfsd4_init_conn(rqstp, conn, session); | 2592 | nfsd4_init_conn(rqstp, conn, session); |
2177 | status = nfs_ok; | 2593 | status = nfs_ok; |
2178 | out: | 2594 | out: |
2179 | nfs4_unlock_state(); | 2595 | nfsd4_put_session(session); |
2596 | out_no_session: | ||
2180 | return status; | 2597 | return status; |
2181 | } | 2598 | } |
2182 | 2599 | ||
@@ -2195,9 +2612,9 @@ nfsd4_destroy_session(struct svc_rqst *r, | |||
2195 | struct nfsd4_session *ses; | 2612 | struct nfsd4_session *ses; |
2196 | __be32 status; | 2613 | __be32 status; |
2197 | int ref_held_by_me = 0; | 2614 | int ref_held_by_me = 0; |
2198 | struct nfsd_net *nn = net_generic(SVC_NET(r), nfsd_net_id); | 2615 | struct net *net = SVC_NET(r); |
2616 | struct nfsd_net *nn = net_generic(net, nfsd_net_id); | ||
2199 | 2617 | ||
2200 | nfs4_lock_state(); | ||
2201 | status = nfserr_not_only_op; | 2618 | status = nfserr_not_only_op; |
2202 | if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) { | 2619 | if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) { |
2203 | if (!nfsd4_last_compound_op(r)) | 2620 | if (!nfsd4_last_compound_op(r)) |
@@ -2206,14 +2623,12 @@ nfsd4_destroy_session(struct svc_rqst *r, | |||
2206 | } | 2623 | } |
2207 | dump_sessionid(__func__, &sessionid->sessionid); | 2624 | dump_sessionid(__func__, &sessionid->sessionid); |
2208 | spin_lock(&nn->client_lock); | 2625 | spin_lock(&nn->client_lock); |
2209 | ses = find_in_sessionid_hashtbl(&sessionid->sessionid, SVC_NET(r)); | 2626 | ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status); |
2210 | status = nfserr_badsession; | ||
2211 | if (!ses) | 2627 | if (!ses) |
2212 | goto out_client_lock; | 2628 | goto out_client_lock; |
2213 | status = nfserr_wrong_cred; | 2629 | status = nfserr_wrong_cred; |
2214 | if (!mach_creds_match(ses->se_client, r)) | 2630 | if (!mach_creds_match(ses->se_client, r)) |
2215 | goto out_client_lock; | 2631 | goto out_put_session; |
2216 | nfsd4_get_session_locked(ses); | ||
2217 | status = mark_session_dead_locked(ses, 1 + ref_held_by_me); | 2632 | status = mark_session_dead_locked(ses, 1 + ref_held_by_me); |
2218 | if (status) | 2633 | if (status) |
2219 | goto out_put_session; | 2634 | goto out_put_session; |
@@ -2225,11 +2640,10 @@ nfsd4_destroy_session(struct svc_rqst *r, | |||
2225 | spin_lock(&nn->client_lock); | 2640 | spin_lock(&nn->client_lock); |
2226 | status = nfs_ok; | 2641 | status = nfs_ok; |
2227 | out_put_session: | 2642 | out_put_session: |
2228 | nfsd4_put_session(ses); | 2643 | nfsd4_put_session_locked(ses); |
2229 | out_client_lock: | 2644 | out_client_lock: |
2230 | spin_unlock(&nn->client_lock); | 2645 | spin_unlock(&nn->client_lock); |
2231 | out: | 2646 | out: |
2232 | nfs4_unlock_state(); | ||
2233 | return status; | 2647 | return status; |
2234 | } | 2648 | } |
2235 | 2649 | ||
@@ -2300,7 +2714,8 @@ nfsd4_sequence(struct svc_rqst *rqstp, | |||
2300 | struct nfsd4_conn *conn; | 2714 | struct nfsd4_conn *conn; |
2301 | __be32 status; | 2715 | __be32 status; |
2302 | int buflen; | 2716 | int buflen; |
2303 | struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); | 2717 | struct net *net = SVC_NET(rqstp); |
2718 | struct nfsd_net *nn = net_generic(net, nfsd_net_id); | ||
2304 | 2719 | ||
2305 | if (resp->opcnt != 1) | 2720 | if (resp->opcnt != 1) |
2306 | return nfserr_sequence_pos; | 2721 | return nfserr_sequence_pos; |
@@ -2314,17 +2729,10 @@ nfsd4_sequence(struct svc_rqst *rqstp, | |||
2314 | return nfserr_jukebox; | 2729 | return nfserr_jukebox; |
2315 | 2730 | ||
2316 | spin_lock(&nn->client_lock); | 2731 | spin_lock(&nn->client_lock); |
2317 | status = nfserr_badsession; | 2732 | session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status); |
2318 | session = find_in_sessionid_hashtbl(&seq->sessionid, SVC_NET(rqstp)); | ||
2319 | if (!session) | 2733 | if (!session) |
2320 | goto out_no_session; | 2734 | goto out_no_session; |
2321 | clp = session->se_client; | 2735 | clp = session->se_client; |
2322 | status = get_client_locked(clp); | ||
2323 | if (status) | ||
2324 | goto out_no_session; | ||
2325 | status = nfsd4_get_session_locked(session); | ||
2326 | if (status) | ||
2327 | goto out_put_client; | ||
2328 | 2736 | ||
2329 | status = nfserr_too_many_ops; | 2737 | status = nfserr_too_many_ops; |
2330 | if (nfsd4_session_too_many_ops(rqstp, session)) | 2738 | if (nfsd4_session_too_many_ops(rqstp, session)) |
@@ -2354,6 +2762,7 @@ nfsd4_sequence(struct svc_rqst *rqstp, | |||
2354 | goto out_put_session; | 2762 | goto out_put_session; |
2355 | cstate->slot = slot; | 2763 | cstate->slot = slot; |
2356 | cstate->session = session; | 2764 | cstate->session = session; |
2765 | cstate->clp = clp; | ||
2357 | /* Return the cached reply status and set cstate->status | 2766 | /* Return the cached reply status and set cstate->status |
2358 | * for nfsd4_proc_compound processing */ | 2767 | * for nfsd4_proc_compound processing */ |
2359 | status = nfsd4_replay_cache_entry(resp, seq); | 2768 | status = nfsd4_replay_cache_entry(resp, seq); |
@@ -2388,6 +2797,7 @@ nfsd4_sequence(struct svc_rqst *rqstp, | |||
2388 | 2797 | ||
2389 | cstate->slot = slot; | 2798 | cstate->slot = slot; |
2390 | cstate->session = session; | 2799 | cstate->session = session; |
2800 | cstate->clp = clp; | ||
2391 | 2801 | ||
2392 | out: | 2802 | out: |
2393 | switch (clp->cl_cb_state) { | 2803 | switch (clp->cl_cb_state) { |
@@ -2408,31 +2818,48 @@ out_no_session: | |||
2408 | spin_unlock(&nn->client_lock); | 2818 | spin_unlock(&nn->client_lock); |
2409 | return status; | 2819 | return status; |
2410 | out_put_session: | 2820 | out_put_session: |
2411 | nfsd4_put_session(session); | 2821 | nfsd4_put_session_locked(session); |
2412 | out_put_client: | ||
2413 | put_client_renew_locked(clp); | ||
2414 | goto out_no_session; | 2822 | goto out_no_session; |
2415 | } | 2823 | } |
2416 | 2824 | ||
2825 | void | ||
2826 | nfsd4_sequence_done(struct nfsd4_compoundres *resp) | ||
2827 | { | ||
2828 | struct nfsd4_compound_state *cs = &resp->cstate; | ||
2829 | |||
2830 | if (nfsd4_has_session(cs)) { | ||
2831 | if (cs->status != nfserr_replay_cache) { | ||
2832 | nfsd4_store_cache_entry(resp); | ||
2833 | cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE; | ||
2834 | } | ||
2835 | /* Drop session reference that was taken in nfsd4_sequence() */ | ||
2836 | nfsd4_put_session(cs->session); | ||
2837 | } else if (cs->clp) | ||
2838 | put_client_renew(cs->clp); | ||
2839 | } | ||
2840 | |||
2417 | __be32 | 2841 | __be32 |
2418 | nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc) | 2842 | nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc) |
2419 | { | 2843 | { |
2420 | struct nfs4_client *conf, *unconf, *clp; | 2844 | struct nfs4_client *conf, *unconf; |
2845 | struct nfs4_client *clp = NULL; | ||
2421 | __be32 status = 0; | 2846 | __be32 status = 0; |
2422 | struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); | 2847 | struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); |
2423 | 2848 | ||
2424 | nfs4_lock_state(); | 2849 | spin_lock(&nn->client_lock); |
2425 | unconf = find_unconfirmed_client(&dc->clientid, true, nn); | 2850 | unconf = find_unconfirmed_client(&dc->clientid, true, nn); |
2426 | conf = find_confirmed_client(&dc->clientid, true, nn); | 2851 | conf = find_confirmed_client(&dc->clientid, true, nn); |
2427 | WARN_ON_ONCE(conf && unconf); | 2852 | WARN_ON_ONCE(conf && unconf); |
2428 | 2853 | ||
2429 | if (conf) { | 2854 | if (conf) { |
2430 | clp = conf; | ||
2431 | |||
2432 | if (client_has_state(conf)) { | 2855 | if (client_has_state(conf)) { |
2433 | status = nfserr_clientid_busy; | 2856 | status = nfserr_clientid_busy; |
2434 | goto out; | 2857 | goto out; |
2435 | } | 2858 | } |
2859 | status = mark_client_expired_locked(conf); | ||
2860 | if (status) | ||
2861 | goto out; | ||
2862 | clp = conf; | ||
2436 | } else if (unconf) | 2863 | } else if (unconf) |
2437 | clp = unconf; | 2864 | clp = unconf; |
2438 | else { | 2865 | else { |
@@ -2440,12 +2867,15 @@ nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta | |||
2440 | goto out; | 2867 | goto out; |
2441 | } | 2868 | } |
2442 | if (!mach_creds_match(clp, rqstp)) { | 2869 | if (!mach_creds_match(clp, rqstp)) { |
2870 | clp = NULL; | ||
2443 | status = nfserr_wrong_cred; | 2871 | status = nfserr_wrong_cred; |
2444 | goto out; | 2872 | goto out; |
2445 | } | 2873 | } |
2446 | expire_client(clp); | 2874 | unhash_client_locked(clp); |
2447 | out: | 2875 | out: |
2448 | nfs4_unlock_state(); | 2876 | spin_unlock(&nn->client_lock); |
2877 | if (clp) | ||
2878 | expire_client(clp); | ||
2449 | return status; | 2879 | return status; |
2450 | } | 2880 | } |
2451 | 2881 | ||
@@ -2464,7 +2894,6 @@ nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta | |||
2464 | return nfs_ok; | 2894 | return nfs_ok; |
2465 | } | 2895 | } |
2466 | 2896 | ||
2467 | nfs4_lock_state(); | ||
2468 | status = nfserr_complete_already; | 2897 | status = nfserr_complete_already; |
2469 | if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, | 2898 | if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, |
2470 | &cstate->session->se_client->cl_flags)) | 2899 | &cstate->session->se_client->cl_flags)) |
@@ -2484,7 +2913,6 @@ nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta | |||
2484 | status = nfs_ok; | 2913 | status = nfs_ok; |
2485 | nfsd4_client_record_create(cstate->session->se_client); | 2914 | nfsd4_client_record_create(cstate->session->se_client); |
2486 | out: | 2915 | out: |
2487 | nfs4_unlock_state(); | ||
2488 | return status; | 2916 | return status; |
2489 | } | 2917 | } |
2490 | 2918 | ||
@@ -2494,12 +2922,16 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
2494 | { | 2922 | { |
2495 | struct xdr_netobj clname = setclid->se_name; | 2923 | struct xdr_netobj clname = setclid->se_name; |
2496 | nfs4_verifier clverifier = setclid->se_verf; | 2924 | nfs4_verifier clverifier = setclid->se_verf; |
2497 | struct nfs4_client *conf, *unconf, *new; | 2925 | struct nfs4_client *conf, *new; |
2926 | struct nfs4_client *unconf = NULL; | ||
2498 | __be32 status; | 2927 | __be32 status; |
2499 | struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); | 2928 | struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); |
2500 | 2929 | ||
2930 | new = create_client(clname, rqstp, &clverifier); | ||
2931 | if (new == NULL) | ||
2932 | return nfserr_jukebox; | ||
2501 | /* Cases below refer to rfc 3530 section 14.2.33: */ | 2933 | /* Cases below refer to rfc 3530 section 14.2.33: */ |
2502 | nfs4_lock_state(); | 2934 | spin_lock(&nn->client_lock); |
2503 | conf = find_confirmed_client_by_name(&clname, nn); | 2935 | conf = find_confirmed_client_by_name(&clname, nn); |
2504 | if (conf) { | 2936 | if (conf) { |
2505 | /* case 0: */ | 2937 | /* case 0: */ |
@@ -2517,11 +2949,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
2517 | } | 2949 | } |
2518 | unconf = find_unconfirmed_client_by_name(&clname, nn); | 2950 | unconf = find_unconfirmed_client_by_name(&clname, nn); |
2519 | if (unconf) | 2951 | if (unconf) |
2520 | expire_client(unconf); | 2952 | unhash_client_locked(unconf); |
2521 | status = nfserr_jukebox; | ||
2522 | new = create_client(clname, rqstp, &clverifier); | ||
2523 | if (new == NULL) | ||
2524 | goto out; | ||
2525 | if (conf && same_verf(&conf->cl_verifier, &clverifier)) | 2953 | if (conf && same_verf(&conf->cl_verifier, &clverifier)) |
2526 | /* case 1: probable callback update */ | 2954 | /* case 1: probable callback update */ |
2527 | copy_clid(new, conf); | 2955 | copy_clid(new, conf); |
@@ -2533,9 +2961,14 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
2533 | setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; | 2961 | setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; |
2534 | setclid->se_clientid.cl_id = new->cl_clientid.cl_id; | 2962 | setclid->se_clientid.cl_id = new->cl_clientid.cl_id; |
2535 | memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data)); | 2963 | memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data)); |
2964 | new = NULL; | ||
2536 | status = nfs_ok; | 2965 | status = nfs_ok; |
2537 | out: | 2966 | out: |
2538 | nfs4_unlock_state(); | 2967 | spin_unlock(&nn->client_lock); |
2968 | if (new) | ||
2969 | free_client(new); | ||
2970 | if (unconf) | ||
2971 | expire_client(unconf); | ||
2539 | return status; | 2972 | return status; |
2540 | } | 2973 | } |
2541 | 2974 | ||
@@ -2546,6 +2979,7 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp, | |||
2546 | struct nfsd4_setclientid_confirm *setclientid_confirm) | 2979 | struct nfsd4_setclientid_confirm *setclientid_confirm) |
2547 | { | 2980 | { |
2548 | struct nfs4_client *conf, *unconf; | 2981 | struct nfs4_client *conf, *unconf; |
2982 | struct nfs4_client *old = NULL; | ||
2549 | nfs4_verifier confirm = setclientid_confirm->sc_confirm; | 2983 | nfs4_verifier confirm = setclientid_confirm->sc_confirm; |
2550 | clientid_t * clid = &setclientid_confirm->sc_clientid; | 2984 | clientid_t * clid = &setclientid_confirm->sc_clientid; |
2551 | __be32 status; | 2985 | __be32 status; |
@@ -2553,8 +2987,8 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp, | |||
2553 | 2987 | ||
2554 | if (STALE_CLIENTID(clid, nn)) | 2988 | if (STALE_CLIENTID(clid, nn)) |
2555 | return nfserr_stale_clientid; | 2989 | return nfserr_stale_clientid; |
2556 | nfs4_lock_state(); | ||
2557 | 2990 | ||
2991 | spin_lock(&nn->client_lock); | ||
2558 | conf = find_confirmed_client(clid, false, nn); | 2992 | conf = find_confirmed_client(clid, false, nn); |
2559 | unconf = find_unconfirmed_client(clid, false, nn); | 2993 | unconf = find_unconfirmed_client(clid, false, nn); |
2560 | /* | 2994 | /* |
@@ -2578,22 +3012,30 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp, | |||
2578 | } | 3012 | } |
2579 | status = nfs_ok; | 3013 | status = nfs_ok; |
2580 | if (conf) { /* case 1: callback update */ | 3014 | if (conf) { /* case 1: callback update */ |
3015 | old = unconf; | ||
3016 | unhash_client_locked(old); | ||
2581 | nfsd4_change_callback(conf, &unconf->cl_cb_conn); | 3017 | nfsd4_change_callback(conf, &unconf->cl_cb_conn); |
2582 | nfsd4_probe_callback(conf); | ||
2583 | expire_client(unconf); | ||
2584 | } else { /* case 3: normal case; new or rebooted client */ | 3018 | } else { /* case 3: normal case; new or rebooted client */ |
2585 | conf = find_confirmed_client_by_name(&unconf->cl_name, nn); | 3019 | old = find_confirmed_client_by_name(&unconf->cl_name, nn); |
2586 | if (conf) { | 3020 | if (old) { |
2587 | status = mark_client_expired(conf); | 3021 | status = mark_client_expired_locked(old); |
2588 | if (status) | 3022 | if (status) { |
3023 | old = NULL; | ||
2589 | goto out; | 3024 | goto out; |
2590 | expire_client(conf); | 3025 | } |
2591 | } | 3026 | } |
2592 | move_to_confirmed(unconf); | 3027 | move_to_confirmed(unconf); |
2593 | nfsd4_probe_callback(unconf); | 3028 | conf = unconf; |
2594 | } | 3029 | } |
3030 | get_client_locked(conf); | ||
3031 | spin_unlock(&nn->client_lock); | ||
3032 | nfsd4_probe_callback(conf); | ||
3033 | spin_lock(&nn->client_lock); | ||
3034 | put_client_renew_locked(conf); | ||
2595 | out: | 3035 | out: |
2596 | nfs4_unlock_state(); | 3036 | spin_unlock(&nn->client_lock); |
3037 | if (old) | ||
3038 | expire_client(old); | ||
2597 | return status; | 3039 | return status; |
2598 | } | 3040 | } |
2599 | 3041 | ||
@@ -2603,21 +3045,23 @@ static struct nfs4_file *nfsd4_alloc_file(void) | |||
2603 | } | 3045 | } |
2604 | 3046 | ||
2605 | /* OPEN Share state helper functions */ | 3047 | /* OPEN Share state helper functions */ |
2606 | static void nfsd4_init_file(struct nfs4_file *fp, struct inode *ino) | 3048 | static void nfsd4_init_file(struct nfs4_file *fp, struct knfsd_fh *fh) |
2607 | { | 3049 | { |
2608 | unsigned int hashval = file_hashval(ino); | 3050 | unsigned int hashval = file_hashval(fh); |
3051 | |||
3052 | lockdep_assert_held(&state_lock); | ||
2609 | 3053 | ||
2610 | atomic_set(&fp->fi_ref, 1); | 3054 | atomic_set(&fp->fi_ref, 1); |
3055 | spin_lock_init(&fp->fi_lock); | ||
2611 | INIT_LIST_HEAD(&fp->fi_stateids); | 3056 | INIT_LIST_HEAD(&fp->fi_stateids); |
2612 | INIT_LIST_HEAD(&fp->fi_delegations); | 3057 | INIT_LIST_HEAD(&fp->fi_delegations); |
2613 | fp->fi_inode = igrab(ino); | 3058 | fh_copy_shallow(&fp->fi_fhandle, fh); |
2614 | fp->fi_had_conflict = false; | 3059 | fp->fi_had_conflict = false; |
2615 | fp->fi_lease = NULL; | 3060 | fp->fi_lease = NULL; |
3061 | fp->fi_share_deny = 0; | ||
2616 | memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); | 3062 | memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); |
2617 | memset(fp->fi_access, 0, sizeof(fp->fi_access)); | 3063 | memset(fp->fi_access, 0, sizeof(fp->fi_access)); |
2618 | spin_lock(&state_lock); | ||
2619 | hlist_add_head(&fp->fi_hash, &file_hashtbl[hashval]); | 3064 | hlist_add_head(&fp->fi_hash, &file_hashtbl[hashval]); |
2620 | spin_unlock(&state_lock); | ||
2621 | } | 3065 | } |
2622 | 3066 | ||
2623 | void | 3067 | void |
@@ -2673,6 +3117,28 @@ static void init_nfs4_replay(struct nfs4_replay *rp) | |||
2673 | rp->rp_status = nfserr_serverfault; | 3117 | rp->rp_status = nfserr_serverfault; |
2674 | rp->rp_buflen = 0; | 3118 | rp->rp_buflen = 0; |
2675 | rp->rp_buf = rp->rp_ibuf; | 3119 | rp->rp_buf = rp->rp_ibuf; |
3120 | mutex_init(&rp->rp_mutex); | ||
3121 | } | ||
3122 | |||
3123 | static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate, | ||
3124 | struct nfs4_stateowner *so) | ||
3125 | { | ||
3126 | if (!nfsd4_has_session(cstate)) { | ||
3127 | mutex_lock(&so->so_replay.rp_mutex); | ||
3128 | cstate->replay_owner = so; | ||
3129 | atomic_inc(&so->so_count); | ||
3130 | } | ||
3131 | } | ||
3132 | |||
3133 | void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate) | ||
3134 | { | ||
3135 | struct nfs4_stateowner *so = cstate->replay_owner; | ||
3136 | |||
3137 | if (so != NULL) { | ||
3138 | cstate->replay_owner = NULL; | ||
3139 | mutex_unlock(&so->so_replay.rp_mutex); | ||
3140 | nfs4_put_stateowner(so); | ||
3141 | } | ||
2676 | } | 3142 | } |
2677 | 3143 | ||
2678 | static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp) | 3144 | static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp) |
@@ -2693,111 +3159,172 @@ static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj | |||
2693 | INIT_LIST_HEAD(&sop->so_stateids); | 3159 | INIT_LIST_HEAD(&sop->so_stateids); |
2694 | sop->so_client = clp; | 3160 | sop->so_client = clp; |
2695 | init_nfs4_replay(&sop->so_replay); | 3161 | init_nfs4_replay(&sop->so_replay); |
3162 | atomic_set(&sop->so_count, 1); | ||
2696 | return sop; | 3163 | return sop; |
2697 | } | 3164 | } |
2698 | 3165 | ||
2699 | static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval) | 3166 | static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval) |
2700 | { | 3167 | { |
2701 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); | 3168 | lockdep_assert_held(&clp->cl_lock); |
2702 | 3169 | ||
2703 | list_add(&oo->oo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]); | 3170 | list_add(&oo->oo_owner.so_strhash, |
3171 | &clp->cl_ownerstr_hashtbl[strhashval]); | ||
2704 | list_add(&oo->oo_perclient, &clp->cl_openowners); | 3172 | list_add(&oo->oo_perclient, &clp->cl_openowners); |
2705 | } | 3173 | } |
2706 | 3174 | ||
3175 | static void nfs4_unhash_openowner(struct nfs4_stateowner *so) | ||
3176 | { | ||
3177 | unhash_openowner_locked(openowner(so)); | ||
3178 | } | ||
3179 | |||
3180 | static void nfs4_free_openowner(struct nfs4_stateowner *so) | ||
3181 | { | ||
3182 | struct nfs4_openowner *oo = openowner(so); | ||
3183 | |||
3184 | kmem_cache_free(openowner_slab, oo); | ||
3185 | } | ||
3186 | |||
3187 | static const struct nfs4_stateowner_operations openowner_ops = { | ||
3188 | .so_unhash = nfs4_unhash_openowner, | ||
3189 | .so_free = nfs4_free_openowner, | ||
3190 | }; | ||
3191 | |||
2707 | static struct nfs4_openowner * | 3192 | static struct nfs4_openowner * |
2708 | alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) { | 3193 | alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open, |
2709 | struct nfs4_openowner *oo; | 3194 | struct nfsd4_compound_state *cstate) |
3195 | { | ||
3196 | struct nfs4_client *clp = cstate->clp; | ||
3197 | struct nfs4_openowner *oo, *ret; | ||
2710 | 3198 | ||
2711 | oo = alloc_stateowner(openowner_slab, &open->op_owner, clp); | 3199 | oo = alloc_stateowner(openowner_slab, &open->op_owner, clp); |
2712 | if (!oo) | 3200 | if (!oo) |
2713 | return NULL; | 3201 | return NULL; |
3202 | oo->oo_owner.so_ops = &openowner_ops; | ||
2714 | oo->oo_owner.so_is_open_owner = 1; | 3203 | oo->oo_owner.so_is_open_owner = 1; |
2715 | oo->oo_owner.so_seqid = open->op_seqid; | 3204 | oo->oo_owner.so_seqid = open->op_seqid; |
2716 | oo->oo_flags = NFS4_OO_NEW; | 3205 | oo->oo_flags = 0; |
3206 | if (nfsd4_has_session(cstate)) | ||
3207 | oo->oo_flags |= NFS4_OO_CONFIRMED; | ||
2717 | oo->oo_time = 0; | 3208 | oo->oo_time = 0; |
2718 | oo->oo_last_closed_stid = NULL; | 3209 | oo->oo_last_closed_stid = NULL; |
2719 | INIT_LIST_HEAD(&oo->oo_close_lru); | 3210 | INIT_LIST_HEAD(&oo->oo_close_lru); |
2720 | hash_openowner(oo, clp, strhashval); | 3211 | spin_lock(&clp->cl_lock); |
3212 | ret = find_openstateowner_str_locked(strhashval, open, clp); | ||
3213 | if (ret == NULL) { | ||
3214 | hash_openowner(oo, clp, strhashval); | ||
3215 | ret = oo; | ||
3216 | } else | ||
3217 | nfs4_free_openowner(&oo->oo_owner); | ||
3218 | spin_unlock(&clp->cl_lock); | ||
2721 | return oo; | 3219 | return oo; |
2722 | } | 3220 | } |
2723 | 3221 | ||
2724 | static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) { | 3222 | static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) { |
2725 | struct nfs4_openowner *oo = open->op_openowner; | 3223 | struct nfs4_openowner *oo = open->op_openowner; |
2726 | 3224 | ||
3225 | atomic_inc(&stp->st_stid.sc_count); | ||
2727 | stp->st_stid.sc_type = NFS4_OPEN_STID; | 3226 | stp->st_stid.sc_type = NFS4_OPEN_STID; |
2728 | INIT_LIST_HEAD(&stp->st_lockowners); | 3227 | INIT_LIST_HEAD(&stp->st_locks); |
2729 | list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids); | ||
2730 | list_add(&stp->st_perfile, &fp->fi_stateids); | ||
2731 | stp->st_stateowner = &oo->oo_owner; | 3228 | stp->st_stateowner = &oo->oo_owner; |
3229 | atomic_inc(&stp->st_stateowner->so_count); | ||
2732 | get_nfs4_file(fp); | 3230 | get_nfs4_file(fp); |
2733 | stp->st_file = fp; | 3231 | stp->st_stid.sc_file = fp; |
2734 | stp->st_access_bmap = 0; | 3232 | stp->st_access_bmap = 0; |
2735 | stp->st_deny_bmap = 0; | 3233 | stp->st_deny_bmap = 0; |
2736 | set_access(open->op_share_access, stp); | ||
2737 | set_deny(open->op_share_deny, stp); | ||
2738 | stp->st_openstp = NULL; | 3234 | stp->st_openstp = NULL; |
3235 | spin_lock(&oo->oo_owner.so_client->cl_lock); | ||
3236 | list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids); | ||
3237 | spin_lock(&fp->fi_lock); | ||
3238 | list_add(&stp->st_perfile, &fp->fi_stateids); | ||
3239 | spin_unlock(&fp->fi_lock); | ||
3240 | spin_unlock(&oo->oo_owner.so_client->cl_lock); | ||
2739 | } | 3241 | } |
2740 | 3242 | ||
3243 | /* | ||
3244 | * In the 4.0 case we need to keep the owners around a little while to handle | ||
3245 | * CLOSE replay. We still do need to release any file access that is held by | ||
3246 | * them before returning however. | ||
3247 | */ | ||
2741 | static void | 3248 | static void |
2742 | move_to_close_lru(struct nfs4_openowner *oo, struct net *net) | 3249 | move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net) |
2743 | { | 3250 | { |
2744 | struct nfsd_net *nn = net_generic(net, nfsd_net_id); | 3251 | struct nfs4_ol_stateid *last; |
3252 | struct nfs4_openowner *oo = openowner(s->st_stateowner); | ||
3253 | struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net, | ||
3254 | nfsd_net_id); | ||
2745 | 3255 | ||
2746 | dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo); | 3256 | dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo); |
2747 | 3257 | ||
3258 | /* | ||
3259 | * We know that we hold one reference via nfsd4_close, and another | ||
3260 | * "persistent" reference for the client. If the refcount is higher | ||
3261 | * than 2, then there are still calls in progress that are using this | ||
3262 | * stateid. We can't put the sc_file reference until they are finished. | ||
3263 | * Wait for the refcount to drop to 2. Since it has been unhashed, | ||
3264 | * there should be no danger of the refcount going back up again at | ||
3265 | * this point. | ||
3266 | */ | ||
3267 | wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2); | ||
3268 | |||
3269 | release_all_access(s); | ||
3270 | if (s->st_stid.sc_file) { | ||
3271 | put_nfs4_file(s->st_stid.sc_file); | ||
3272 | s->st_stid.sc_file = NULL; | ||
3273 | } | ||
3274 | |||
3275 | spin_lock(&nn->client_lock); | ||
3276 | last = oo->oo_last_closed_stid; | ||
3277 | oo->oo_last_closed_stid = s; | ||
2748 | list_move_tail(&oo->oo_close_lru, &nn->close_lru); | 3278 | list_move_tail(&oo->oo_close_lru, &nn->close_lru); |
2749 | oo->oo_time = get_seconds(); | 3279 | oo->oo_time = get_seconds(); |
3280 | spin_unlock(&nn->client_lock); | ||
3281 | if (last) | ||
3282 | nfs4_put_stid(&last->st_stid); | ||
2750 | } | 3283 | } |
2751 | 3284 | ||
2752 | static int | 3285 | /* search file_hashtbl[] for file */ |
2753 | same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner, | 3286 | static struct nfs4_file * |
2754 | clientid_t *clid) | 3287 | find_file_locked(struct knfsd_fh *fh) |
2755 | { | 3288 | { |
2756 | return (sop->so_owner.len == owner->len) && | 3289 | unsigned int hashval = file_hashval(fh); |
2757 | 0 == memcmp(sop->so_owner.data, owner->data, owner->len) && | 3290 | struct nfs4_file *fp; |
2758 | (sop->so_client->cl_clientid.cl_id == clid->cl_id); | ||
2759 | } | ||
2760 | 3291 | ||
2761 | static struct nfs4_openowner * | 3292 | lockdep_assert_held(&state_lock); |
2762 | find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open, | ||
2763 | bool sessions, struct nfsd_net *nn) | ||
2764 | { | ||
2765 | struct nfs4_stateowner *so; | ||
2766 | struct nfs4_openowner *oo; | ||
2767 | struct nfs4_client *clp; | ||
2768 | 3293 | ||
2769 | list_for_each_entry(so, &nn->ownerstr_hashtbl[hashval], so_strhash) { | 3294 | hlist_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) { |
2770 | if (!so->so_is_open_owner) | 3295 | if (nfsd_fh_match(&fp->fi_fhandle, fh)) { |
2771 | continue; | 3296 | get_nfs4_file(fp); |
2772 | if (same_owner_str(so, &open->op_owner, &open->op_clientid)) { | 3297 | return fp; |
2773 | oo = openowner(so); | ||
2774 | clp = oo->oo_owner.so_client; | ||
2775 | if ((bool)clp->cl_minorversion != sessions) | ||
2776 | return NULL; | ||
2777 | renew_client(oo->oo_owner.so_client); | ||
2778 | return oo; | ||
2779 | } | 3298 | } |
2780 | } | 3299 | } |
2781 | return NULL; | 3300 | return NULL; |
2782 | } | 3301 | } |
2783 | 3302 | ||
2784 | /* search file_hashtbl[] for file */ | ||
2785 | static struct nfs4_file * | 3303 | static struct nfs4_file * |
2786 | find_file(struct inode *ino) | 3304 | find_file(struct knfsd_fh *fh) |
2787 | { | 3305 | { |
2788 | unsigned int hashval = file_hashval(ino); | ||
2789 | struct nfs4_file *fp; | 3306 | struct nfs4_file *fp; |
2790 | 3307 | ||
2791 | spin_lock(&state_lock); | 3308 | spin_lock(&state_lock); |
2792 | hlist_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) { | 3309 | fp = find_file_locked(fh); |
2793 | if (fp->fi_inode == ino) { | 3310 | spin_unlock(&state_lock); |
2794 | get_nfs4_file(fp); | 3311 | return fp; |
2795 | spin_unlock(&state_lock); | 3312 | } |
2796 | return fp; | 3313 | |
2797 | } | 3314 | static struct nfs4_file * |
3315 | find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh) | ||
3316 | { | ||
3317 | struct nfs4_file *fp; | ||
3318 | |||
3319 | spin_lock(&state_lock); | ||
3320 | fp = find_file_locked(fh); | ||
3321 | if (fp == NULL) { | ||
3322 | nfsd4_init_file(new, fh); | ||
3323 | fp = new; | ||
2798 | } | 3324 | } |
2799 | spin_unlock(&state_lock); | 3325 | spin_unlock(&state_lock); |
2800 | return NULL; | 3326 | |
3327 | return fp; | ||
2801 | } | 3328 | } |
2802 | 3329 | ||
2803 | /* | 3330 | /* |
@@ -2807,47 +3334,53 @@ find_file(struct inode *ino) | |||
2807 | static __be32 | 3334 | static __be32 |
2808 | nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type) | 3335 | nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type) |
2809 | { | 3336 | { |
2810 | struct inode *ino = current_fh->fh_dentry->d_inode; | ||
2811 | struct nfs4_file *fp; | 3337 | struct nfs4_file *fp; |
2812 | struct nfs4_ol_stateid *stp; | 3338 | __be32 ret = nfs_ok; |
2813 | __be32 ret; | ||
2814 | 3339 | ||
2815 | fp = find_file(ino); | 3340 | fp = find_file(¤t_fh->fh_handle); |
2816 | if (!fp) | 3341 | if (!fp) |
2817 | return nfs_ok; | 3342 | return ret; |
2818 | ret = nfserr_locked; | 3343 | /* Check for conflicting share reservations */ |
2819 | /* Search for conflicting share reservations */ | 3344 | spin_lock(&fp->fi_lock); |
2820 | list_for_each_entry(stp, &fp->fi_stateids, st_perfile) { | 3345 | if (fp->fi_share_deny & deny_type) |
2821 | if (test_deny(deny_type, stp) || | 3346 | ret = nfserr_locked; |
2822 | test_deny(NFS4_SHARE_DENY_BOTH, stp)) | 3347 | spin_unlock(&fp->fi_lock); |
2823 | goto out; | ||
2824 | } | ||
2825 | ret = nfs_ok; | ||
2826 | out: | ||
2827 | put_nfs4_file(fp); | 3348 | put_nfs4_file(fp); |
2828 | return ret; | 3349 | return ret; |
2829 | } | 3350 | } |
2830 | 3351 | ||
2831 | static void nfsd_break_one_deleg(struct nfs4_delegation *dp) | 3352 | void nfsd4_prepare_cb_recall(struct nfs4_delegation *dp) |
2832 | { | 3353 | { |
2833 | struct nfs4_client *clp = dp->dl_stid.sc_client; | 3354 | struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net, |
2834 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); | 3355 | nfsd_net_id); |
2835 | 3356 | ||
2836 | lockdep_assert_held(&state_lock); | 3357 | block_delegations(&dp->dl_stid.sc_file->fi_fhandle); |
2837 | /* We're assuming the state code never drops its reference | 3358 | |
3359 | /* | ||
3360 | * We can't do this in nfsd_break_deleg_cb because it is | ||
3361 | * already holding inode->i_lock. | ||
3362 | * | ||
3363 | * If the dl_time != 0, then we know that it has already been | ||
3364 | * queued for a lease break. Don't queue it again. | ||
3365 | */ | ||
3366 | spin_lock(&state_lock); | ||
3367 | if (dp->dl_time == 0) { | ||
3368 | dp->dl_time = get_seconds(); | ||
3369 | list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru); | ||
3370 | } | ||
3371 | spin_unlock(&state_lock); | ||
3372 | } | ||
3373 | |||
3374 | static void nfsd_break_one_deleg(struct nfs4_delegation *dp) | ||
3375 | { | ||
3376 | /* | ||
3377 | * We're assuming the state code never drops its reference | ||
2838 | * without first removing the lease. Since we're in this lease | 3378 | * without first removing the lease. Since we're in this lease |
2839 | * callback (and since the lease code is serialized by the kernel | 3379 | * callback (and since the lease code is serialized by the kernel |
2840 | * lock) we know the server hasn't removed the lease yet, we know | 3380 | * lock) we know the server hasn't removed the lease yet, we know |
2841 | * it's safe to take a reference: */ | 3381 | * it's safe to take a reference. |
2842 | atomic_inc(&dp->dl_count); | 3382 | */ |
2843 | 3383 | atomic_inc(&dp->dl_stid.sc_count); | |
2844 | list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru); | ||
2845 | |||
2846 | /* Only place dl_time is set; protected by i_lock: */ | ||
2847 | dp->dl_time = get_seconds(); | ||
2848 | |||
2849 | block_delegations(&dp->dl_fh); | ||
2850 | |||
2851 | nfsd4_cb_recall(dp); | 3384 | nfsd4_cb_recall(dp); |
2852 | } | 3385 | } |
2853 | 3386 | ||
@@ -2872,11 +3405,20 @@ static void nfsd_break_deleg_cb(struct file_lock *fl) | |||
2872 | */ | 3405 | */ |
2873 | fl->fl_break_time = 0; | 3406 | fl->fl_break_time = 0; |
2874 | 3407 | ||
2875 | spin_lock(&state_lock); | 3408 | spin_lock(&fp->fi_lock); |
2876 | fp->fi_had_conflict = true; | 3409 | fp->fi_had_conflict = true; |
2877 | list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) | 3410 | /* |
2878 | nfsd_break_one_deleg(dp); | 3411 | * If there are no delegations on the list, then we can't count on this |
2879 | spin_unlock(&state_lock); | 3412 | * lease ever being cleaned up. Set the fl_break_time to jiffies so that |
3413 | * time_out_leases will do it ASAP. The fact that fi_had_conflict is now | ||
3414 | * true should keep any new delegations from being hashed. | ||
3415 | */ | ||
3416 | if (list_empty(&fp->fi_delegations)) | ||
3417 | fl->fl_break_time = jiffies; | ||
3418 | else | ||
3419 | list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) | ||
3420 | nfsd_break_one_deleg(dp); | ||
3421 | spin_unlock(&fp->fi_lock); | ||
2880 | } | 3422 | } |
2881 | 3423 | ||
2882 | static | 3424 | static |
@@ -2904,6 +3446,42 @@ static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4 | |||
2904 | return nfserr_bad_seqid; | 3446 | return nfserr_bad_seqid; |
2905 | } | 3447 | } |
2906 | 3448 | ||
3449 | static __be32 lookup_clientid(clientid_t *clid, | ||
3450 | struct nfsd4_compound_state *cstate, | ||
3451 | struct nfsd_net *nn) | ||
3452 | { | ||
3453 | struct nfs4_client *found; | ||
3454 | |||
3455 | if (cstate->clp) { | ||
3456 | found = cstate->clp; | ||
3457 | if (!same_clid(&found->cl_clientid, clid)) | ||
3458 | return nfserr_stale_clientid; | ||
3459 | return nfs_ok; | ||
3460 | } | ||
3461 | |||
3462 | if (STALE_CLIENTID(clid, nn)) | ||
3463 | return nfserr_stale_clientid; | ||
3464 | |||
3465 | /* | ||
3466 | * For v4.1+ we get the client in the SEQUENCE op. If we don't have one | ||
3467 | * cached already then we know this is for is for v4.0 and "sessions" | ||
3468 | * will be false. | ||
3469 | */ | ||
3470 | WARN_ON_ONCE(cstate->session); | ||
3471 | spin_lock(&nn->client_lock); | ||
3472 | found = find_confirmed_client(clid, false, nn); | ||
3473 | if (!found) { | ||
3474 | spin_unlock(&nn->client_lock); | ||
3475 | return nfserr_expired; | ||
3476 | } | ||
3477 | atomic_inc(&found->cl_refcount); | ||
3478 | spin_unlock(&nn->client_lock); | ||
3479 | |||
3480 | /* Cache the nfs4_client in cstate! */ | ||
3481 | cstate->clp = found; | ||
3482 | return nfs_ok; | ||
3483 | } | ||
3484 | |||
2907 | __be32 | 3485 | __be32 |
2908 | nfsd4_process_open1(struct nfsd4_compound_state *cstate, | 3486 | nfsd4_process_open1(struct nfsd4_compound_state *cstate, |
2909 | struct nfsd4_open *open, struct nfsd_net *nn) | 3487 | struct nfsd4_open *open, struct nfsd_net *nn) |
@@ -2924,19 +3502,19 @@ nfsd4_process_open1(struct nfsd4_compound_state *cstate, | |||
2924 | if (open->op_file == NULL) | 3502 | if (open->op_file == NULL) |
2925 | return nfserr_jukebox; | 3503 | return nfserr_jukebox; |
2926 | 3504 | ||
2927 | strhashval = ownerstr_hashval(clientid->cl_id, &open->op_owner); | 3505 | status = lookup_clientid(clientid, cstate, nn); |
2928 | oo = find_openstateowner_str(strhashval, open, cstate->minorversion, nn); | 3506 | if (status) |
3507 | return status; | ||
3508 | clp = cstate->clp; | ||
3509 | |||
3510 | strhashval = ownerstr_hashval(&open->op_owner); | ||
3511 | oo = find_openstateowner_str(strhashval, open, clp); | ||
2929 | open->op_openowner = oo; | 3512 | open->op_openowner = oo; |
2930 | if (!oo) { | 3513 | if (!oo) { |
2931 | clp = find_confirmed_client(clientid, cstate->minorversion, | ||
2932 | nn); | ||
2933 | if (clp == NULL) | ||
2934 | return nfserr_expired; | ||
2935 | goto new_owner; | 3514 | goto new_owner; |
2936 | } | 3515 | } |
2937 | if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { | 3516 | if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { |
2938 | /* Replace unconfirmed owners without checking for replay. */ | 3517 | /* Replace unconfirmed owners without checking for replay. */ |
2939 | clp = oo->oo_owner.so_client; | ||
2940 | release_openowner(oo); | 3518 | release_openowner(oo); |
2941 | open->op_openowner = NULL; | 3519 | open->op_openowner = NULL; |
2942 | goto new_owner; | 3520 | goto new_owner; |
@@ -2944,15 +3522,14 @@ nfsd4_process_open1(struct nfsd4_compound_state *cstate, | |||
2944 | status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid); | 3522 | status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid); |
2945 | if (status) | 3523 | if (status) |
2946 | return status; | 3524 | return status; |
2947 | clp = oo->oo_owner.so_client; | ||
2948 | goto alloc_stateid; | 3525 | goto alloc_stateid; |
2949 | new_owner: | 3526 | new_owner: |
2950 | oo = alloc_init_open_stateowner(strhashval, clp, open); | 3527 | oo = alloc_init_open_stateowner(strhashval, open, cstate); |
2951 | if (oo == NULL) | 3528 | if (oo == NULL) |
2952 | return nfserr_jukebox; | 3529 | return nfserr_jukebox; |
2953 | open->op_openowner = oo; | 3530 | open->op_openowner = oo; |
2954 | alloc_stateid: | 3531 | alloc_stateid: |
2955 | open->op_stp = nfs4_alloc_stateid(clp); | 3532 | open->op_stp = nfs4_alloc_open_stateid(clp); |
2956 | if (!open->op_stp) | 3533 | if (!open->op_stp) |
2957 | return nfserr_jukebox; | 3534 | return nfserr_jukebox; |
2958 | return nfs_ok; | 3535 | return nfs_ok; |
@@ -2994,14 +3571,18 @@ nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open, | |||
2994 | { | 3571 | { |
2995 | int flags; | 3572 | int flags; |
2996 | __be32 status = nfserr_bad_stateid; | 3573 | __be32 status = nfserr_bad_stateid; |
3574 | struct nfs4_delegation *deleg; | ||
2997 | 3575 | ||
2998 | *dp = find_deleg_stateid(cl, &open->op_delegate_stateid); | 3576 | deleg = find_deleg_stateid(cl, &open->op_delegate_stateid); |
2999 | if (*dp == NULL) | 3577 | if (deleg == NULL) |
3000 | goto out; | 3578 | goto out; |
3001 | flags = share_access_to_flags(open->op_share_access); | 3579 | flags = share_access_to_flags(open->op_share_access); |
3002 | status = nfs4_check_delegmode(*dp, flags); | 3580 | status = nfs4_check_delegmode(deleg, flags); |
3003 | if (status) | 3581 | if (status) { |
3004 | *dp = NULL; | 3582 | nfs4_put_stid(&deleg->dl_stid); |
3583 | goto out; | ||
3584 | } | ||
3585 | *dp = deleg; | ||
3005 | out: | 3586 | out: |
3006 | if (!nfsd4_is_deleg_cur(open)) | 3587 | if (!nfsd4_is_deleg_cur(open)) |
3007 | return nfs_ok; | 3588 | return nfs_ok; |
@@ -3011,24 +3592,25 @@ out: | |||
3011 | return nfs_ok; | 3592 | return nfs_ok; |
3012 | } | 3593 | } |
3013 | 3594 | ||
3014 | static __be32 | 3595 | static struct nfs4_ol_stateid * |
3015 | nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_stateid **stpp) | 3596 | nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open) |
3016 | { | 3597 | { |
3017 | struct nfs4_ol_stateid *local; | 3598 | struct nfs4_ol_stateid *local, *ret = NULL; |
3018 | struct nfs4_openowner *oo = open->op_openowner; | 3599 | struct nfs4_openowner *oo = open->op_openowner; |
3019 | 3600 | ||
3601 | spin_lock(&fp->fi_lock); | ||
3020 | list_for_each_entry(local, &fp->fi_stateids, st_perfile) { | 3602 | list_for_each_entry(local, &fp->fi_stateids, st_perfile) { |
3021 | /* ignore lock owners */ | 3603 | /* ignore lock owners */ |
3022 | if (local->st_stateowner->so_is_open_owner == 0) | 3604 | if (local->st_stateowner->so_is_open_owner == 0) |
3023 | continue; | 3605 | continue; |
3024 | /* remember if we have seen this open owner */ | 3606 | if (local->st_stateowner == &oo->oo_owner) { |
3025 | if (local->st_stateowner == &oo->oo_owner) | 3607 | ret = local; |
3026 | *stpp = local; | 3608 | atomic_inc(&ret->st_stid.sc_count); |
3027 | /* check for conflicting share reservations */ | 3609 | break; |
3028 | if (!test_share(local, open)) | 3610 | } |
3029 | return nfserr_share_denied; | ||
3030 | } | 3611 | } |
3031 | return nfs_ok; | 3612 | spin_unlock(&fp->fi_lock); |
3613 | return ret; | ||
3032 | } | 3614 | } |
3033 | 3615 | ||
3034 | static inline int nfs4_access_to_access(u32 nfs4_access) | 3616 | static inline int nfs4_access_to_access(u32 nfs4_access) |
@@ -3042,24 +3624,6 @@ static inline int nfs4_access_to_access(u32 nfs4_access) | |||
3042 | return flags; | 3624 | return flags; |
3043 | } | 3625 | } |
3044 | 3626 | ||
3045 | static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp, | ||
3046 | struct svc_fh *cur_fh, struct nfsd4_open *open) | ||
3047 | { | ||
3048 | __be32 status; | ||
3049 | int oflag = nfs4_access_to_omode(open->op_share_access); | ||
3050 | int access = nfs4_access_to_access(open->op_share_access); | ||
3051 | |||
3052 | if (!fp->fi_fds[oflag]) { | ||
3053 | status = nfsd_open(rqstp, cur_fh, S_IFREG, access, | ||
3054 | &fp->fi_fds[oflag]); | ||
3055 | if (status) | ||
3056 | return status; | ||
3057 | } | ||
3058 | nfs4_file_get_access(fp, oflag); | ||
3059 | |||
3060 | return nfs_ok; | ||
3061 | } | ||
3062 | |||
3063 | static inline __be32 | 3627 | static inline __be32 |
3064 | nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh, | 3628 | nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh, |
3065 | struct nfsd4_open *open) | 3629 | struct nfsd4_open *open) |
@@ -3075,34 +3639,99 @@ nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh, | |||
3075 | return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0); | 3639 | return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0); |
3076 | } | 3640 | } |
3077 | 3641 | ||
3078 | static __be32 | 3642 | static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp, |
3079 | nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open) | 3643 | struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, |
3644 | struct nfsd4_open *open) | ||
3080 | { | 3645 | { |
3081 | u32 op_share_access = open->op_share_access; | 3646 | struct file *filp = NULL; |
3082 | bool new_access; | ||
3083 | __be32 status; | 3647 | __be32 status; |
3648 | int oflag = nfs4_access_to_omode(open->op_share_access); | ||
3649 | int access = nfs4_access_to_access(open->op_share_access); | ||
3650 | unsigned char old_access_bmap, old_deny_bmap; | ||
3084 | 3651 | ||
3085 | new_access = !test_access(op_share_access, stp); | 3652 | spin_lock(&fp->fi_lock); |
3086 | if (new_access) { | 3653 | |
3087 | status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open); | 3654 | /* |
3088 | if (status) | 3655 | * Are we trying to set a deny mode that would conflict with |
3089 | return status; | 3656 | * current access? |
3657 | */ | ||
3658 | status = nfs4_file_check_deny(fp, open->op_share_deny); | ||
3659 | if (status != nfs_ok) { | ||
3660 | spin_unlock(&fp->fi_lock); | ||
3661 | goto out; | ||
3090 | } | 3662 | } |
3091 | status = nfsd4_truncate(rqstp, cur_fh, open); | 3663 | |
3092 | if (status) { | 3664 | /* set access to the file */ |
3093 | if (new_access) { | 3665 | status = nfs4_file_get_access(fp, open->op_share_access); |
3094 | int oflag = nfs4_access_to_omode(op_share_access); | 3666 | if (status != nfs_ok) { |
3095 | nfs4_file_put_access(fp, oflag); | 3667 | spin_unlock(&fp->fi_lock); |
3096 | } | 3668 | goto out; |
3097 | return status; | ||
3098 | } | 3669 | } |
3099 | /* remember the open */ | 3670 | |
3100 | set_access(op_share_access, stp); | 3671 | /* Set access bits in stateid */ |
3672 | old_access_bmap = stp->st_access_bmap; | ||
3673 | set_access(open->op_share_access, stp); | ||
3674 | |||
3675 | /* Set new deny mask */ | ||
3676 | old_deny_bmap = stp->st_deny_bmap; | ||
3101 | set_deny(open->op_share_deny, stp); | 3677 | set_deny(open->op_share_deny, stp); |
3678 | fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH); | ||
3102 | 3679 | ||
3103 | return nfs_ok; | 3680 | if (!fp->fi_fds[oflag]) { |
3681 | spin_unlock(&fp->fi_lock); | ||
3682 | status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp); | ||
3683 | if (status) | ||
3684 | goto out_put_access; | ||
3685 | spin_lock(&fp->fi_lock); | ||
3686 | if (!fp->fi_fds[oflag]) { | ||
3687 | fp->fi_fds[oflag] = filp; | ||
3688 | filp = NULL; | ||
3689 | } | ||
3690 | } | ||
3691 | spin_unlock(&fp->fi_lock); | ||
3692 | if (filp) | ||
3693 | fput(filp); | ||
3694 | |||
3695 | status = nfsd4_truncate(rqstp, cur_fh, open); | ||
3696 | if (status) | ||
3697 | goto out_put_access; | ||
3698 | out: | ||
3699 | return status; | ||
3700 | out_put_access: | ||
3701 | stp->st_access_bmap = old_access_bmap; | ||
3702 | nfs4_file_put_access(fp, open->op_share_access); | ||
3703 | reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp); | ||
3704 | goto out; | ||
3104 | } | 3705 | } |
3105 | 3706 | ||
3707 | static __be32 | ||
3708 | nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open) | ||
3709 | { | ||
3710 | __be32 status; | ||
3711 | unsigned char old_deny_bmap; | ||
3712 | |||
3713 | if (!test_access(open->op_share_access, stp)) | ||
3714 | return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open); | ||
3715 | |||
3716 | /* test and set deny mode */ | ||
3717 | spin_lock(&fp->fi_lock); | ||
3718 | status = nfs4_file_check_deny(fp, open->op_share_deny); | ||
3719 | if (status == nfs_ok) { | ||
3720 | old_deny_bmap = stp->st_deny_bmap; | ||
3721 | set_deny(open->op_share_deny, stp); | ||
3722 | fp->fi_share_deny |= | ||
3723 | (open->op_share_deny & NFS4_SHARE_DENY_BOTH); | ||
3724 | } | ||
3725 | spin_unlock(&fp->fi_lock); | ||
3726 | |||
3727 | if (status != nfs_ok) | ||
3728 | return status; | ||
3729 | |||
3730 | status = nfsd4_truncate(rqstp, cur_fh, open); | ||
3731 | if (status != nfs_ok) | ||
3732 | reset_union_bmap_deny(old_deny_bmap, stp); | ||
3733 | return status; | ||
3734 | } | ||
3106 | 3735 | ||
3107 | static void | 3736 | static void |
3108 | nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session) | 3737 | nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session) |
@@ -3123,7 +3752,7 @@ static bool nfsd4_cb_channel_good(struct nfs4_client *clp) | |||
3123 | return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; | 3752 | return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; |
3124 | } | 3753 | } |
3125 | 3754 | ||
3126 | static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag) | 3755 | static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag) |
3127 | { | 3756 | { |
3128 | struct file_lock *fl; | 3757 | struct file_lock *fl; |
3129 | 3758 | ||
@@ -3135,53 +3764,101 @@ static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int f | |||
3135 | fl->fl_flags = FL_DELEG; | 3764 | fl->fl_flags = FL_DELEG; |
3136 | fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; | 3765 | fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; |
3137 | fl->fl_end = OFFSET_MAX; | 3766 | fl->fl_end = OFFSET_MAX; |
3138 | fl->fl_owner = (fl_owner_t)(dp->dl_file); | 3767 | fl->fl_owner = (fl_owner_t)fp; |
3139 | fl->fl_pid = current->tgid; | 3768 | fl->fl_pid = current->tgid; |
3140 | return fl; | 3769 | return fl; |
3141 | } | 3770 | } |
3142 | 3771 | ||
3143 | static int nfs4_setlease(struct nfs4_delegation *dp) | 3772 | static int nfs4_setlease(struct nfs4_delegation *dp) |
3144 | { | 3773 | { |
3145 | struct nfs4_file *fp = dp->dl_file; | 3774 | struct nfs4_file *fp = dp->dl_stid.sc_file; |
3146 | struct file_lock *fl; | 3775 | struct file_lock *fl; |
3147 | int status; | 3776 | struct file *filp; |
3777 | int status = 0; | ||
3148 | 3778 | ||
3149 | fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ); | 3779 | fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ); |
3150 | if (!fl) | 3780 | if (!fl) |
3151 | return -ENOMEM; | 3781 | return -ENOMEM; |
3152 | fl->fl_file = find_readable_file(fp); | 3782 | filp = find_readable_file(fp); |
3153 | status = vfs_setlease(fl->fl_file, fl->fl_type, &fl); | 3783 | if (!filp) { |
3154 | if (status) | 3784 | /* We should always have a readable file here */ |
3155 | goto out_free; | 3785 | WARN_ON_ONCE(1); |
3786 | return -EBADF; | ||
3787 | } | ||
3788 | fl->fl_file = filp; | ||
3789 | status = vfs_setlease(filp, fl->fl_type, &fl); | ||
3790 | if (status) { | ||
3791 | locks_free_lock(fl); | ||
3792 | goto out_fput; | ||
3793 | } | ||
3794 | spin_lock(&state_lock); | ||
3795 | spin_lock(&fp->fi_lock); | ||
3796 | /* Did the lease get broken before we took the lock? */ | ||
3797 | status = -EAGAIN; | ||
3798 | if (fp->fi_had_conflict) | ||
3799 | goto out_unlock; | ||
3800 | /* Race breaker */ | ||
3801 | if (fp->fi_lease) { | ||
3802 | status = 0; | ||
3803 | atomic_inc(&fp->fi_delegees); | ||
3804 | hash_delegation_locked(dp, fp); | ||
3805 | goto out_unlock; | ||
3806 | } | ||
3156 | fp->fi_lease = fl; | 3807 | fp->fi_lease = fl; |
3157 | fp->fi_deleg_file = get_file(fl->fl_file); | 3808 | fp->fi_deleg_file = filp; |
3158 | atomic_set(&fp->fi_delegees, 1); | 3809 | atomic_set(&fp->fi_delegees, 1); |
3159 | spin_lock(&state_lock); | ||
3160 | hash_delegation_locked(dp, fp); | 3810 | hash_delegation_locked(dp, fp); |
3811 | spin_unlock(&fp->fi_lock); | ||
3161 | spin_unlock(&state_lock); | 3812 | spin_unlock(&state_lock); |
3162 | return 0; | 3813 | return 0; |
3163 | out_free: | 3814 | out_unlock: |
3164 | locks_free_lock(fl); | 3815 | spin_unlock(&fp->fi_lock); |
3816 | spin_unlock(&state_lock); | ||
3817 | out_fput: | ||
3818 | fput(filp); | ||
3165 | return status; | 3819 | return status; |
3166 | } | 3820 | } |
3167 | 3821 | ||
3168 | static int nfs4_set_delegation(struct nfs4_delegation *dp, struct nfs4_file *fp) | 3822 | static struct nfs4_delegation * |
3823 | nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, | ||
3824 | struct nfs4_file *fp) | ||
3169 | { | 3825 | { |
3826 | int status; | ||
3827 | struct nfs4_delegation *dp; | ||
3828 | |||
3170 | if (fp->fi_had_conflict) | 3829 | if (fp->fi_had_conflict) |
3171 | return -EAGAIN; | 3830 | return ERR_PTR(-EAGAIN); |
3831 | |||
3832 | dp = alloc_init_deleg(clp, fh); | ||
3833 | if (!dp) | ||
3834 | return ERR_PTR(-ENOMEM); | ||
3835 | |||
3172 | get_nfs4_file(fp); | 3836 | get_nfs4_file(fp); |
3173 | dp->dl_file = fp; | ||
3174 | if (!fp->fi_lease) | ||
3175 | return nfs4_setlease(dp); | ||
3176 | spin_lock(&state_lock); | 3837 | spin_lock(&state_lock); |
3838 | spin_lock(&fp->fi_lock); | ||
3839 | dp->dl_stid.sc_file = fp; | ||
3840 | if (!fp->fi_lease) { | ||
3841 | spin_unlock(&fp->fi_lock); | ||
3842 | spin_unlock(&state_lock); | ||
3843 | status = nfs4_setlease(dp); | ||
3844 | goto out; | ||
3845 | } | ||
3177 | atomic_inc(&fp->fi_delegees); | 3846 | atomic_inc(&fp->fi_delegees); |
3178 | if (fp->fi_had_conflict) { | 3847 | if (fp->fi_had_conflict) { |
3179 | spin_unlock(&state_lock); | 3848 | status = -EAGAIN; |
3180 | return -EAGAIN; | 3849 | goto out_unlock; |
3181 | } | 3850 | } |
3182 | hash_delegation_locked(dp, fp); | 3851 | hash_delegation_locked(dp, fp); |
3852 | status = 0; | ||
3853 | out_unlock: | ||
3854 | spin_unlock(&fp->fi_lock); | ||
3183 | spin_unlock(&state_lock); | 3855 | spin_unlock(&state_lock); |
3184 | return 0; | 3856 | out: |
3857 | if (status) { | ||
3858 | nfs4_put_stid(&dp->dl_stid); | ||
3859 | return ERR_PTR(status); | ||
3860 | } | ||
3861 | return dp; | ||
3185 | } | 3862 | } |
3186 | 3863 | ||
3187 | static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status) | 3864 | static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status) |
@@ -3212,11 +3889,12 @@ static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status) | |||
3212 | * proper support for them. | 3889 | * proper support for them. |
3213 | */ | 3890 | */ |
3214 | static void | 3891 | static void |
3215 | nfs4_open_delegation(struct net *net, struct svc_fh *fh, | 3892 | nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, |
3216 | struct nfsd4_open *open, struct nfs4_ol_stateid *stp) | 3893 | struct nfs4_ol_stateid *stp) |
3217 | { | 3894 | { |
3218 | struct nfs4_delegation *dp; | 3895 | struct nfs4_delegation *dp; |
3219 | struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner); | 3896 | struct nfs4_openowner *oo = openowner(stp->st_stateowner); |
3897 | struct nfs4_client *clp = stp->st_stid.sc_client; | ||
3220 | int cb_up; | 3898 | int cb_up; |
3221 | int status = 0; | 3899 | int status = 0; |
3222 | 3900 | ||
@@ -3235,7 +3913,7 @@ nfs4_open_delegation(struct net *net, struct svc_fh *fh, | |||
3235 | * Let's not give out any delegations till everyone's | 3913 | * Let's not give out any delegations till everyone's |
3236 | * had the chance to reclaim theirs.... | 3914 | * had the chance to reclaim theirs.... |
3237 | */ | 3915 | */ |
3238 | if (locks_in_grace(net)) | 3916 | if (locks_in_grace(clp->net)) |
3239 | goto out_no_deleg; | 3917 | goto out_no_deleg; |
3240 | if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED)) | 3918 | if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED)) |
3241 | goto out_no_deleg; | 3919 | goto out_no_deleg; |
@@ -3254,21 +3932,17 @@ nfs4_open_delegation(struct net *net, struct svc_fh *fh, | |||
3254 | default: | 3932 | default: |
3255 | goto out_no_deleg; | 3933 | goto out_no_deleg; |
3256 | } | 3934 | } |
3257 | dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh); | 3935 | dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file); |
3258 | if (dp == NULL) | 3936 | if (IS_ERR(dp)) |
3259 | goto out_no_deleg; | 3937 | goto out_no_deleg; |
3260 | status = nfs4_set_delegation(dp, stp->st_file); | ||
3261 | if (status) | ||
3262 | goto out_free; | ||
3263 | 3938 | ||
3264 | memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid)); | 3939 | memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid)); |
3265 | 3940 | ||
3266 | dprintk("NFSD: delegation stateid=" STATEID_FMT "\n", | 3941 | dprintk("NFSD: delegation stateid=" STATEID_FMT "\n", |
3267 | STATEID_VAL(&dp->dl_stid.sc_stateid)); | 3942 | STATEID_VAL(&dp->dl_stid.sc_stateid)); |
3268 | open->op_delegate_type = NFS4_OPEN_DELEGATE_READ; | 3943 | open->op_delegate_type = NFS4_OPEN_DELEGATE_READ; |
3944 | nfs4_put_stid(&dp->dl_stid); | ||
3269 | return; | 3945 | return; |
3270 | out_free: | ||
3271 | destroy_delegation(dp); | ||
3272 | out_no_deleg: | 3946 | out_no_deleg: |
3273 | open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE; | 3947 | open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE; |
3274 | if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS && | 3948 | if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS && |
@@ -3301,16 +3975,12 @@ static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open, | |||
3301 | */ | 3975 | */ |
3302 | } | 3976 | } |
3303 | 3977 | ||
3304 | /* | ||
3305 | * called with nfs4_lock_state() held. | ||
3306 | */ | ||
3307 | __be32 | 3978 | __be32 |
3308 | nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) | 3979 | nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) |
3309 | { | 3980 | { |
3310 | struct nfsd4_compoundres *resp = rqstp->rq_resp; | 3981 | struct nfsd4_compoundres *resp = rqstp->rq_resp; |
3311 | struct nfs4_client *cl = open->op_openowner->oo_owner.so_client; | 3982 | struct nfs4_client *cl = open->op_openowner->oo_owner.so_client; |
3312 | struct nfs4_file *fp = NULL; | 3983 | struct nfs4_file *fp = NULL; |
3313 | struct inode *ino = current_fh->fh_dentry->d_inode; | ||
3314 | struct nfs4_ol_stateid *stp = NULL; | 3984 | struct nfs4_ol_stateid *stp = NULL; |
3315 | struct nfs4_delegation *dp = NULL; | 3985 | struct nfs4_delegation *dp = NULL; |
3316 | __be32 status; | 3986 | __be32 status; |
@@ -3320,21 +3990,18 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf | |||
3320 | * and check for delegations in the process of being recalled. | 3990 | * and check for delegations in the process of being recalled. |
3321 | * If not found, create the nfs4_file struct | 3991 | * If not found, create the nfs4_file struct |
3322 | */ | 3992 | */ |
3323 | fp = find_file(ino); | 3993 | fp = find_or_add_file(open->op_file, ¤t_fh->fh_handle); |
3324 | if (fp) { | 3994 | if (fp != open->op_file) { |
3325 | if ((status = nfs4_check_open(fp, open, &stp))) | ||
3326 | goto out; | ||
3327 | status = nfs4_check_deleg(cl, open, &dp); | 3995 | status = nfs4_check_deleg(cl, open, &dp); |
3328 | if (status) | 3996 | if (status) |
3329 | goto out; | 3997 | goto out; |
3998 | stp = nfsd4_find_existing_open(fp, open); | ||
3330 | } else { | 3999 | } else { |
4000 | open->op_file = NULL; | ||
3331 | status = nfserr_bad_stateid; | 4001 | status = nfserr_bad_stateid; |
3332 | if (nfsd4_is_deleg_cur(open)) | 4002 | if (nfsd4_is_deleg_cur(open)) |
3333 | goto out; | 4003 | goto out; |
3334 | status = nfserr_jukebox; | 4004 | status = nfserr_jukebox; |
3335 | fp = open->op_file; | ||
3336 | open->op_file = NULL; | ||
3337 | nfsd4_init_file(fp, ino); | ||
3338 | } | 4005 | } |
3339 | 4006 | ||
3340 | /* | 4007 | /* |
@@ -3347,22 +4014,19 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf | |||
3347 | if (status) | 4014 | if (status) |
3348 | goto out; | 4015 | goto out; |
3349 | } else { | 4016 | } else { |
3350 | status = nfs4_get_vfs_file(rqstp, fp, current_fh, open); | ||
3351 | if (status) | ||
3352 | goto out; | ||
3353 | status = nfsd4_truncate(rqstp, current_fh, open); | ||
3354 | if (status) | ||
3355 | goto out; | ||
3356 | stp = open->op_stp; | 4017 | stp = open->op_stp; |
3357 | open->op_stp = NULL; | 4018 | open->op_stp = NULL; |
3358 | init_open_stateid(stp, fp, open); | 4019 | init_open_stateid(stp, fp, open); |
4020 | status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open); | ||
4021 | if (status) { | ||
4022 | release_open_stateid(stp); | ||
4023 | goto out; | ||
4024 | } | ||
3359 | } | 4025 | } |
3360 | update_stateid(&stp->st_stid.sc_stateid); | 4026 | update_stateid(&stp->st_stid.sc_stateid); |
3361 | memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); | 4027 | memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); |
3362 | 4028 | ||
3363 | if (nfsd4_has_session(&resp->cstate)) { | 4029 | if (nfsd4_has_session(&resp->cstate)) { |
3364 | open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; | ||
3365 | |||
3366 | if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) { | 4030 | if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) { |
3367 | open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; | 4031 | open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; |
3368 | open->op_why_no_deleg = WND4_NOT_WANTED; | 4032 | open->op_why_no_deleg = WND4_NOT_WANTED; |
@@ -3374,7 +4038,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf | |||
3374 | * Attempt to hand out a delegation. No error return, because the | 4038 | * Attempt to hand out a delegation. No error return, because the |
3375 | * OPEN succeeds even if we fail. | 4039 | * OPEN succeeds even if we fail. |
3376 | */ | 4040 | */ |
3377 | nfs4_open_delegation(SVC_NET(rqstp), current_fh, open, stp); | 4041 | nfs4_open_delegation(current_fh, open, stp); |
3378 | nodeleg: | 4042 | nodeleg: |
3379 | status = nfs_ok; | 4043 | status = nfs_ok; |
3380 | 4044 | ||
@@ -3397,41 +4061,27 @@ out: | |||
3397 | if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) && | 4061 | if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) && |
3398 | !nfsd4_has_session(&resp->cstate)) | 4062 | !nfsd4_has_session(&resp->cstate)) |
3399 | open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM; | 4063 | open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM; |
4064 | if (dp) | ||
4065 | nfs4_put_stid(&dp->dl_stid); | ||
4066 | if (stp) | ||
4067 | nfs4_put_stid(&stp->st_stid); | ||
3400 | 4068 | ||
3401 | return status; | 4069 | return status; |
3402 | } | 4070 | } |
3403 | 4071 | ||
3404 | void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status) | 4072 | void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate, |
4073 | struct nfsd4_open *open, __be32 status) | ||
3405 | { | 4074 | { |
3406 | if (open->op_openowner) { | 4075 | if (open->op_openowner) { |
3407 | struct nfs4_openowner *oo = open->op_openowner; | 4076 | struct nfs4_stateowner *so = &open->op_openowner->oo_owner; |
3408 | 4077 | ||
3409 | if (!list_empty(&oo->oo_owner.so_stateids)) | 4078 | nfsd4_cstate_assign_replay(cstate, so); |
3410 | list_del_init(&oo->oo_close_lru); | 4079 | nfs4_put_stateowner(so); |
3411 | if (oo->oo_flags & NFS4_OO_NEW) { | ||
3412 | if (status) { | ||
3413 | release_openowner(oo); | ||
3414 | open->op_openowner = NULL; | ||
3415 | } else | ||
3416 | oo->oo_flags &= ~NFS4_OO_NEW; | ||
3417 | } | ||
3418 | } | 4080 | } |
3419 | if (open->op_file) | 4081 | if (open->op_file) |
3420 | nfsd4_free_file(open->op_file); | 4082 | nfsd4_free_file(open->op_file); |
3421 | if (open->op_stp) | 4083 | if (open->op_stp) |
3422 | free_generic_stateid(open->op_stp); | 4084 | nfs4_put_stid(&open->op_stp->st_stid); |
3423 | } | ||
3424 | |||
3425 | static __be32 lookup_clientid(clientid_t *clid, bool session, struct nfsd_net *nn, struct nfs4_client **clp) | ||
3426 | { | ||
3427 | struct nfs4_client *found; | ||
3428 | |||
3429 | if (STALE_CLIENTID(clid, nn)) | ||
3430 | return nfserr_stale_clientid; | ||
3431 | found = find_confirmed_client(clid, session, nn); | ||
3432 | if (clp) | ||
3433 | *clp = found; | ||
3434 | return found ? nfs_ok : nfserr_expired; | ||
3435 | } | 4085 | } |
3436 | 4086 | ||
3437 | __be32 | 4087 | __be32 |
@@ -3442,19 +4092,18 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
3442 | __be32 status; | 4092 | __be32 status; |
3443 | struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); | 4093 | struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); |
3444 | 4094 | ||
3445 | nfs4_lock_state(); | ||
3446 | dprintk("process_renew(%08x/%08x): starting\n", | 4095 | dprintk("process_renew(%08x/%08x): starting\n", |
3447 | clid->cl_boot, clid->cl_id); | 4096 | clid->cl_boot, clid->cl_id); |
3448 | status = lookup_clientid(clid, cstate->minorversion, nn, &clp); | 4097 | status = lookup_clientid(clid, cstate, nn); |
3449 | if (status) | 4098 | if (status) |
3450 | goto out; | 4099 | goto out; |
4100 | clp = cstate->clp; | ||
3451 | status = nfserr_cb_path_down; | 4101 | status = nfserr_cb_path_down; |
3452 | if (!list_empty(&clp->cl_delegations) | 4102 | if (!list_empty(&clp->cl_delegations) |
3453 | && clp->cl_cb_state != NFSD4_CB_UP) | 4103 | && clp->cl_cb_state != NFSD4_CB_UP) |
3454 | goto out; | 4104 | goto out; |
3455 | status = nfs_ok; | 4105 | status = nfs_ok; |
3456 | out: | 4106 | out: |
3457 | nfs4_unlock_state(); | ||
3458 | return status; | 4107 | return status; |
3459 | } | 4108 | } |
3460 | 4109 | ||
@@ -3483,12 +4132,11 @@ nfs4_laundromat(struct nfsd_net *nn) | |||
3483 | struct nfs4_client *clp; | 4132 | struct nfs4_client *clp; |
3484 | struct nfs4_openowner *oo; | 4133 | struct nfs4_openowner *oo; |
3485 | struct nfs4_delegation *dp; | 4134 | struct nfs4_delegation *dp; |
4135 | struct nfs4_ol_stateid *stp; | ||
3486 | struct list_head *pos, *next, reaplist; | 4136 | struct list_head *pos, *next, reaplist; |
3487 | time_t cutoff = get_seconds() - nn->nfsd4_lease; | 4137 | time_t cutoff = get_seconds() - nn->nfsd4_lease; |
3488 | time_t t, new_timeo = nn->nfsd4_lease; | 4138 | time_t t, new_timeo = nn->nfsd4_lease; |
3489 | 4139 | ||
3490 | nfs4_lock_state(); | ||
3491 | |||
3492 | dprintk("NFSD: laundromat service - starting\n"); | 4140 | dprintk("NFSD: laundromat service - starting\n"); |
3493 | nfsd4_end_grace(nn); | 4141 | nfsd4_end_grace(nn); |
3494 | INIT_LIST_HEAD(&reaplist); | 4142 | INIT_LIST_HEAD(&reaplist); |
@@ -3505,13 +4153,14 @@ nfs4_laundromat(struct nfsd_net *nn) | |||
3505 | clp->cl_clientid.cl_id); | 4153 | clp->cl_clientid.cl_id); |
3506 | continue; | 4154 | continue; |
3507 | } | 4155 | } |
3508 | list_move(&clp->cl_lru, &reaplist); | 4156 | list_add(&clp->cl_lru, &reaplist); |
3509 | } | 4157 | } |
3510 | spin_unlock(&nn->client_lock); | 4158 | spin_unlock(&nn->client_lock); |
3511 | list_for_each_safe(pos, next, &reaplist) { | 4159 | list_for_each_safe(pos, next, &reaplist) { |
3512 | clp = list_entry(pos, struct nfs4_client, cl_lru); | 4160 | clp = list_entry(pos, struct nfs4_client, cl_lru); |
3513 | dprintk("NFSD: purging unused client (clientid %08x)\n", | 4161 | dprintk("NFSD: purging unused client (clientid %08x)\n", |
3514 | clp->cl_clientid.cl_id); | 4162 | clp->cl_clientid.cl_id); |
4163 | list_del_init(&clp->cl_lru); | ||
3515 | expire_client(clp); | 4164 | expire_client(clp); |
3516 | } | 4165 | } |
3517 | spin_lock(&state_lock); | 4166 | spin_lock(&state_lock); |
@@ -3524,24 +4173,37 @@ nfs4_laundromat(struct nfsd_net *nn) | |||
3524 | new_timeo = min(new_timeo, t); | 4173 | new_timeo = min(new_timeo, t); |
3525 | break; | 4174 | break; |
3526 | } | 4175 | } |
3527 | list_move(&dp->dl_recall_lru, &reaplist); | 4176 | unhash_delegation_locked(dp); |
4177 | list_add(&dp->dl_recall_lru, &reaplist); | ||
3528 | } | 4178 | } |
3529 | spin_unlock(&state_lock); | 4179 | spin_unlock(&state_lock); |
3530 | list_for_each_safe(pos, next, &reaplist) { | 4180 | while (!list_empty(&reaplist)) { |
3531 | dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); | 4181 | dp = list_first_entry(&reaplist, struct nfs4_delegation, |
4182 | dl_recall_lru); | ||
4183 | list_del_init(&dp->dl_recall_lru); | ||
3532 | revoke_delegation(dp); | 4184 | revoke_delegation(dp); |
3533 | } | 4185 | } |
3534 | list_for_each_safe(pos, next, &nn->close_lru) { | 4186 | |
3535 | oo = container_of(pos, struct nfs4_openowner, oo_close_lru); | 4187 | spin_lock(&nn->client_lock); |
3536 | if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) { | 4188 | while (!list_empty(&nn->close_lru)) { |
4189 | oo = list_first_entry(&nn->close_lru, struct nfs4_openowner, | ||
4190 | oo_close_lru); | ||
4191 | if (time_after((unsigned long)oo->oo_time, | ||
4192 | (unsigned long)cutoff)) { | ||
3537 | t = oo->oo_time - cutoff; | 4193 | t = oo->oo_time - cutoff; |
3538 | new_timeo = min(new_timeo, t); | 4194 | new_timeo = min(new_timeo, t); |
3539 | break; | 4195 | break; |
3540 | } | 4196 | } |
3541 | release_openowner(oo); | 4197 | list_del_init(&oo->oo_close_lru); |
4198 | stp = oo->oo_last_closed_stid; | ||
4199 | oo->oo_last_closed_stid = NULL; | ||
4200 | spin_unlock(&nn->client_lock); | ||
4201 | nfs4_put_stid(&stp->st_stid); | ||
4202 | spin_lock(&nn->client_lock); | ||
3542 | } | 4203 | } |
4204 | spin_unlock(&nn->client_lock); | ||
4205 | |||
3543 | new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT); | 4206 | new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT); |
3544 | nfs4_unlock_state(); | ||
3545 | return new_timeo; | 4207 | return new_timeo; |
3546 | } | 4208 | } |
3547 | 4209 | ||
@@ -3564,7 +4226,7 @@ laundromat_main(struct work_struct *laundry) | |||
3564 | 4226 | ||
3565 | static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp) | 4227 | static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp) |
3566 | { | 4228 | { |
3567 | if (fhp->fh_dentry->d_inode != stp->st_file->fi_inode) | 4229 | if (!nfsd_fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle)) |
3568 | return nfserr_bad_stateid; | 4230 | return nfserr_bad_stateid; |
3569 | return nfs_ok; | 4231 | return nfs_ok; |
3570 | } | 4232 | } |
@@ -3666,10 +4328,10 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) | |||
3666 | { | 4328 | { |
3667 | struct nfs4_stid *s; | 4329 | struct nfs4_stid *s; |
3668 | struct nfs4_ol_stateid *ols; | 4330 | struct nfs4_ol_stateid *ols; |
3669 | __be32 status; | 4331 | __be32 status = nfserr_bad_stateid; |
3670 | 4332 | ||
3671 | if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) | 4333 | if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) |
3672 | return nfserr_bad_stateid; | 4334 | return status; |
3673 | /* Client debugging aid. */ | 4335 | /* Client debugging aid. */ |
3674 | if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) { | 4336 | if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) { |
3675 | char addr_str[INET6_ADDRSTRLEN]; | 4337 | char addr_str[INET6_ADDRSTRLEN]; |
@@ -3677,53 +4339,62 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) | |||
3677 | sizeof(addr_str)); | 4339 | sizeof(addr_str)); |
3678 | pr_warn_ratelimited("NFSD: client %s testing state ID " | 4340 | pr_warn_ratelimited("NFSD: client %s testing state ID " |
3679 | "with incorrect client ID\n", addr_str); | 4341 | "with incorrect client ID\n", addr_str); |
3680 | return nfserr_bad_stateid; | 4342 | return status; |
3681 | } | 4343 | } |
3682 | s = find_stateid(cl, stateid); | 4344 | spin_lock(&cl->cl_lock); |
4345 | s = find_stateid_locked(cl, stateid); | ||
3683 | if (!s) | 4346 | if (!s) |
3684 | return nfserr_bad_stateid; | 4347 | goto out_unlock; |
3685 | status = check_stateid_generation(stateid, &s->sc_stateid, 1); | 4348 | status = check_stateid_generation(stateid, &s->sc_stateid, 1); |
3686 | if (status) | 4349 | if (status) |
3687 | return status; | 4350 | goto out_unlock; |
3688 | switch (s->sc_type) { | 4351 | switch (s->sc_type) { |
3689 | case NFS4_DELEG_STID: | 4352 | case NFS4_DELEG_STID: |
3690 | return nfs_ok; | 4353 | status = nfs_ok; |
4354 | break; | ||
3691 | case NFS4_REVOKED_DELEG_STID: | 4355 | case NFS4_REVOKED_DELEG_STID: |
3692 | return nfserr_deleg_revoked; | 4356 | status = nfserr_deleg_revoked; |
4357 | break; | ||
3693 | case NFS4_OPEN_STID: | 4358 | case NFS4_OPEN_STID: |
3694 | case NFS4_LOCK_STID: | 4359 | case NFS4_LOCK_STID: |
3695 | ols = openlockstateid(s); | 4360 | ols = openlockstateid(s); |
3696 | if (ols->st_stateowner->so_is_open_owner | 4361 | if (ols->st_stateowner->so_is_open_owner |
3697 | && !(openowner(ols->st_stateowner)->oo_flags | 4362 | && !(openowner(ols->st_stateowner)->oo_flags |
3698 | & NFS4_OO_CONFIRMED)) | 4363 | & NFS4_OO_CONFIRMED)) |
3699 | return nfserr_bad_stateid; | 4364 | status = nfserr_bad_stateid; |
3700 | return nfs_ok; | 4365 | else |
4366 | status = nfs_ok; | ||
4367 | break; | ||
3701 | default: | 4368 | default: |
3702 | printk("unknown stateid type %x\n", s->sc_type); | 4369 | printk("unknown stateid type %x\n", s->sc_type); |
4370 | /* Fallthrough */ | ||
3703 | case NFS4_CLOSED_STID: | 4371 | case NFS4_CLOSED_STID: |
3704 | return nfserr_bad_stateid; | 4372 | case NFS4_CLOSED_DELEG_STID: |
4373 | status = nfserr_bad_stateid; | ||
3705 | } | 4374 | } |
4375 | out_unlock: | ||
4376 | spin_unlock(&cl->cl_lock); | ||
4377 | return status; | ||
3706 | } | 4378 | } |
3707 | 4379 | ||
3708 | static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask, | 4380 | static __be32 |
3709 | struct nfs4_stid **s, bool sessions, | 4381 | nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, |
3710 | struct nfsd_net *nn) | 4382 | stateid_t *stateid, unsigned char typemask, |
4383 | struct nfs4_stid **s, struct nfsd_net *nn) | ||
3711 | { | 4384 | { |
3712 | struct nfs4_client *cl; | ||
3713 | __be32 status; | 4385 | __be32 status; |
3714 | 4386 | ||
3715 | if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) | 4387 | if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) |
3716 | return nfserr_bad_stateid; | 4388 | return nfserr_bad_stateid; |
3717 | status = lookup_clientid(&stateid->si_opaque.so_clid, sessions, | 4389 | status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn); |
3718 | nn, &cl); | ||
3719 | if (status == nfserr_stale_clientid) { | 4390 | if (status == nfserr_stale_clientid) { |
3720 | if (sessions) | 4391 | if (cstate->session) |
3721 | return nfserr_bad_stateid; | 4392 | return nfserr_bad_stateid; |
3722 | return nfserr_stale_stateid; | 4393 | return nfserr_stale_stateid; |
3723 | } | 4394 | } |
3724 | if (status) | 4395 | if (status) |
3725 | return status; | 4396 | return status; |
3726 | *s = find_stateid_by_type(cl, stateid, typemask); | 4397 | *s = find_stateid_by_type(cstate->clp, stateid, typemask); |
3727 | if (!*s) | 4398 | if (!*s) |
3728 | return nfserr_bad_stateid; | 4399 | return nfserr_bad_stateid; |
3729 | return nfs_ok; | 4400 | return nfs_ok; |
@@ -3754,12 +4425,11 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate, | |||
3754 | if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) | 4425 | if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) |
3755 | return check_special_stateids(net, current_fh, stateid, flags); | 4426 | return check_special_stateids(net, current_fh, stateid, flags); |
3756 | 4427 | ||
3757 | nfs4_lock_state(); | 4428 | status = nfsd4_lookup_stateid(cstate, stateid, |
3758 | 4429 | NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, | |
3759 | status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, | 4430 | &s, nn); |
3760 | &s, cstate->minorversion, nn); | ||
3761 | if (status) | 4431 | if (status) |
3762 | goto out; | 4432 | return status; |
3763 | status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate)); | 4433 | status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate)); |
3764 | if (status) | 4434 | if (status) |
3765 | goto out; | 4435 | goto out; |
@@ -3770,12 +4440,13 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate, | |||
3770 | if (status) | 4440 | if (status) |
3771 | goto out; | 4441 | goto out; |
3772 | if (filpp) { | 4442 | if (filpp) { |
3773 | file = dp->dl_file->fi_deleg_file; | 4443 | file = dp->dl_stid.sc_file->fi_deleg_file; |
3774 | if (!file) { | 4444 | if (!file) { |
3775 | WARN_ON_ONCE(1); | 4445 | WARN_ON_ONCE(1); |
3776 | status = nfserr_serverfault; | 4446 | status = nfserr_serverfault; |
3777 | goto out; | 4447 | goto out; |
3778 | } | 4448 | } |
4449 | get_file(file); | ||
3779 | } | 4450 | } |
3780 | break; | 4451 | break; |
3781 | case NFS4_OPEN_STID: | 4452 | case NFS4_OPEN_STID: |
@@ -3791,10 +4462,12 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate, | |||
3791 | if (status) | 4462 | if (status) |
3792 | goto out; | 4463 | goto out; |
3793 | if (filpp) { | 4464 | if (filpp) { |
4465 | struct nfs4_file *fp = stp->st_stid.sc_file; | ||
4466 | |||
3794 | if (flags & RD_STATE) | 4467 | if (flags & RD_STATE) |
3795 | file = find_readable_file(stp->st_file); | 4468 | file = find_readable_file(fp); |
3796 | else | 4469 | else |
3797 | file = find_writeable_file(stp->st_file); | 4470 | file = find_writeable_file(fp); |
3798 | } | 4471 | } |
3799 | break; | 4472 | break; |
3800 | default: | 4473 | default: |
@@ -3803,28 +4476,12 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate, | |||
3803 | } | 4476 | } |
3804 | status = nfs_ok; | 4477 | status = nfs_ok; |
3805 | if (file) | 4478 | if (file) |
3806 | *filpp = get_file(file); | 4479 | *filpp = file; |
3807 | out: | 4480 | out: |
3808 | nfs4_unlock_state(); | 4481 | nfs4_put_stid(s); |
3809 | return status; | 4482 | return status; |
3810 | } | 4483 | } |
3811 | 4484 | ||
3812 | static __be32 | ||
3813 | nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp) | ||
3814 | { | ||
3815 | struct nfs4_lockowner *lo = lockowner(stp->st_stateowner); | ||
3816 | |||
3817 | if (check_for_locks(stp->st_file, lo)) | ||
3818 | return nfserr_locks_held; | ||
3819 | /* | ||
3820 | * Currently there's a 1-1 lock stateid<->lockowner | ||
3821 | * correspondance, and we have to delete the lockowner when we | ||
3822 | * delete the lock stateid: | ||
3823 | */ | ||
3824 | release_lockowner(lo); | ||
3825 | return nfs_ok; | ||
3826 | } | ||
3827 | |||
3828 | /* | 4485 | /* |
3829 | * Test if the stateid is valid | 4486 | * Test if the stateid is valid |
3830 | */ | 4487 | */ |
@@ -3835,11 +4492,9 @@ nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
3835 | struct nfsd4_test_stateid_id *stateid; | 4492 | struct nfsd4_test_stateid_id *stateid; |
3836 | struct nfs4_client *cl = cstate->session->se_client; | 4493 | struct nfs4_client *cl = cstate->session->se_client; |
3837 | 4494 | ||
3838 | nfs4_lock_state(); | ||
3839 | list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list) | 4495 | list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list) |
3840 | stateid->ts_id_status = | 4496 | stateid->ts_id_status = |
3841 | nfsd4_validate_stateid(cl, &stateid->ts_id_stateid); | 4497 | nfsd4_validate_stateid(cl, &stateid->ts_id_stateid); |
3842 | nfs4_unlock_state(); | ||
3843 | 4498 | ||
3844 | return nfs_ok; | 4499 | return nfs_ok; |
3845 | } | 4500 | } |
@@ -3851,37 +4506,50 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
3851 | stateid_t *stateid = &free_stateid->fr_stateid; | 4506 | stateid_t *stateid = &free_stateid->fr_stateid; |
3852 | struct nfs4_stid *s; | 4507 | struct nfs4_stid *s; |
3853 | struct nfs4_delegation *dp; | 4508 | struct nfs4_delegation *dp; |
4509 | struct nfs4_ol_stateid *stp; | ||
3854 | struct nfs4_client *cl = cstate->session->se_client; | 4510 | struct nfs4_client *cl = cstate->session->se_client; |
3855 | __be32 ret = nfserr_bad_stateid; | 4511 | __be32 ret = nfserr_bad_stateid; |
3856 | 4512 | ||
3857 | nfs4_lock_state(); | 4513 | spin_lock(&cl->cl_lock); |
3858 | s = find_stateid(cl, stateid); | 4514 | s = find_stateid_locked(cl, stateid); |
3859 | if (!s) | 4515 | if (!s) |
3860 | goto out; | 4516 | goto out_unlock; |
3861 | switch (s->sc_type) { | 4517 | switch (s->sc_type) { |
3862 | case NFS4_DELEG_STID: | 4518 | case NFS4_DELEG_STID: |
3863 | ret = nfserr_locks_held; | 4519 | ret = nfserr_locks_held; |
3864 | goto out; | 4520 | break; |
3865 | case NFS4_OPEN_STID: | 4521 | case NFS4_OPEN_STID: |
3866 | case NFS4_LOCK_STID: | ||
3867 | ret = check_stateid_generation(stateid, &s->sc_stateid, 1); | 4522 | ret = check_stateid_generation(stateid, &s->sc_stateid, 1); |
3868 | if (ret) | 4523 | if (ret) |
3869 | goto out; | 4524 | break; |
3870 | if (s->sc_type == NFS4_LOCK_STID) | 4525 | ret = nfserr_locks_held; |
3871 | ret = nfsd4_free_lock_stateid(openlockstateid(s)); | ||
3872 | else | ||
3873 | ret = nfserr_locks_held; | ||
3874 | break; | 4526 | break; |
4527 | case NFS4_LOCK_STID: | ||
4528 | ret = check_stateid_generation(stateid, &s->sc_stateid, 1); | ||
4529 | if (ret) | ||
4530 | break; | ||
4531 | stp = openlockstateid(s); | ||
4532 | ret = nfserr_locks_held; | ||
4533 | if (check_for_locks(stp->st_stid.sc_file, | ||
4534 | lockowner(stp->st_stateowner))) | ||
4535 | break; | ||
4536 | unhash_lock_stateid(stp); | ||
4537 | spin_unlock(&cl->cl_lock); | ||
4538 | nfs4_put_stid(s); | ||
4539 | ret = nfs_ok; | ||
4540 | goto out; | ||
3875 | case NFS4_REVOKED_DELEG_STID: | 4541 | case NFS4_REVOKED_DELEG_STID: |
3876 | dp = delegstateid(s); | 4542 | dp = delegstateid(s); |
3877 | destroy_revoked_delegation(dp); | 4543 | list_del_init(&dp->dl_recall_lru); |
4544 | spin_unlock(&cl->cl_lock); | ||
4545 | nfs4_put_stid(s); | ||
3878 | ret = nfs_ok; | 4546 | ret = nfs_ok; |
3879 | break; | 4547 | goto out; |
3880 | default: | 4548 | /* Default falls through and returns nfserr_bad_stateid */ |
3881 | ret = nfserr_bad_stateid; | ||
3882 | } | 4549 | } |
4550 | out_unlock: | ||
4551 | spin_unlock(&cl->cl_lock); | ||
3883 | out: | 4552 | out: |
3884 | nfs4_unlock_state(); | ||
3885 | return ret; | 4553 | return ret; |
3886 | } | 4554 | } |
3887 | 4555 | ||
@@ -3926,20 +4594,24 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, | |||
3926 | { | 4594 | { |
3927 | __be32 status; | 4595 | __be32 status; |
3928 | struct nfs4_stid *s; | 4596 | struct nfs4_stid *s; |
4597 | struct nfs4_ol_stateid *stp = NULL; | ||
3929 | 4598 | ||
3930 | dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__, | 4599 | dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__, |
3931 | seqid, STATEID_VAL(stateid)); | 4600 | seqid, STATEID_VAL(stateid)); |
3932 | 4601 | ||
3933 | *stpp = NULL; | 4602 | *stpp = NULL; |
3934 | status = nfsd4_lookup_stateid(stateid, typemask, &s, | 4603 | status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn); |
3935 | cstate->minorversion, nn); | ||
3936 | if (status) | 4604 | if (status) |
3937 | return status; | 4605 | return status; |
3938 | *stpp = openlockstateid(s); | 4606 | stp = openlockstateid(s); |
3939 | if (!nfsd4_has_session(cstate)) | 4607 | nfsd4_cstate_assign_replay(cstate, stp->st_stateowner); |
3940 | cstate->replay_owner = (*stpp)->st_stateowner; | ||
3941 | 4608 | ||
3942 | return nfs4_seqid_op_checks(cstate, stateid, seqid, *stpp); | 4609 | status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp); |
4610 | if (!status) | ||
4611 | *stpp = stp; | ||
4612 | else | ||
4613 | nfs4_put_stid(&stp->st_stid); | ||
4614 | return status; | ||
3943 | } | 4615 | } |
3944 | 4616 | ||
3945 | static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, | 4617 | static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, |
@@ -3947,14 +4619,18 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs | |||
3947 | { | 4619 | { |
3948 | __be32 status; | 4620 | __be32 status; |
3949 | struct nfs4_openowner *oo; | 4621 | struct nfs4_openowner *oo; |
4622 | struct nfs4_ol_stateid *stp; | ||
3950 | 4623 | ||
3951 | status = nfs4_preprocess_seqid_op(cstate, seqid, stateid, | 4624 | status = nfs4_preprocess_seqid_op(cstate, seqid, stateid, |
3952 | NFS4_OPEN_STID, stpp, nn); | 4625 | NFS4_OPEN_STID, &stp, nn); |
3953 | if (status) | 4626 | if (status) |
3954 | return status; | 4627 | return status; |
3955 | oo = openowner((*stpp)->st_stateowner); | 4628 | oo = openowner(stp->st_stateowner); |
3956 | if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) | 4629 | if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { |
4630 | nfs4_put_stid(&stp->st_stid); | ||
3957 | return nfserr_bad_stateid; | 4631 | return nfserr_bad_stateid; |
4632 | } | ||
4633 | *stpp = stp; | ||
3958 | return nfs_ok; | 4634 | return nfs_ok; |
3959 | } | 4635 | } |
3960 | 4636 | ||
@@ -3974,8 +4650,6 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
3974 | if (status) | 4650 | if (status) |
3975 | return status; | 4651 | return status; |
3976 | 4652 | ||
3977 | nfs4_lock_state(); | ||
3978 | |||
3979 | status = nfs4_preprocess_seqid_op(cstate, | 4653 | status = nfs4_preprocess_seqid_op(cstate, |
3980 | oc->oc_seqid, &oc->oc_req_stateid, | 4654 | oc->oc_seqid, &oc->oc_req_stateid, |
3981 | NFS4_OPEN_STID, &stp, nn); | 4655 | NFS4_OPEN_STID, &stp, nn); |
@@ -3984,7 +4658,7 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
3984 | oo = openowner(stp->st_stateowner); | 4658 | oo = openowner(stp->st_stateowner); |
3985 | status = nfserr_bad_stateid; | 4659 | status = nfserr_bad_stateid; |
3986 | if (oo->oo_flags & NFS4_OO_CONFIRMED) | 4660 | if (oo->oo_flags & NFS4_OO_CONFIRMED) |
3987 | goto out; | 4661 | goto put_stateid; |
3988 | oo->oo_flags |= NFS4_OO_CONFIRMED; | 4662 | oo->oo_flags |= NFS4_OO_CONFIRMED; |
3989 | update_stateid(&stp->st_stid.sc_stateid); | 4663 | update_stateid(&stp->st_stid.sc_stateid); |
3990 | memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); | 4664 | memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); |
@@ -3993,10 +4667,10 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
3993 | 4667 | ||
3994 | nfsd4_client_record_create(oo->oo_owner.so_client); | 4668 | nfsd4_client_record_create(oo->oo_owner.so_client); |
3995 | status = nfs_ok; | 4669 | status = nfs_ok; |
4670 | put_stateid: | ||
4671 | nfs4_put_stid(&stp->st_stid); | ||
3996 | out: | 4672 | out: |
3997 | nfsd4_bump_seqid(cstate, status); | 4673 | nfsd4_bump_seqid(cstate, status); |
3998 | if (!cstate->replay_owner) | ||
3999 | nfs4_unlock_state(); | ||
4000 | return status; | 4674 | return status; |
4001 | } | 4675 | } |
4002 | 4676 | ||
@@ -4004,7 +4678,7 @@ static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 a | |||
4004 | { | 4678 | { |
4005 | if (!test_access(access, stp)) | 4679 | if (!test_access(access, stp)) |
4006 | return; | 4680 | return; |
4007 | nfs4_file_put_access(stp->st_file, nfs4_access_to_omode(access)); | 4681 | nfs4_file_put_access(stp->st_stid.sc_file, access); |
4008 | clear_access(access, stp); | 4682 | clear_access(access, stp); |
4009 | } | 4683 | } |
4010 | 4684 | ||
@@ -4026,16 +4700,6 @@ static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_ac | |||
4026 | } | 4700 | } |
4027 | } | 4701 | } |
4028 | 4702 | ||
4029 | static void | ||
4030 | reset_union_bmap_deny(unsigned long deny, struct nfs4_ol_stateid *stp) | ||
4031 | { | ||
4032 | int i; | ||
4033 | for (i = 0; i < 4; i++) { | ||
4034 | if ((i & deny) != i) | ||
4035 | clear_deny(i, stp); | ||
4036 | } | ||
4037 | } | ||
4038 | |||
4039 | __be32 | 4703 | __be32 |
4040 | nfsd4_open_downgrade(struct svc_rqst *rqstp, | 4704 | nfsd4_open_downgrade(struct svc_rqst *rqstp, |
4041 | struct nfsd4_compound_state *cstate, | 4705 | struct nfsd4_compound_state *cstate, |
@@ -4053,21 +4717,20 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp, | |||
4053 | dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__, | 4717 | dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__, |
4054 | od->od_deleg_want); | 4718 | od->od_deleg_want); |
4055 | 4719 | ||
4056 | nfs4_lock_state(); | ||
4057 | status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid, | 4720 | status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid, |
4058 | &od->od_stateid, &stp, nn); | 4721 | &od->od_stateid, &stp, nn); |
4059 | if (status) | 4722 | if (status) |
4060 | goto out; | 4723 | goto out; |
4061 | status = nfserr_inval; | 4724 | status = nfserr_inval; |
4062 | if (!test_access(od->od_share_access, stp)) { | 4725 | if (!test_access(od->od_share_access, stp)) { |
4063 | dprintk("NFSD: access not a subset current bitmap: 0x%lx, input access=%08x\n", | 4726 | dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n", |
4064 | stp->st_access_bmap, od->od_share_access); | 4727 | stp->st_access_bmap, od->od_share_access); |
4065 | goto out; | 4728 | goto put_stateid; |
4066 | } | 4729 | } |
4067 | if (!test_deny(od->od_share_deny, stp)) { | 4730 | if (!test_deny(od->od_share_deny, stp)) { |
4068 | dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n", | 4731 | dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n", |
4069 | stp->st_deny_bmap, od->od_share_deny); | 4732 | stp->st_deny_bmap, od->od_share_deny); |
4070 | goto out; | 4733 | goto put_stateid; |
4071 | } | 4734 | } |
4072 | nfs4_stateid_downgrade(stp, od->od_share_access); | 4735 | nfs4_stateid_downgrade(stp, od->od_share_access); |
4073 | 4736 | ||
@@ -4076,17 +4739,31 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp, | |||
4076 | update_stateid(&stp->st_stid.sc_stateid); | 4739 | update_stateid(&stp->st_stid.sc_stateid); |
4077 | memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); | 4740 | memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); |
4078 | status = nfs_ok; | 4741 | status = nfs_ok; |
4742 | put_stateid: | ||
4743 | nfs4_put_stid(&stp->st_stid); | ||
4079 | out: | 4744 | out: |
4080 | nfsd4_bump_seqid(cstate, status); | 4745 | nfsd4_bump_seqid(cstate, status); |
4081 | if (!cstate->replay_owner) | ||
4082 | nfs4_unlock_state(); | ||
4083 | return status; | 4746 | return status; |
4084 | } | 4747 | } |
4085 | 4748 | ||
4086 | static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) | 4749 | static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) |
4087 | { | 4750 | { |
4088 | unhash_open_stateid(s); | 4751 | struct nfs4_client *clp = s->st_stid.sc_client; |
4752 | LIST_HEAD(reaplist); | ||
4753 | |||
4089 | s->st_stid.sc_type = NFS4_CLOSED_STID; | 4754 | s->st_stid.sc_type = NFS4_CLOSED_STID; |
4755 | spin_lock(&clp->cl_lock); | ||
4756 | unhash_open_stateid(s, &reaplist); | ||
4757 | |||
4758 | if (clp->cl_minorversion) { | ||
4759 | put_ol_stateid_locked(s, &reaplist); | ||
4760 | spin_unlock(&clp->cl_lock); | ||
4761 | free_ol_stateid_reaplist(&reaplist); | ||
4762 | } else { | ||
4763 | spin_unlock(&clp->cl_lock); | ||
4764 | free_ol_stateid_reaplist(&reaplist); | ||
4765 | move_to_close_lru(s, clp->net); | ||
4766 | } | ||
4090 | } | 4767 | } |
4091 | 4768 | ||
4092 | /* | 4769 | /* |
@@ -4097,7 +4774,6 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
4097 | struct nfsd4_close *close) | 4774 | struct nfsd4_close *close) |
4098 | { | 4775 | { |
4099 | __be32 status; | 4776 | __be32 status; |
4100 | struct nfs4_openowner *oo; | ||
4101 | struct nfs4_ol_stateid *stp; | 4777 | struct nfs4_ol_stateid *stp; |
4102 | struct net *net = SVC_NET(rqstp); | 4778 | struct net *net = SVC_NET(rqstp); |
4103 | struct nfsd_net *nn = net_generic(net, nfsd_net_id); | 4779 | struct nfsd_net *nn = net_generic(net, nfsd_net_id); |
@@ -4105,7 +4781,6 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
4105 | dprintk("NFSD: nfsd4_close on file %pd\n", | 4781 | dprintk("NFSD: nfsd4_close on file %pd\n", |
4106 | cstate->current_fh.fh_dentry); | 4782 | cstate->current_fh.fh_dentry); |
4107 | 4783 | ||
4108 | nfs4_lock_state(); | ||
4109 | status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid, | 4784 | status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid, |
4110 | &close->cl_stateid, | 4785 | &close->cl_stateid, |
4111 | NFS4_OPEN_STID|NFS4_CLOSED_STID, | 4786 | NFS4_OPEN_STID|NFS4_CLOSED_STID, |
@@ -4113,31 +4788,14 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
4113 | nfsd4_bump_seqid(cstate, status); | 4788 | nfsd4_bump_seqid(cstate, status); |
4114 | if (status) | 4789 | if (status) |
4115 | goto out; | 4790 | goto out; |
4116 | oo = openowner(stp->st_stateowner); | ||
4117 | update_stateid(&stp->st_stid.sc_stateid); | 4791 | update_stateid(&stp->st_stid.sc_stateid); |
4118 | memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); | 4792 | memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); |
4119 | 4793 | ||
4120 | nfsd4_close_open_stateid(stp); | 4794 | nfsd4_close_open_stateid(stp); |
4121 | 4795 | ||
4122 | if (cstate->minorversion) | 4796 | /* put reference from nfs4_preprocess_seqid_op */ |
4123 | free_generic_stateid(stp); | 4797 | nfs4_put_stid(&stp->st_stid); |
4124 | else | ||
4125 | oo->oo_last_closed_stid = stp; | ||
4126 | |||
4127 | if (list_empty(&oo->oo_owner.so_stateids)) { | ||
4128 | if (cstate->minorversion) | ||
4129 | release_openowner(oo); | ||
4130 | else { | ||
4131 | /* | ||
4132 | * In the 4.0 case we need to keep the owners around a | ||
4133 | * little while to handle CLOSE replay. | ||
4134 | */ | ||
4135 | move_to_close_lru(oo, SVC_NET(rqstp)); | ||
4136 | } | ||
4137 | } | ||
4138 | out: | 4798 | out: |
4139 | if (!cstate->replay_owner) | ||
4140 | nfs4_unlock_state(); | ||
4141 | return status; | 4799 | return status; |
4142 | } | 4800 | } |
4143 | 4801 | ||
@@ -4154,28 +4812,24 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
4154 | if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) | 4812 | if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) |
4155 | return status; | 4813 | return status; |
4156 | 4814 | ||
4157 | nfs4_lock_state(); | 4815 | status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn); |
4158 | status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID, &s, | ||
4159 | cstate->minorversion, nn); | ||
4160 | if (status) | 4816 | if (status) |
4161 | goto out; | 4817 | goto out; |
4162 | dp = delegstateid(s); | 4818 | dp = delegstateid(s); |
4163 | status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate)); | 4819 | status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate)); |
4164 | if (status) | 4820 | if (status) |
4165 | goto out; | 4821 | goto put_stateid; |
4166 | 4822 | ||
4167 | destroy_delegation(dp); | 4823 | destroy_delegation(dp); |
4824 | put_stateid: | ||
4825 | nfs4_put_stid(&dp->dl_stid); | ||
4168 | out: | 4826 | out: |
4169 | nfs4_unlock_state(); | ||
4170 | |||
4171 | return status; | 4827 | return status; |
4172 | } | 4828 | } |
4173 | 4829 | ||
4174 | 4830 | ||
4175 | #define LOFF_OVERFLOW(start, len) ((u64)(len) > ~(u64)(start)) | 4831 | #define LOFF_OVERFLOW(start, len) ((u64)(len) > ~(u64)(start)) |
4176 | 4832 | ||
4177 | #define LOCKOWNER_INO_HASH_MASK (LOCKOWNER_INO_HASH_SIZE - 1) | ||
4178 | |||
4179 | static inline u64 | 4833 | static inline u64 |
4180 | end_offset(u64 start, u64 len) | 4834 | end_offset(u64 start, u64 len) |
4181 | { | 4835 | { |
@@ -4196,13 +4850,6 @@ last_byte_offset(u64 start, u64 len) | |||
4196 | return end > start ? end - 1: NFS4_MAX_UINT64; | 4850 | return end > start ? end - 1: NFS4_MAX_UINT64; |
4197 | } | 4851 | } |
4198 | 4852 | ||
4199 | static unsigned int lockowner_ino_hashval(struct inode *inode, u32 cl_id, struct xdr_netobj *ownername) | ||
4200 | { | ||
4201 | return (file_hashval(inode) + cl_id | ||
4202 | + opaque_hashval(ownername->data, ownername->len)) | ||
4203 | & LOCKOWNER_INO_HASH_MASK; | ||
4204 | } | ||
4205 | |||
4206 | /* | 4853 | /* |
4207 | * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that | 4854 | * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that |
4208 | * we can't properly handle lock requests that go beyond the (2^63 - 1)-th | 4855 | * we can't properly handle lock requests that go beyond the (2^63 - 1)-th |
@@ -4255,47 +4902,56 @@ nevermind: | |||
4255 | deny->ld_type = NFS4_WRITE_LT; | 4902 | deny->ld_type = NFS4_WRITE_LT; |
4256 | } | 4903 | } |
4257 | 4904 | ||
4258 | static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, clientid_t *clid, struct xdr_netobj *owner) | 4905 | static struct nfs4_lockowner * |
4906 | find_lockowner_str_locked(clientid_t *clid, struct xdr_netobj *owner, | ||
4907 | struct nfs4_client *clp) | ||
4259 | { | 4908 | { |
4260 | struct nfs4_ol_stateid *lst; | 4909 | unsigned int strhashval = ownerstr_hashval(owner); |
4910 | struct nfs4_stateowner *so; | ||
4261 | 4911 | ||
4262 | if (!same_owner_str(&lo->lo_owner, owner, clid)) | 4912 | lockdep_assert_held(&clp->cl_lock); |
4263 | return false; | 4913 | |
4264 | if (list_empty(&lo->lo_owner.so_stateids)) { | 4914 | list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval], |
4265 | WARN_ON_ONCE(1); | 4915 | so_strhash) { |
4266 | return false; | 4916 | if (so->so_is_open_owner) |
4917 | continue; | ||
4918 | if (!same_owner_str(so, owner)) | ||
4919 | continue; | ||
4920 | atomic_inc(&so->so_count); | ||
4921 | return lockowner(so); | ||
4267 | } | 4922 | } |
4268 | lst = list_first_entry(&lo->lo_owner.so_stateids, | 4923 | return NULL; |
4269 | struct nfs4_ol_stateid, st_perstateowner); | ||
4270 | return lst->st_file->fi_inode == inode; | ||
4271 | } | 4924 | } |
4272 | 4925 | ||
4273 | static struct nfs4_lockowner * | 4926 | static struct nfs4_lockowner * |
4274 | find_lockowner_str(struct inode *inode, clientid_t *clid, | 4927 | find_lockowner_str(clientid_t *clid, struct xdr_netobj *owner, |
4275 | struct xdr_netobj *owner, struct nfsd_net *nn) | 4928 | struct nfs4_client *clp) |
4276 | { | 4929 | { |
4277 | unsigned int hashval = lockowner_ino_hashval(inode, clid->cl_id, owner); | ||
4278 | struct nfs4_lockowner *lo; | 4930 | struct nfs4_lockowner *lo; |
4279 | 4931 | ||
4280 | list_for_each_entry(lo, &nn->lockowner_ino_hashtbl[hashval], lo_owner_ino_hash) { | 4932 | spin_lock(&clp->cl_lock); |
4281 | if (same_lockowner_ino(lo, inode, clid, owner)) | 4933 | lo = find_lockowner_str_locked(clid, owner, clp); |
4282 | return lo; | 4934 | spin_unlock(&clp->cl_lock); |
4283 | } | 4935 | return lo; |
4284 | return NULL; | ||
4285 | } | 4936 | } |
4286 | 4937 | ||
4287 | static void hash_lockowner(struct nfs4_lockowner *lo, unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp) | 4938 | static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop) |
4288 | { | 4939 | { |
4289 | struct inode *inode = open_stp->st_file->fi_inode; | 4940 | unhash_lockowner_locked(lockowner(sop)); |
4290 | unsigned int inohash = lockowner_ino_hashval(inode, | 4941 | } |
4291 | clp->cl_clientid.cl_id, &lo->lo_owner.so_owner); | 4942 | |
4292 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); | 4943 | static void nfs4_free_lockowner(struct nfs4_stateowner *sop) |
4944 | { | ||
4945 | struct nfs4_lockowner *lo = lockowner(sop); | ||
4293 | 4946 | ||
4294 | list_add(&lo->lo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]); | 4947 | kmem_cache_free(lockowner_slab, lo); |
4295 | list_add(&lo->lo_owner_ino_hash, &nn->lockowner_ino_hashtbl[inohash]); | ||
4296 | list_add(&lo->lo_perstateid, &open_stp->st_lockowners); | ||
4297 | } | 4948 | } |
4298 | 4949 | ||
4950 | static const struct nfs4_stateowner_operations lockowner_ops = { | ||
4951 | .so_unhash = nfs4_unhash_lockowner, | ||
4952 | .so_free = nfs4_free_lockowner, | ||
4953 | }; | ||
4954 | |||
4299 | /* | 4955 | /* |
4300 | * Alloc a lock owner structure. | 4956 | * Alloc a lock owner structure. |
4301 | * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has | 4957 | * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has |
@@ -4303,42 +4959,107 @@ static void hash_lockowner(struct nfs4_lockowner *lo, unsigned int strhashval, s | |||
4303 | * | 4959 | * |
4304 | * strhashval = ownerstr_hashval | 4960 | * strhashval = ownerstr_hashval |
4305 | */ | 4961 | */ |
4306 | |||
4307 | static struct nfs4_lockowner * | 4962 | static struct nfs4_lockowner * |
4308 | alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock) { | 4963 | alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, |
4309 | struct nfs4_lockowner *lo; | 4964 | struct nfs4_ol_stateid *open_stp, |
4965 | struct nfsd4_lock *lock) | ||
4966 | { | ||
4967 | struct nfs4_lockowner *lo, *ret; | ||
4310 | 4968 | ||
4311 | lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp); | 4969 | lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp); |
4312 | if (!lo) | 4970 | if (!lo) |
4313 | return NULL; | 4971 | return NULL; |
4314 | INIT_LIST_HEAD(&lo->lo_owner.so_stateids); | 4972 | INIT_LIST_HEAD(&lo->lo_owner.so_stateids); |
4315 | lo->lo_owner.so_is_open_owner = 0; | 4973 | lo->lo_owner.so_is_open_owner = 0; |
4316 | /* It is the openowner seqid that will be incremented in encode in the | 4974 | lo->lo_owner.so_seqid = lock->lk_new_lock_seqid; |
4317 | * case of new lockowners; so increment the lock seqid manually: */ | 4975 | lo->lo_owner.so_ops = &lockowner_ops; |
4318 | lo->lo_owner.so_seqid = lock->lk_new_lock_seqid + 1; | 4976 | spin_lock(&clp->cl_lock); |
4319 | hash_lockowner(lo, strhashval, clp, open_stp); | 4977 | ret = find_lockowner_str_locked(&clp->cl_clientid, |
4978 | &lock->lk_new_owner, clp); | ||
4979 | if (ret == NULL) { | ||
4980 | list_add(&lo->lo_owner.so_strhash, | ||
4981 | &clp->cl_ownerstr_hashtbl[strhashval]); | ||
4982 | ret = lo; | ||
4983 | } else | ||
4984 | nfs4_free_lockowner(&lo->lo_owner); | ||
4985 | spin_unlock(&clp->cl_lock); | ||
4320 | return lo; | 4986 | return lo; |
4321 | } | 4987 | } |
4322 | 4988 | ||
4323 | static struct nfs4_ol_stateid * | 4989 | static void |
4324 | alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct nfs4_ol_stateid *open_stp) | 4990 | init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo, |
4991 | struct nfs4_file *fp, struct inode *inode, | ||
4992 | struct nfs4_ol_stateid *open_stp) | ||
4325 | { | 4993 | { |
4326 | struct nfs4_ol_stateid *stp; | ||
4327 | struct nfs4_client *clp = lo->lo_owner.so_client; | 4994 | struct nfs4_client *clp = lo->lo_owner.so_client; |
4328 | 4995 | ||
4329 | stp = nfs4_alloc_stateid(clp); | 4996 | lockdep_assert_held(&clp->cl_lock); |
4330 | if (stp == NULL) | 4997 | |
4331 | return NULL; | 4998 | atomic_inc(&stp->st_stid.sc_count); |
4332 | stp->st_stid.sc_type = NFS4_LOCK_STID; | 4999 | stp->st_stid.sc_type = NFS4_LOCK_STID; |
4333 | list_add(&stp->st_perfile, &fp->fi_stateids); | ||
4334 | list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); | ||
4335 | stp->st_stateowner = &lo->lo_owner; | 5000 | stp->st_stateowner = &lo->lo_owner; |
5001 | atomic_inc(&lo->lo_owner.so_count); | ||
4336 | get_nfs4_file(fp); | 5002 | get_nfs4_file(fp); |
4337 | stp->st_file = fp; | 5003 | stp->st_stid.sc_file = fp; |
5004 | stp->st_stid.sc_free = nfs4_free_lock_stateid; | ||
4338 | stp->st_access_bmap = 0; | 5005 | stp->st_access_bmap = 0; |
4339 | stp->st_deny_bmap = open_stp->st_deny_bmap; | 5006 | stp->st_deny_bmap = open_stp->st_deny_bmap; |
4340 | stp->st_openstp = open_stp; | 5007 | stp->st_openstp = open_stp; |
4341 | return stp; | 5008 | list_add(&stp->st_locks, &open_stp->st_locks); |
5009 | list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); | ||
5010 | spin_lock(&fp->fi_lock); | ||
5011 | list_add(&stp->st_perfile, &fp->fi_stateids); | ||
5012 | spin_unlock(&fp->fi_lock); | ||
5013 | } | ||
5014 | |||
5015 | static struct nfs4_ol_stateid * | ||
5016 | find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp) | ||
5017 | { | ||
5018 | struct nfs4_ol_stateid *lst; | ||
5019 | struct nfs4_client *clp = lo->lo_owner.so_client; | ||
5020 | |||
5021 | lockdep_assert_held(&clp->cl_lock); | ||
5022 | |||
5023 | list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) { | ||
5024 | if (lst->st_stid.sc_file == fp) { | ||
5025 | atomic_inc(&lst->st_stid.sc_count); | ||
5026 | return lst; | ||
5027 | } | ||
5028 | } | ||
5029 | return NULL; | ||
5030 | } | ||
5031 | |||
5032 | static struct nfs4_ol_stateid * | ||
5033 | find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi, | ||
5034 | struct inode *inode, struct nfs4_ol_stateid *ost, | ||
5035 | bool *new) | ||
5036 | { | ||
5037 | struct nfs4_stid *ns = NULL; | ||
5038 | struct nfs4_ol_stateid *lst; | ||
5039 | struct nfs4_openowner *oo = openowner(ost->st_stateowner); | ||
5040 | struct nfs4_client *clp = oo->oo_owner.so_client; | ||
5041 | |||
5042 | spin_lock(&clp->cl_lock); | ||
5043 | lst = find_lock_stateid(lo, fi); | ||
5044 | if (lst == NULL) { | ||
5045 | spin_unlock(&clp->cl_lock); | ||
5046 | ns = nfs4_alloc_stid(clp, stateid_slab); | ||
5047 | if (ns == NULL) | ||
5048 | return NULL; | ||
5049 | |||
5050 | spin_lock(&clp->cl_lock); | ||
5051 | lst = find_lock_stateid(lo, fi); | ||
5052 | if (likely(!lst)) { | ||
5053 | lst = openlockstateid(ns); | ||
5054 | init_lock_stateid(lst, lo, fi, inode, ost); | ||
5055 | ns = NULL; | ||
5056 | *new = true; | ||
5057 | } | ||
5058 | } | ||
5059 | spin_unlock(&clp->cl_lock); | ||
5060 | if (ns) | ||
5061 | nfs4_put_stid(ns); | ||
5062 | return lst; | ||
4342 | } | 5063 | } |
4343 | 5064 | ||
4344 | static int | 5065 | static int |
@@ -4350,46 +5071,53 @@ check_lock_length(u64 offset, u64 length) | |||
4350 | 5071 | ||
4351 | static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access) | 5072 | static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access) |
4352 | { | 5073 | { |
4353 | struct nfs4_file *fp = lock_stp->st_file; | 5074 | struct nfs4_file *fp = lock_stp->st_stid.sc_file; |
4354 | int oflag = nfs4_access_to_omode(access); | 5075 | |
5076 | lockdep_assert_held(&fp->fi_lock); | ||
4355 | 5077 | ||
4356 | if (test_access(access, lock_stp)) | 5078 | if (test_access(access, lock_stp)) |
4357 | return; | 5079 | return; |
4358 | nfs4_file_get_access(fp, oflag); | 5080 | __nfs4_file_get_access(fp, access); |
4359 | set_access(access, lock_stp); | 5081 | set_access(access, lock_stp); |
4360 | } | 5082 | } |
4361 | 5083 | ||
4362 | static __be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **lst, bool *new) | 5084 | static __be32 |
5085 | lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, | ||
5086 | struct nfs4_ol_stateid *ost, | ||
5087 | struct nfsd4_lock *lock, | ||
5088 | struct nfs4_ol_stateid **lst, bool *new) | ||
4363 | { | 5089 | { |
4364 | struct nfs4_file *fi = ost->st_file; | 5090 | __be32 status; |
5091 | struct nfs4_file *fi = ost->st_stid.sc_file; | ||
4365 | struct nfs4_openowner *oo = openowner(ost->st_stateowner); | 5092 | struct nfs4_openowner *oo = openowner(ost->st_stateowner); |
4366 | struct nfs4_client *cl = oo->oo_owner.so_client; | 5093 | struct nfs4_client *cl = oo->oo_owner.so_client; |
5094 | struct inode *inode = cstate->current_fh.fh_dentry->d_inode; | ||
4367 | struct nfs4_lockowner *lo; | 5095 | struct nfs4_lockowner *lo; |
4368 | unsigned int strhashval; | 5096 | unsigned int strhashval; |
4369 | struct nfsd_net *nn = net_generic(cl->net, nfsd_net_id); | 5097 | |
4370 | 5098 | lo = find_lockowner_str(&cl->cl_clientid, &lock->v.new.owner, cl); | |
4371 | lo = find_lockowner_str(fi->fi_inode, &cl->cl_clientid, | 5099 | if (!lo) { |
4372 | &lock->v.new.owner, nn); | 5100 | strhashval = ownerstr_hashval(&lock->v.new.owner); |
4373 | if (lo) { | 5101 | lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock); |
4374 | if (!cstate->minorversion) | 5102 | if (lo == NULL) |
4375 | return nfserr_bad_seqid; | 5103 | return nfserr_jukebox; |
4376 | /* XXX: a lockowner always has exactly one stateid: */ | 5104 | } else { |
4377 | *lst = list_first_entry(&lo->lo_owner.so_stateids, | 5105 | /* with an existing lockowner, seqids must be the same */ |
4378 | struct nfs4_ol_stateid, st_perstateowner); | 5106 | status = nfserr_bad_seqid; |
4379 | return nfs_ok; | 5107 | if (!cstate->minorversion && |
5108 | lock->lk_new_lock_seqid != lo->lo_owner.so_seqid) | ||
5109 | goto out; | ||
4380 | } | 5110 | } |
4381 | strhashval = ownerstr_hashval(cl->cl_clientid.cl_id, | 5111 | |
4382 | &lock->v.new.owner); | 5112 | *lst = find_or_create_lock_stateid(lo, fi, inode, ost, new); |
4383 | lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock); | ||
4384 | if (lo == NULL) | ||
4385 | return nfserr_jukebox; | ||
4386 | *lst = alloc_init_lock_stateid(lo, fi, ost); | ||
4387 | if (*lst == NULL) { | 5113 | if (*lst == NULL) { |
4388 | release_lockowner(lo); | 5114 | status = nfserr_jukebox; |
4389 | return nfserr_jukebox; | 5115 | goto out; |
4390 | } | 5116 | } |
4391 | *new = true; | 5117 | status = nfs_ok; |
4392 | return nfs_ok; | 5118 | out: |
5119 | nfs4_put_stateowner(&lo->lo_owner); | ||
5120 | return status; | ||
4393 | } | 5121 | } |
4394 | 5122 | ||
4395 | /* | 5123 | /* |
@@ -4401,14 +5129,16 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
4401 | { | 5129 | { |
4402 | struct nfs4_openowner *open_sop = NULL; | 5130 | struct nfs4_openowner *open_sop = NULL; |
4403 | struct nfs4_lockowner *lock_sop = NULL; | 5131 | struct nfs4_lockowner *lock_sop = NULL; |
4404 | struct nfs4_ol_stateid *lock_stp; | 5132 | struct nfs4_ol_stateid *lock_stp = NULL; |
5133 | struct nfs4_ol_stateid *open_stp = NULL; | ||
5134 | struct nfs4_file *fp; | ||
4405 | struct file *filp = NULL; | 5135 | struct file *filp = NULL; |
4406 | struct file_lock *file_lock = NULL; | 5136 | struct file_lock *file_lock = NULL; |
4407 | struct file_lock *conflock = NULL; | 5137 | struct file_lock *conflock = NULL; |
4408 | __be32 status = 0; | 5138 | __be32 status = 0; |
4409 | bool new_state = false; | ||
4410 | int lkflg; | 5139 | int lkflg; |
4411 | int err; | 5140 | int err; |
5141 | bool new = false; | ||
4412 | struct net *net = SVC_NET(rqstp); | 5142 | struct net *net = SVC_NET(rqstp); |
4413 | struct nfsd_net *nn = net_generic(net, nfsd_net_id); | 5143 | struct nfsd_net *nn = net_generic(net, nfsd_net_id); |
4414 | 5144 | ||
@@ -4425,11 +5155,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
4425 | return status; | 5155 | return status; |
4426 | } | 5156 | } |
4427 | 5157 | ||
4428 | nfs4_lock_state(); | ||
4429 | |||
4430 | if (lock->lk_is_new) { | 5158 | if (lock->lk_is_new) { |
4431 | struct nfs4_ol_stateid *open_stp = NULL; | ||
4432 | |||
4433 | if (nfsd4_has_session(cstate)) | 5159 | if (nfsd4_has_session(cstate)) |
4434 | /* See rfc 5661 18.10.3: given clientid is ignored: */ | 5160 | /* See rfc 5661 18.10.3: given clientid is ignored: */ |
4435 | memcpy(&lock->v.new.clientid, | 5161 | memcpy(&lock->v.new.clientid, |
@@ -4453,12 +5179,13 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
4453 | &lock->v.new.clientid)) | 5179 | &lock->v.new.clientid)) |
4454 | goto out; | 5180 | goto out; |
4455 | status = lookup_or_create_lock_state(cstate, open_stp, lock, | 5181 | status = lookup_or_create_lock_state(cstate, open_stp, lock, |
4456 | &lock_stp, &new_state); | 5182 | &lock_stp, &new); |
4457 | } else | 5183 | } else { |
4458 | status = nfs4_preprocess_seqid_op(cstate, | 5184 | status = nfs4_preprocess_seqid_op(cstate, |
4459 | lock->lk_old_lock_seqid, | 5185 | lock->lk_old_lock_seqid, |
4460 | &lock->lk_old_lock_stateid, | 5186 | &lock->lk_old_lock_stateid, |
4461 | NFS4_LOCK_STID, &lock_stp, nn); | 5187 | NFS4_LOCK_STID, &lock_stp, nn); |
5188 | } | ||
4462 | if (status) | 5189 | if (status) |
4463 | goto out; | 5190 | goto out; |
4464 | lock_sop = lockowner(lock_stp->st_stateowner); | 5191 | lock_sop = lockowner(lock_stp->st_stateowner); |
@@ -4482,20 +5209,25 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
4482 | goto out; | 5209 | goto out; |
4483 | } | 5210 | } |
4484 | 5211 | ||
5212 | fp = lock_stp->st_stid.sc_file; | ||
4485 | locks_init_lock(file_lock); | 5213 | locks_init_lock(file_lock); |
4486 | switch (lock->lk_type) { | 5214 | switch (lock->lk_type) { |
4487 | case NFS4_READ_LT: | 5215 | case NFS4_READ_LT: |
4488 | case NFS4_READW_LT: | 5216 | case NFS4_READW_LT: |
4489 | filp = find_readable_file(lock_stp->st_file); | 5217 | spin_lock(&fp->fi_lock); |
5218 | filp = find_readable_file_locked(fp); | ||
4490 | if (filp) | 5219 | if (filp) |
4491 | get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ); | 5220 | get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ); |
5221 | spin_unlock(&fp->fi_lock); | ||
4492 | file_lock->fl_type = F_RDLCK; | 5222 | file_lock->fl_type = F_RDLCK; |
4493 | break; | 5223 | break; |
4494 | case NFS4_WRITE_LT: | 5224 | case NFS4_WRITE_LT: |
4495 | case NFS4_WRITEW_LT: | 5225 | case NFS4_WRITEW_LT: |
4496 | filp = find_writeable_file(lock_stp->st_file); | 5226 | spin_lock(&fp->fi_lock); |
5227 | filp = find_writeable_file_locked(fp); | ||
4497 | if (filp) | 5228 | if (filp) |
4498 | get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE); | 5229 | get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE); |
5230 | spin_unlock(&fp->fi_lock); | ||
4499 | file_lock->fl_type = F_WRLCK; | 5231 | file_lock->fl_type = F_WRLCK; |
4500 | break; | 5232 | break; |
4501 | default: | 5233 | default: |
@@ -4544,11 +5276,27 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
4544 | break; | 5276 | break; |
4545 | } | 5277 | } |
4546 | out: | 5278 | out: |
4547 | if (status && new_state) | 5279 | if (filp) |
4548 | release_lockowner(lock_sop); | 5280 | fput(filp); |
5281 | if (lock_stp) { | ||
5282 | /* Bump seqid manually if the 4.0 replay owner is openowner */ | ||
5283 | if (cstate->replay_owner && | ||
5284 | cstate->replay_owner != &lock_sop->lo_owner && | ||
5285 | seqid_mutating_err(ntohl(status))) | ||
5286 | lock_sop->lo_owner.so_seqid++; | ||
5287 | |||
5288 | /* | ||
5289 | * If this is a new, never-before-used stateid, and we are | ||
5290 | * returning an error, then just go ahead and release it. | ||
5291 | */ | ||
5292 | if (status && new) | ||
5293 | release_lock_stateid(lock_stp); | ||
5294 | |||
5295 | nfs4_put_stid(&lock_stp->st_stid); | ||
5296 | } | ||
5297 | if (open_stp) | ||
5298 | nfs4_put_stid(&open_stp->st_stid); | ||
4549 | nfsd4_bump_seqid(cstate, status); | 5299 | nfsd4_bump_seqid(cstate, status); |
4550 | if (!cstate->replay_owner) | ||
4551 | nfs4_unlock_state(); | ||
4552 | if (file_lock) | 5300 | if (file_lock) |
4553 | locks_free_lock(file_lock); | 5301 | locks_free_lock(file_lock); |
4554 | if (conflock) | 5302 | if (conflock) |
@@ -4580,9 +5328,8 @@ __be32 | |||
4580 | nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | 5328 | nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, |
4581 | struct nfsd4_lockt *lockt) | 5329 | struct nfsd4_lockt *lockt) |
4582 | { | 5330 | { |
4583 | struct inode *inode; | ||
4584 | struct file_lock *file_lock = NULL; | 5331 | struct file_lock *file_lock = NULL; |
4585 | struct nfs4_lockowner *lo; | 5332 | struct nfs4_lockowner *lo = NULL; |
4586 | __be32 status; | 5333 | __be32 status; |
4587 | struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); | 5334 | struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); |
4588 | 5335 | ||
@@ -4592,10 +5339,8 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
4592 | if (check_lock_length(lockt->lt_offset, lockt->lt_length)) | 5339 | if (check_lock_length(lockt->lt_offset, lockt->lt_length)) |
4593 | return nfserr_inval; | 5340 | return nfserr_inval; |
4594 | 5341 | ||
4595 | nfs4_lock_state(); | ||
4596 | |||
4597 | if (!nfsd4_has_session(cstate)) { | 5342 | if (!nfsd4_has_session(cstate)) { |
4598 | status = lookup_clientid(&lockt->lt_clientid, false, nn, NULL); | 5343 | status = lookup_clientid(&lockt->lt_clientid, cstate, nn); |
4599 | if (status) | 5344 | if (status) |
4600 | goto out; | 5345 | goto out; |
4601 | } | 5346 | } |
@@ -4603,7 +5348,6 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
4603 | if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) | 5348 | if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) |
4604 | goto out; | 5349 | goto out; |
4605 | 5350 | ||
4606 | inode = cstate->current_fh.fh_dentry->d_inode; | ||
4607 | file_lock = locks_alloc_lock(); | 5351 | file_lock = locks_alloc_lock(); |
4608 | if (!file_lock) { | 5352 | if (!file_lock) { |
4609 | dprintk("NFSD: %s: unable to allocate lock!\n", __func__); | 5353 | dprintk("NFSD: %s: unable to allocate lock!\n", __func__); |
@@ -4626,7 +5370,8 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
4626 | goto out; | 5370 | goto out; |
4627 | } | 5371 | } |
4628 | 5372 | ||
4629 | lo = find_lockowner_str(inode, &lockt->lt_clientid, &lockt->lt_owner, nn); | 5373 | lo = find_lockowner_str(&lockt->lt_clientid, &lockt->lt_owner, |
5374 | cstate->clp); | ||
4630 | if (lo) | 5375 | if (lo) |
4631 | file_lock->fl_owner = (fl_owner_t)lo; | 5376 | file_lock->fl_owner = (fl_owner_t)lo; |
4632 | file_lock->fl_pid = current->tgid; | 5377 | file_lock->fl_pid = current->tgid; |
@@ -4646,7 +5391,8 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
4646 | nfs4_set_lock_denied(file_lock, &lockt->lt_denied); | 5391 | nfs4_set_lock_denied(file_lock, &lockt->lt_denied); |
4647 | } | 5392 | } |
4648 | out: | 5393 | out: |
4649 | nfs4_unlock_state(); | 5394 | if (lo) |
5395 | nfs4_put_stateowner(&lo->lo_owner); | ||
4650 | if (file_lock) | 5396 | if (file_lock) |
4651 | locks_free_lock(file_lock); | 5397 | locks_free_lock(file_lock); |
4652 | return status; | 5398 | return status; |
@@ -4670,23 +5416,21 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
4670 | if (check_lock_length(locku->lu_offset, locku->lu_length)) | 5416 | if (check_lock_length(locku->lu_offset, locku->lu_length)) |
4671 | return nfserr_inval; | 5417 | return nfserr_inval; |
4672 | 5418 | ||
4673 | nfs4_lock_state(); | ||
4674 | |||
4675 | status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid, | 5419 | status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid, |
4676 | &locku->lu_stateid, NFS4_LOCK_STID, | 5420 | &locku->lu_stateid, NFS4_LOCK_STID, |
4677 | &stp, nn); | 5421 | &stp, nn); |
4678 | if (status) | 5422 | if (status) |
4679 | goto out; | 5423 | goto out; |
4680 | filp = find_any_file(stp->st_file); | 5424 | filp = find_any_file(stp->st_stid.sc_file); |
4681 | if (!filp) { | 5425 | if (!filp) { |
4682 | status = nfserr_lock_range; | 5426 | status = nfserr_lock_range; |
4683 | goto out; | 5427 | goto put_stateid; |
4684 | } | 5428 | } |
4685 | file_lock = locks_alloc_lock(); | 5429 | file_lock = locks_alloc_lock(); |
4686 | if (!file_lock) { | 5430 | if (!file_lock) { |
4687 | dprintk("NFSD: %s: unable to allocate lock!\n", __func__); | 5431 | dprintk("NFSD: %s: unable to allocate lock!\n", __func__); |
4688 | status = nfserr_jukebox; | 5432 | status = nfserr_jukebox; |
4689 | goto out; | 5433 | goto fput; |
4690 | } | 5434 | } |
4691 | locks_init_lock(file_lock); | 5435 | locks_init_lock(file_lock); |
4692 | file_lock->fl_type = F_UNLCK; | 5436 | file_lock->fl_type = F_UNLCK; |
@@ -4708,41 +5452,51 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
4708 | } | 5452 | } |
4709 | update_stateid(&stp->st_stid.sc_stateid); | 5453 | update_stateid(&stp->st_stid.sc_stateid); |
4710 | memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); | 5454 | memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); |
4711 | 5455 | fput: | |
5456 | fput(filp); | ||
5457 | put_stateid: | ||
5458 | nfs4_put_stid(&stp->st_stid); | ||
4712 | out: | 5459 | out: |
4713 | nfsd4_bump_seqid(cstate, status); | 5460 | nfsd4_bump_seqid(cstate, status); |
4714 | if (!cstate->replay_owner) | ||
4715 | nfs4_unlock_state(); | ||
4716 | if (file_lock) | 5461 | if (file_lock) |
4717 | locks_free_lock(file_lock); | 5462 | locks_free_lock(file_lock); |
4718 | return status; | 5463 | return status; |
4719 | 5464 | ||
4720 | out_nfserr: | 5465 | out_nfserr: |
4721 | status = nfserrno(err); | 5466 | status = nfserrno(err); |
4722 | goto out; | 5467 | goto fput; |
4723 | } | 5468 | } |
4724 | 5469 | ||
4725 | /* | 5470 | /* |
4726 | * returns | 5471 | * returns |
4727 | * 1: locks held by lockowner | 5472 | * true: locks held by lockowner |
4728 | * 0: no locks held by lockowner | 5473 | * false: no locks held by lockowner |
4729 | */ | 5474 | */ |
4730 | static int | 5475 | static bool |
4731 | check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner) | 5476 | check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner) |
4732 | { | 5477 | { |
4733 | struct file_lock **flpp; | 5478 | struct file_lock **flpp; |
4734 | struct inode *inode = filp->fi_inode; | 5479 | int status = false; |
4735 | int status = 0; | 5480 | struct file *filp = find_any_file(fp); |
5481 | struct inode *inode; | ||
5482 | |||
5483 | if (!filp) { | ||
5484 | /* Any valid lock stateid should have some sort of access */ | ||
5485 | WARN_ON_ONCE(1); | ||
5486 | return status; | ||
5487 | } | ||
5488 | |||
5489 | inode = file_inode(filp); | ||
4736 | 5490 | ||
4737 | spin_lock(&inode->i_lock); | 5491 | spin_lock(&inode->i_lock); |
4738 | for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) { | 5492 | for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) { |
4739 | if ((*flpp)->fl_owner == (fl_owner_t)lowner) { | 5493 | if ((*flpp)->fl_owner == (fl_owner_t)lowner) { |
4740 | status = 1; | 5494 | status = true; |
4741 | goto out; | 5495 | break; |
4742 | } | 5496 | } |
4743 | } | 5497 | } |
4744 | out: | ||
4745 | spin_unlock(&inode->i_lock); | 5498 | spin_unlock(&inode->i_lock); |
5499 | fput(filp); | ||
4746 | return status; | 5500 | return status; |
4747 | } | 5501 | } |
4748 | 5502 | ||
@@ -4753,53 +5507,46 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp, | |||
4753 | { | 5507 | { |
4754 | clientid_t *clid = &rlockowner->rl_clientid; | 5508 | clientid_t *clid = &rlockowner->rl_clientid; |
4755 | struct nfs4_stateowner *sop; | 5509 | struct nfs4_stateowner *sop; |
4756 | struct nfs4_lockowner *lo; | 5510 | struct nfs4_lockowner *lo = NULL; |
4757 | struct nfs4_ol_stateid *stp; | 5511 | struct nfs4_ol_stateid *stp; |
4758 | struct xdr_netobj *owner = &rlockowner->rl_owner; | 5512 | struct xdr_netobj *owner = &rlockowner->rl_owner; |
4759 | struct list_head matches; | 5513 | unsigned int hashval = ownerstr_hashval(owner); |
4760 | unsigned int hashval = ownerstr_hashval(clid->cl_id, owner); | ||
4761 | __be32 status; | 5514 | __be32 status; |
4762 | struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); | 5515 | struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); |
5516 | struct nfs4_client *clp; | ||
4763 | 5517 | ||
4764 | dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n", | 5518 | dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n", |
4765 | clid->cl_boot, clid->cl_id); | 5519 | clid->cl_boot, clid->cl_id); |
4766 | 5520 | ||
4767 | nfs4_lock_state(); | 5521 | status = lookup_clientid(clid, cstate, nn); |
4768 | |||
4769 | status = lookup_clientid(clid, cstate->minorversion, nn, NULL); | ||
4770 | if (status) | 5522 | if (status) |
4771 | goto out; | 5523 | return status; |
4772 | 5524 | ||
4773 | status = nfserr_locks_held; | 5525 | clp = cstate->clp; |
4774 | INIT_LIST_HEAD(&matches); | 5526 | /* Find the matching lock stateowner */ |
5527 | spin_lock(&clp->cl_lock); | ||
5528 | list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval], | ||
5529 | so_strhash) { | ||
4775 | 5530 | ||
4776 | list_for_each_entry(sop, &nn->ownerstr_hashtbl[hashval], so_strhash) { | 5531 | if (sop->so_is_open_owner || !same_owner_str(sop, owner)) |
4777 | if (sop->so_is_open_owner) | ||
4778 | continue; | 5532 | continue; |
4779 | if (!same_owner_str(sop, owner, clid)) | 5533 | |
4780 | continue; | 5534 | /* see if there are still any locks associated with it */ |
4781 | list_for_each_entry(stp, &sop->so_stateids, | 5535 | lo = lockowner(sop); |
4782 | st_perstateowner) { | 5536 | list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) { |
4783 | lo = lockowner(sop); | 5537 | if (check_for_locks(stp->st_stid.sc_file, lo)) { |
4784 | if (check_for_locks(stp->st_file, lo)) | 5538 | status = nfserr_locks_held; |
4785 | goto out; | 5539 | spin_unlock(&clp->cl_lock); |
4786 | list_add(&lo->lo_list, &matches); | 5540 | return status; |
5541 | } | ||
4787 | } | 5542 | } |
5543 | |||
5544 | atomic_inc(&sop->so_count); | ||
5545 | break; | ||
4788 | } | 5546 | } |
4789 | /* Clients probably won't expect us to return with some (but not all) | 5547 | spin_unlock(&clp->cl_lock); |
4790 | * of the lockowner state released; so don't release any until all | 5548 | if (lo) |
4791 | * have been checked. */ | ||
4792 | status = nfs_ok; | ||
4793 | while (!list_empty(&matches)) { | ||
4794 | lo = list_entry(matches.next, struct nfs4_lockowner, | ||
4795 | lo_list); | ||
4796 | /* unhash_stateowner deletes so_perclient only | ||
4797 | * for openowners. */ | ||
4798 | list_del(&lo->lo_list); | ||
4799 | release_lockowner(lo); | 5549 | release_lockowner(lo); |
4800 | } | ||
4801 | out: | ||
4802 | nfs4_unlock_state(); | ||
4803 | return status; | 5550 | return status; |
4804 | } | 5551 | } |
4805 | 5552 | ||
@@ -4887,34 +5634,123 @@ nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn) | |||
4887 | * Called from OPEN. Look for clientid in reclaim list. | 5634 | * Called from OPEN. Look for clientid in reclaim list. |
4888 | */ | 5635 | */ |
4889 | __be32 | 5636 | __be32 |
4890 | nfs4_check_open_reclaim(clientid_t *clid, bool sessions, struct nfsd_net *nn) | 5637 | nfs4_check_open_reclaim(clientid_t *clid, |
5638 | struct nfsd4_compound_state *cstate, | ||
5639 | struct nfsd_net *nn) | ||
4891 | { | 5640 | { |
4892 | struct nfs4_client *clp; | 5641 | __be32 status; |
4893 | 5642 | ||
4894 | /* find clientid in conf_id_hashtbl */ | 5643 | /* find clientid in conf_id_hashtbl */ |
4895 | clp = find_confirmed_client(clid, sessions, nn); | 5644 | status = lookup_clientid(clid, cstate, nn); |
4896 | if (clp == NULL) | 5645 | if (status) |
4897 | return nfserr_reclaim_bad; | 5646 | return nfserr_reclaim_bad; |
4898 | 5647 | ||
4899 | return nfsd4_client_record_check(clp) ? nfserr_reclaim_bad : nfs_ok; | 5648 | if (nfsd4_client_record_check(cstate->clp)) |
5649 | return nfserr_reclaim_bad; | ||
5650 | |||
5651 | return nfs_ok; | ||
4900 | } | 5652 | } |
4901 | 5653 | ||
4902 | #ifdef CONFIG_NFSD_FAULT_INJECTION | 5654 | #ifdef CONFIG_NFSD_FAULT_INJECTION |
5655 | static inline void | ||
5656 | put_client(struct nfs4_client *clp) | ||
5657 | { | ||
5658 | atomic_dec(&clp->cl_refcount); | ||
5659 | } | ||
4903 | 5660 | ||
4904 | u64 nfsd_forget_client(struct nfs4_client *clp, u64 max) | 5661 | static struct nfs4_client * |
5662 | nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size) | ||
4905 | { | 5663 | { |
4906 | if (mark_client_expired(clp)) | 5664 | struct nfs4_client *clp; |
4907 | return 0; | 5665 | struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, |
4908 | expire_client(clp); | 5666 | nfsd_net_id); |
4909 | return 1; | 5667 | |
5668 | if (!nfsd_netns_ready(nn)) | ||
5669 | return NULL; | ||
5670 | |||
5671 | list_for_each_entry(clp, &nn->client_lru, cl_lru) { | ||
5672 | if (memcmp(&clp->cl_addr, addr, addr_size) == 0) | ||
5673 | return clp; | ||
5674 | } | ||
5675 | return NULL; | ||
4910 | } | 5676 | } |
4911 | 5677 | ||
4912 | u64 nfsd_print_client(struct nfs4_client *clp, u64 num) | 5678 | u64 |
5679 | nfsd_inject_print_clients(void) | ||
4913 | { | 5680 | { |
5681 | struct nfs4_client *clp; | ||
5682 | u64 count = 0; | ||
5683 | struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, | ||
5684 | nfsd_net_id); | ||
4914 | char buf[INET6_ADDRSTRLEN]; | 5685 | char buf[INET6_ADDRSTRLEN]; |
4915 | rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf)); | 5686 | |
4916 | printk(KERN_INFO "NFS Client: %s\n", buf); | 5687 | if (!nfsd_netns_ready(nn)) |
4917 | return 1; | 5688 | return 0; |
5689 | |||
5690 | spin_lock(&nn->client_lock); | ||
5691 | list_for_each_entry(clp, &nn->client_lru, cl_lru) { | ||
5692 | rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf)); | ||
5693 | pr_info("NFS Client: %s\n", buf); | ||
5694 | ++count; | ||
5695 | } | ||
5696 | spin_unlock(&nn->client_lock); | ||
5697 | |||
5698 | return count; | ||
5699 | } | ||
5700 | |||
5701 | u64 | ||
5702 | nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size) | ||
5703 | { | ||
5704 | u64 count = 0; | ||
5705 | struct nfs4_client *clp; | ||
5706 | struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, | ||
5707 | nfsd_net_id); | ||
5708 | |||
5709 | if (!nfsd_netns_ready(nn)) | ||
5710 | return count; | ||
5711 | |||
5712 | spin_lock(&nn->client_lock); | ||
5713 | clp = nfsd_find_client(addr, addr_size); | ||
5714 | if (clp) { | ||
5715 | if (mark_client_expired_locked(clp) == nfs_ok) | ||
5716 | ++count; | ||
5717 | else | ||
5718 | clp = NULL; | ||
5719 | } | ||
5720 | spin_unlock(&nn->client_lock); | ||
5721 | |||
5722 | if (clp) | ||
5723 | expire_client(clp); | ||
5724 | |||
5725 | return count; | ||
5726 | } | ||
5727 | |||
5728 | u64 | ||
5729 | nfsd_inject_forget_clients(u64 max) | ||
5730 | { | ||
5731 | u64 count = 0; | ||
5732 | struct nfs4_client *clp, *next; | ||
5733 | struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, | ||
5734 | nfsd_net_id); | ||
5735 | LIST_HEAD(reaplist); | ||
5736 | |||
5737 | if (!nfsd_netns_ready(nn)) | ||
5738 | return count; | ||
5739 | |||
5740 | spin_lock(&nn->client_lock); | ||
5741 | list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) { | ||
5742 | if (mark_client_expired_locked(clp) == nfs_ok) { | ||
5743 | list_add(&clp->cl_lru, &reaplist); | ||
5744 | if (max != 0 && ++count >= max) | ||
5745 | break; | ||
5746 | } | ||
5747 | } | ||
5748 | spin_unlock(&nn->client_lock); | ||
5749 | |||
5750 | list_for_each_entry_safe(clp, next, &reaplist, cl_lru) | ||
5751 | expire_client(clp); | ||
5752 | |||
5753 | return count; | ||
4918 | } | 5754 | } |
4919 | 5755 | ||
4920 | static void nfsd_print_count(struct nfs4_client *clp, unsigned int count, | 5756 | static void nfsd_print_count(struct nfs4_client *clp, unsigned int count, |
@@ -4925,158 +5761,484 @@ static void nfsd_print_count(struct nfs4_client *clp, unsigned int count, | |||
4925 | printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type); | 5761 | printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type); |
4926 | } | 5762 | } |
4927 | 5763 | ||
4928 | static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max, void (*func)(struct nfs4_lockowner *)) | 5764 | static void |
5765 | nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst, | ||
5766 | struct list_head *collect) | ||
5767 | { | ||
5768 | struct nfs4_client *clp = lst->st_stid.sc_client; | ||
5769 | struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, | ||
5770 | nfsd_net_id); | ||
5771 | |||
5772 | if (!collect) | ||
5773 | return; | ||
5774 | |||
5775 | lockdep_assert_held(&nn->client_lock); | ||
5776 | atomic_inc(&clp->cl_refcount); | ||
5777 | list_add(&lst->st_locks, collect); | ||
5778 | } | ||
5779 | |||
5780 | static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max, | ||
5781 | struct list_head *collect, | ||
5782 | void (*func)(struct nfs4_ol_stateid *)) | ||
4929 | { | 5783 | { |
4930 | struct nfs4_openowner *oop; | 5784 | struct nfs4_openowner *oop; |
4931 | struct nfs4_lockowner *lop, *lo_next; | ||
4932 | struct nfs4_ol_stateid *stp, *st_next; | 5785 | struct nfs4_ol_stateid *stp, *st_next; |
5786 | struct nfs4_ol_stateid *lst, *lst_next; | ||
4933 | u64 count = 0; | 5787 | u64 count = 0; |
4934 | 5788 | ||
5789 | spin_lock(&clp->cl_lock); | ||
4935 | list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) { | 5790 | list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) { |
4936 | list_for_each_entry_safe(stp, st_next, &oop->oo_owner.so_stateids, st_perstateowner) { | 5791 | list_for_each_entry_safe(stp, st_next, |
4937 | list_for_each_entry_safe(lop, lo_next, &stp->st_lockowners, lo_perstateid) { | 5792 | &oop->oo_owner.so_stateids, st_perstateowner) { |
4938 | if (func) | 5793 | list_for_each_entry_safe(lst, lst_next, |
4939 | func(lop); | 5794 | &stp->st_locks, st_locks) { |
4940 | if (++count == max) | 5795 | if (func) { |
4941 | return count; | 5796 | func(lst); |
5797 | nfsd_inject_add_lock_to_list(lst, | ||
5798 | collect); | ||
5799 | } | ||
5800 | ++count; | ||
5801 | /* | ||
5802 | * Despite the fact that these functions deal | ||
5803 | * with 64-bit integers for "count", we must | ||
5804 | * ensure that it doesn't blow up the | ||
5805 | * clp->cl_refcount. Throw a warning if we | ||
5806 | * start to approach INT_MAX here. | ||
5807 | */ | ||
5808 | WARN_ON_ONCE(count == (INT_MAX / 2)); | ||
5809 | if (count == max) | ||
5810 | goto out; | ||
4942 | } | 5811 | } |
4943 | } | 5812 | } |
4944 | } | 5813 | } |
5814 | out: | ||
5815 | spin_unlock(&clp->cl_lock); | ||
4945 | 5816 | ||
4946 | return count; | 5817 | return count; |
4947 | } | 5818 | } |
4948 | 5819 | ||
4949 | u64 nfsd_forget_client_locks(struct nfs4_client *clp, u64 max) | 5820 | static u64 |
5821 | nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect, | ||
5822 | u64 max) | ||
4950 | { | 5823 | { |
4951 | return nfsd_foreach_client_lock(clp, max, release_lockowner); | 5824 | return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid); |
4952 | } | 5825 | } |
4953 | 5826 | ||
4954 | u64 nfsd_print_client_locks(struct nfs4_client *clp, u64 max) | 5827 | static u64 |
5828 | nfsd_print_client_locks(struct nfs4_client *clp) | ||
4955 | { | 5829 | { |
4956 | u64 count = nfsd_foreach_client_lock(clp, max, NULL); | 5830 | u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL); |
4957 | nfsd_print_count(clp, count, "locked files"); | 5831 | nfsd_print_count(clp, count, "locked files"); |
4958 | return count; | 5832 | return count; |
4959 | } | 5833 | } |
4960 | 5834 | ||
4961 | static u64 nfsd_foreach_client_open(struct nfs4_client *clp, u64 max, void (*func)(struct nfs4_openowner *)) | 5835 | u64 |
5836 | nfsd_inject_print_locks(void) | ||
5837 | { | ||
5838 | struct nfs4_client *clp; | ||
5839 | u64 count = 0; | ||
5840 | struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, | ||
5841 | nfsd_net_id); | ||
5842 | |||
5843 | if (!nfsd_netns_ready(nn)) | ||
5844 | return 0; | ||
5845 | |||
5846 | spin_lock(&nn->client_lock); | ||
5847 | list_for_each_entry(clp, &nn->client_lru, cl_lru) | ||
5848 | count += nfsd_print_client_locks(clp); | ||
5849 | spin_unlock(&nn->client_lock); | ||
5850 | |||
5851 | return count; | ||
5852 | } | ||
5853 | |||
5854 | static void | ||
5855 | nfsd_reap_locks(struct list_head *reaplist) | ||
5856 | { | ||
5857 | struct nfs4_client *clp; | ||
5858 | struct nfs4_ol_stateid *stp, *next; | ||
5859 | |||
5860 | list_for_each_entry_safe(stp, next, reaplist, st_locks) { | ||
5861 | list_del_init(&stp->st_locks); | ||
5862 | clp = stp->st_stid.sc_client; | ||
5863 | nfs4_put_stid(&stp->st_stid); | ||
5864 | put_client(clp); | ||
5865 | } | ||
5866 | } | ||
5867 | |||
5868 | u64 | ||
5869 | nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size) | ||
5870 | { | ||
5871 | unsigned int count = 0; | ||
5872 | struct nfs4_client *clp; | ||
5873 | struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, | ||
5874 | nfsd_net_id); | ||
5875 | LIST_HEAD(reaplist); | ||
5876 | |||
5877 | if (!nfsd_netns_ready(nn)) | ||
5878 | return count; | ||
5879 | |||
5880 | spin_lock(&nn->client_lock); | ||
5881 | clp = nfsd_find_client(addr, addr_size); | ||
5882 | if (clp) | ||
5883 | count = nfsd_collect_client_locks(clp, &reaplist, 0); | ||
5884 | spin_unlock(&nn->client_lock); | ||
5885 | nfsd_reap_locks(&reaplist); | ||
5886 | return count; | ||
5887 | } | ||
5888 | |||
5889 | u64 | ||
5890 | nfsd_inject_forget_locks(u64 max) | ||
5891 | { | ||
5892 | u64 count = 0; | ||
5893 | struct nfs4_client *clp; | ||
5894 | struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, | ||
5895 | nfsd_net_id); | ||
5896 | LIST_HEAD(reaplist); | ||
5897 | |||
5898 | if (!nfsd_netns_ready(nn)) | ||
5899 | return count; | ||
5900 | |||
5901 | spin_lock(&nn->client_lock); | ||
5902 | list_for_each_entry(clp, &nn->client_lru, cl_lru) { | ||
5903 | count += nfsd_collect_client_locks(clp, &reaplist, max - count); | ||
5904 | if (max != 0 && count >= max) | ||
5905 | break; | ||
5906 | } | ||
5907 | spin_unlock(&nn->client_lock); | ||
5908 | nfsd_reap_locks(&reaplist); | ||
5909 | return count; | ||
5910 | } | ||
5911 | |||
5912 | static u64 | ||
5913 | nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max, | ||
5914 | struct list_head *collect, | ||
5915 | void (*func)(struct nfs4_openowner *)) | ||
4962 | { | 5916 | { |
4963 | struct nfs4_openowner *oop, *next; | 5917 | struct nfs4_openowner *oop, *next; |
5918 | struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, | ||
5919 | nfsd_net_id); | ||
4964 | u64 count = 0; | 5920 | u64 count = 0; |
4965 | 5921 | ||
5922 | lockdep_assert_held(&nn->client_lock); | ||
5923 | |||
5924 | spin_lock(&clp->cl_lock); | ||
4966 | list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) { | 5925 | list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) { |
4967 | if (func) | 5926 | if (func) { |
4968 | func(oop); | 5927 | func(oop); |
4969 | if (++count == max) | 5928 | if (collect) { |
5929 | atomic_inc(&clp->cl_refcount); | ||
5930 | list_add(&oop->oo_perclient, collect); | ||
5931 | } | ||
5932 | } | ||
5933 | ++count; | ||
5934 | /* | ||
5935 | * Despite the fact that these functions deal with | ||
5936 | * 64-bit integers for "count", we must ensure that | ||
5937 | * it doesn't blow up the clp->cl_refcount. Throw a | ||
5938 | * warning if we start to approach INT_MAX here. | ||
5939 | */ | ||
5940 | WARN_ON_ONCE(count == (INT_MAX / 2)); | ||
5941 | if (count == max) | ||
4970 | break; | 5942 | break; |
4971 | } | 5943 | } |
5944 | spin_unlock(&clp->cl_lock); | ||
4972 | 5945 | ||
4973 | return count; | 5946 | return count; |
4974 | } | 5947 | } |
4975 | 5948 | ||
4976 | u64 nfsd_forget_client_openowners(struct nfs4_client *clp, u64 max) | 5949 | static u64 |
5950 | nfsd_print_client_openowners(struct nfs4_client *clp) | ||
4977 | { | 5951 | { |
4978 | return nfsd_foreach_client_open(clp, max, release_openowner); | 5952 | u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL); |
5953 | |||
5954 | nfsd_print_count(clp, count, "openowners"); | ||
5955 | return count; | ||
4979 | } | 5956 | } |
4980 | 5957 | ||
4981 | u64 nfsd_print_client_openowners(struct nfs4_client *clp, u64 max) | 5958 | static u64 |
5959 | nfsd_collect_client_openowners(struct nfs4_client *clp, | ||
5960 | struct list_head *collect, u64 max) | ||
4982 | { | 5961 | { |
4983 | u64 count = nfsd_foreach_client_open(clp, max, NULL); | 5962 | return nfsd_foreach_client_openowner(clp, max, collect, |
4984 | nfsd_print_count(clp, count, "open files"); | 5963 | unhash_openowner_locked); |
4985 | return count; | ||
4986 | } | 5964 | } |
4987 | 5965 | ||
4988 | static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max, | 5966 | u64 |
4989 | struct list_head *victims) | 5967 | nfsd_inject_print_openowners(void) |
4990 | { | 5968 | { |
4991 | struct nfs4_delegation *dp, *next; | 5969 | struct nfs4_client *clp; |
4992 | u64 count = 0; | 5970 | u64 count = 0; |
5971 | struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, | ||
5972 | nfsd_net_id); | ||
5973 | |||
5974 | if (!nfsd_netns_ready(nn)) | ||
5975 | return 0; | ||
5976 | |||
5977 | spin_lock(&nn->client_lock); | ||
5978 | list_for_each_entry(clp, &nn->client_lru, cl_lru) | ||
5979 | count += nfsd_print_client_openowners(clp); | ||
5980 | spin_unlock(&nn->client_lock); | ||
4993 | 5981 | ||
4994 | lockdep_assert_held(&state_lock); | ||
4995 | list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) { | ||
4996 | if (victims) | ||
4997 | list_move(&dp->dl_recall_lru, victims); | ||
4998 | if (++count == max) | ||
4999 | break; | ||
5000 | } | ||
5001 | return count; | 5982 | return count; |
5002 | } | 5983 | } |
5003 | 5984 | ||
5004 | u64 nfsd_forget_client_delegations(struct nfs4_client *clp, u64 max) | 5985 | static void |
5986 | nfsd_reap_openowners(struct list_head *reaplist) | ||
5005 | { | 5987 | { |
5006 | struct nfs4_delegation *dp, *next; | 5988 | struct nfs4_client *clp; |
5007 | LIST_HEAD(victims); | 5989 | struct nfs4_openowner *oop, *next; |
5008 | u64 count; | ||
5009 | 5990 | ||
5010 | spin_lock(&state_lock); | 5991 | list_for_each_entry_safe(oop, next, reaplist, oo_perclient) { |
5011 | count = nfsd_find_all_delegations(clp, max, &victims); | 5992 | list_del_init(&oop->oo_perclient); |
5012 | spin_unlock(&state_lock); | 5993 | clp = oop->oo_owner.so_client; |
5994 | release_openowner(oop); | ||
5995 | put_client(clp); | ||
5996 | } | ||
5997 | } | ||
5013 | 5998 | ||
5014 | list_for_each_entry_safe(dp, next, &victims, dl_recall_lru) | 5999 | u64 |
5015 | revoke_delegation(dp); | 6000 | nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr, |
6001 | size_t addr_size) | ||
6002 | { | ||
6003 | unsigned int count = 0; | ||
6004 | struct nfs4_client *clp; | ||
6005 | struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, | ||
6006 | nfsd_net_id); | ||
6007 | LIST_HEAD(reaplist); | ||
5016 | 6008 | ||
6009 | if (!nfsd_netns_ready(nn)) | ||
6010 | return count; | ||
6011 | |||
6012 | spin_lock(&nn->client_lock); | ||
6013 | clp = nfsd_find_client(addr, addr_size); | ||
6014 | if (clp) | ||
6015 | count = nfsd_collect_client_openowners(clp, &reaplist, 0); | ||
6016 | spin_unlock(&nn->client_lock); | ||
6017 | nfsd_reap_openowners(&reaplist); | ||
5017 | return count; | 6018 | return count; |
5018 | } | 6019 | } |
5019 | 6020 | ||
5020 | u64 nfsd_recall_client_delegations(struct nfs4_client *clp, u64 max) | 6021 | u64 |
6022 | nfsd_inject_forget_openowners(u64 max) | ||
5021 | { | 6023 | { |
5022 | struct nfs4_delegation *dp, *next; | 6024 | u64 count = 0; |
5023 | LIST_HEAD(victims); | 6025 | struct nfs4_client *clp; |
5024 | u64 count; | 6026 | struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, |
6027 | nfsd_net_id); | ||
6028 | LIST_HEAD(reaplist); | ||
5025 | 6029 | ||
5026 | spin_lock(&state_lock); | 6030 | if (!nfsd_netns_ready(nn)) |
5027 | count = nfsd_find_all_delegations(clp, max, &victims); | 6031 | return count; |
5028 | list_for_each_entry_safe(dp, next, &victims, dl_recall_lru) | ||
5029 | nfsd_break_one_deleg(dp); | ||
5030 | spin_unlock(&state_lock); | ||
5031 | 6032 | ||
6033 | spin_lock(&nn->client_lock); | ||
6034 | list_for_each_entry(clp, &nn->client_lru, cl_lru) { | ||
6035 | count += nfsd_collect_client_openowners(clp, &reaplist, | ||
6036 | max - count); | ||
6037 | if (max != 0 && count >= max) | ||
6038 | break; | ||
6039 | } | ||
6040 | spin_unlock(&nn->client_lock); | ||
6041 | nfsd_reap_openowners(&reaplist); | ||
5032 | return count; | 6042 | return count; |
5033 | } | 6043 | } |
5034 | 6044 | ||
5035 | u64 nfsd_print_client_delegations(struct nfs4_client *clp, u64 max) | 6045 | static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max, |
6046 | struct list_head *victims) | ||
5036 | { | 6047 | { |
6048 | struct nfs4_delegation *dp, *next; | ||
6049 | struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, | ||
6050 | nfsd_net_id); | ||
5037 | u64 count = 0; | 6051 | u64 count = 0; |
5038 | 6052 | ||
6053 | lockdep_assert_held(&nn->client_lock); | ||
6054 | |||
5039 | spin_lock(&state_lock); | 6055 | spin_lock(&state_lock); |
5040 | count = nfsd_find_all_delegations(clp, max, NULL); | 6056 | list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) { |
6057 | if (victims) { | ||
6058 | /* | ||
6059 | * It's not safe to mess with delegations that have a | ||
6060 | * non-zero dl_time. They might have already been broken | ||
6061 | * and could be processed by the laundromat outside of | ||
6062 | * the state_lock. Just leave them be. | ||
6063 | */ | ||
6064 | if (dp->dl_time != 0) | ||
6065 | continue; | ||
6066 | |||
6067 | atomic_inc(&clp->cl_refcount); | ||
6068 | unhash_delegation_locked(dp); | ||
6069 | list_add(&dp->dl_recall_lru, victims); | ||
6070 | } | ||
6071 | ++count; | ||
6072 | /* | ||
6073 | * Despite the fact that these functions deal with | ||
6074 | * 64-bit integers for "count", we must ensure that | ||
6075 | * it doesn't blow up the clp->cl_refcount. Throw a | ||
6076 | * warning if we start to approach INT_MAX here. | ||
6077 | */ | ||
6078 | WARN_ON_ONCE(count == (INT_MAX / 2)); | ||
6079 | if (count == max) | ||
6080 | break; | ||
6081 | } | ||
5041 | spin_unlock(&state_lock); | 6082 | spin_unlock(&state_lock); |
6083 | return count; | ||
6084 | } | ||
6085 | |||
6086 | static u64 | ||
6087 | nfsd_print_client_delegations(struct nfs4_client *clp) | ||
6088 | { | ||
6089 | u64 count = nfsd_find_all_delegations(clp, 0, NULL); | ||
5042 | 6090 | ||
5043 | nfsd_print_count(clp, count, "delegations"); | 6091 | nfsd_print_count(clp, count, "delegations"); |
5044 | return count; | 6092 | return count; |
5045 | } | 6093 | } |
5046 | 6094 | ||
5047 | u64 nfsd_for_n_state(u64 max, u64 (*func)(struct nfs4_client *, u64)) | 6095 | u64 |
6096 | nfsd_inject_print_delegations(void) | ||
5048 | { | 6097 | { |
5049 | struct nfs4_client *clp, *next; | 6098 | struct nfs4_client *clp; |
5050 | u64 count = 0; | 6099 | u64 count = 0; |
5051 | struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); | 6100 | struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, |
6101 | nfsd_net_id); | ||
5052 | 6102 | ||
5053 | if (!nfsd_netns_ready(nn)) | 6103 | if (!nfsd_netns_ready(nn)) |
5054 | return 0; | 6104 | return 0; |
5055 | 6105 | ||
5056 | list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) { | 6106 | spin_lock(&nn->client_lock); |
5057 | count += func(clp, max - count); | 6107 | list_for_each_entry(clp, &nn->client_lru, cl_lru) |
5058 | if ((max != 0) && (count >= max)) | 6108 | count += nfsd_print_client_delegations(clp); |
5059 | break; | 6109 | spin_unlock(&nn->client_lock); |
6110 | |||
6111 | return count; | ||
6112 | } | ||
6113 | |||
6114 | static void | ||
6115 | nfsd_forget_delegations(struct list_head *reaplist) | ||
6116 | { | ||
6117 | struct nfs4_client *clp; | ||
6118 | struct nfs4_delegation *dp, *next; | ||
6119 | |||
6120 | list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) { | ||
6121 | list_del_init(&dp->dl_recall_lru); | ||
6122 | clp = dp->dl_stid.sc_client; | ||
6123 | revoke_delegation(dp); | ||
6124 | put_client(clp); | ||
5060 | } | 6125 | } |
6126 | } | ||
5061 | 6127 | ||
6128 | u64 | ||
6129 | nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr, | ||
6130 | size_t addr_size) | ||
6131 | { | ||
6132 | u64 count = 0; | ||
6133 | struct nfs4_client *clp; | ||
6134 | struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, | ||
6135 | nfsd_net_id); | ||
6136 | LIST_HEAD(reaplist); | ||
6137 | |||
6138 | if (!nfsd_netns_ready(nn)) | ||
6139 | return count; | ||
6140 | |||
6141 | spin_lock(&nn->client_lock); | ||
6142 | clp = nfsd_find_client(addr, addr_size); | ||
6143 | if (clp) | ||
6144 | count = nfsd_find_all_delegations(clp, 0, &reaplist); | ||
6145 | spin_unlock(&nn->client_lock); | ||
6146 | |||
6147 | nfsd_forget_delegations(&reaplist); | ||
5062 | return count; | 6148 | return count; |
5063 | } | 6149 | } |
5064 | 6150 | ||
5065 | struct nfs4_client *nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size) | 6151 | u64 |
6152 | nfsd_inject_forget_delegations(u64 max) | ||
5066 | { | 6153 | { |
6154 | u64 count = 0; | ||
5067 | struct nfs4_client *clp; | 6155 | struct nfs4_client *clp; |
5068 | struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); | 6156 | struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, |
6157 | nfsd_net_id); | ||
6158 | LIST_HEAD(reaplist); | ||
5069 | 6159 | ||
5070 | if (!nfsd_netns_ready(nn)) | 6160 | if (!nfsd_netns_ready(nn)) |
5071 | return NULL; | 6161 | return count; |
5072 | 6162 | ||
6163 | spin_lock(&nn->client_lock); | ||
5073 | list_for_each_entry(clp, &nn->client_lru, cl_lru) { | 6164 | list_for_each_entry(clp, &nn->client_lru, cl_lru) { |
5074 | if (memcmp(&clp->cl_addr, addr, addr_size) == 0) | 6165 | count += nfsd_find_all_delegations(clp, max - count, &reaplist); |
5075 | return clp; | 6166 | if (max != 0 && count >= max) |
6167 | break; | ||
5076 | } | 6168 | } |
5077 | return NULL; | 6169 | spin_unlock(&nn->client_lock); |
6170 | nfsd_forget_delegations(&reaplist); | ||
6171 | return count; | ||
5078 | } | 6172 | } |
5079 | 6173 | ||
6174 | static void | ||
6175 | nfsd_recall_delegations(struct list_head *reaplist) | ||
6176 | { | ||
6177 | struct nfs4_client *clp; | ||
6178 | struct nfs4_delegation *dp, *next; | ||
6179 | |||
6180 | list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) { | ||
6181 | list_del_init(&dp->dl_recall_lru); | ||
6182 | clp = dp->dl_stid.sc_client; | ||
6183 | /* | ||
6184 | * We skipped all entries that had a zero dl_time before, | ||
6185 | * so we can now reset the dl_time back to 0. If a delegation | ||
6186 | * break comes in now, then it won't make any difference since | ||
6187 | * we're recalling it either way. | ||
6188 | */ | ||
6189 | spin_lock(&state_lock); | ||
6190 | dp->dl_time = 0; | ||
6191 | spin_unlock(&state_lock); | ||
6192 | nfsd_break_one_deleg(dp); | ||
6193 | put_client(clp); | ||
6194 | } | ||
6195 | } | ||
6196 | |||
6197 | u64 | ||
6198 | nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr, | ||
6199 | size_t addr_size) | ||
6200 | { | ||
6201 | u64 count = 0; | ||
6202 | struct nfs4_client *clp; | ||
6203 | struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, | ||
6204 | nfsd_net_id); | ||
6205 | LIST_HEAD(reaplist); | ||
6206 | |||
6207 | if (!nfsd_netns_ready(nn)) | ||
6208 | return count; | ||
6209 | |||
6210 | spin_lock(&nn->client_lock); | ||
6211 | clp = nfsd_find_client(addr, addr_size); | ||
6212 | if (clp) | ||
6213 | count = nfsd_find_all_delegations(clp, 0, &reaplist); | ||
6214 | spin_unlock(&nn->client_lock); | ||
6215 | |||
6216 | nfsd_recall_delegations(&reaplist); | ||
6217 | return count; | ||
6218 | } | ||
6219 | |||
6220 | u64 | ||
6221 | nfsd_inject_recall_delegations(u64 max) | ||
6222 | { | ||
6223 | u64 count = 0; | ||
6224 | struct nfs4_client *clp, *next; | ||
6225 | struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, | ||
6226 | nfsd_net_id); | ||
6227 | LIST_HEAD(reaplist); | ||
6228 | |||
6229 | if (!nfsd_netns_ready(nn)) | ||
6230 | return count; | ||
6231 | |||
6232 | spin_lock(&nn->client_lock); | ||
6233 | list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) { | ||
6234 | count += nfsd_find_all_delegations(clp, max - count, &reaplist); | ||
6235 | if (max != 0 && ++count >= max) | ||
6236 | break; | ||
6237 | } | ||
6238 | spin_unlock(&nn->client_lock); | ||
6239 | nfsd_recall_delegations(&reaplist); | ||
6240 | return count; | ||
6241 | } | ||
5080 | #endif /* CONFIG_NFSD_FAULT_INJECTION */ | 6242 | #endif /* CONFIG_NFSD_FAULT_INJECTION */ |
5081 | 6243 | ||
5082 | /* | 6244 | /* |
@@ -5113,14 +6275,6 @@ static int nfs4_state_create_net(struct net *net) | |||
5113 | CLIENT_HASH_SIZE, GFP_KERNEL); | 6275 | CLIENT_HASH_SIZE, GFP_KERNEL); |
5114 | if (!nn->unconf_id_hashtbl) | 6276 | if (!nn->unconf_id_hashtbl) |
5115 | goto err_unconf_id; | 6277 | goto err_unconf_id; |
5116 | nn->ownerstr_hashtbl = kmalloc(sizeof(struct list_head) * | ||
5117 | OWNER_HASH_SIZE, GFP_KERNEL); | ||
5118 | if (!nn->ownerstr_hashtbl) | ||
5119 | goto err_ownerstr; | ||
5120 | nn->lockowner_ino_hashtbl = kmalloc(sizeof(struct list_head) * | ||
5121 | LOCKOWNER_INO_HASH_SIZE, GFP_KERNEL); | ||
5122 | if (!nn->lockowner_ino_hashtbl) | ||
5123 | goto err_lockowner_ino; | ||
5124 | nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) * | 6278 | nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) * |
5125 | SESSION_HASH_SIZE, GFP_KERNEL); | 6279 | SESSION_HASH_SIZE, GFP_KERNEL); |
5126 | if (!nn->sessionid_hashtbl) | 6280 | if (!nn->sessionid_hashtbl) |
@@ -5130,10 +6284,6 @@ static int nfs4_state_create_net(struct net *net) | |||
5130 | INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]); | 6284 | INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]); |
5131 | INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]); | 6285 | INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]); |
5132 | } | 6286 | } |
5133 | for (i = 0; i < OWNER_HASH_SIZE; i++) | ||
5134 | INIT_LIST_HEAD(&nn->ownerstr_hashtbl[i]); | ||
5135 | for (i = 0; i < LOCKOWNER_INO_HASH_SIZE; i++) | ||
5136 | INIT_LIST_HEAD(&nn->lockowner_ino_hashtbl[i]); | ||
5137 | for (i = 0; i < SESSION_HASH_SIZE; i++) | 6287 | for (i = 0; i < SESSION_HASH_SIZE; i++) |
5138 | INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]); | 6288 | INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]); |
5139 | nn->conf_name_tree = RB_ROOT; | 6289 | nn->conf_name_tree = RB_ROOT; |
@@ -5149,10 +6299,6 @@ static int nfs4_state_create_net(struct net *net) | |||
5149 | return 0; | 6299 | return 0; |
5150 | 6300 | ||
5151 | err_sessionid: | 6301 | err_sessionid: |
5152 | kfree(nn->lockowner_ino_hashtbl); | ||
5153 | err_lockowner_ino: | ||
5154 | kfree(nn->ownerstr_hashtbl); | ||
5155 | err_ownerstr: | ||
5156 | kfree(nn->unconf_id_hashtbl); | 6302 | kfree(nn->unconf_id_hashtbl); |
5157 | err_unconf_id: | 6303 | err_unconf_id: |
5158 | kfree(nn->conf_id_hashtbl); | 6304 | kfree(nn->conf_id_hashtbl); |
@@ -5182,8 +6328,6 @@ nfs4_state_destroy_net(struct net *net) | |||
5182 | } | 6328 | } |
5183 | 6329 | ||
5184 | kfree(nn->sessionid_hashtbl); | 6330 | kfree(nn->sessionid_hashtbl); |
5185 | kfree(nn->lockowner_ino_hashtbl); | ||
5186 | kfree(nn->ownerstr_hashtbl); | ||
5187 | kfree(nn->unconf_id_hashtbl); | 6331 | kfree(nn->unconf_id_hashtbl); |
5188 | kfree(nn->conf_id_hashtbl); | 6332 | kfree(nn->conf_id_hashtbl); |
5189 | put_net(net); | 6333 | put_net(net); |
@@ -5247,22 +6391,22 @@ nfs4_state_shutdown_net(struct net *net) | |||
5247 | cancel_delayed_work_sync(&nn->laundromat_work); | 6391 | cancel_delayed_work_sync(&nn->laundromat_work); |
5248 | locks_end_grace(&nn->nfsd4_manager); | 6392 | locks_end_grace(&nn->nfsd4_manager); |
5249 | 6393 | ||
5250 | nfs4_lock_state(); | ||
5251 | INIT_LIST_HEAD(&reaplist); | 6394 | INIT_LIST_HEAD(&reaplist); |
5252 | spin_lock(&state_lock); | 6395 | spin_lock(&state_lock); |
5253 | list_for_each_safe(pos, next, &nn->del_recall_lru) { | 6396 | list_for_each_safe(pos, next, &nn->del_recall_lru) { |
5254 | dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); | 6397 | dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); |
5255 | list_move(&dp->dl_recall_lru, &reaplist); | 6398 | unhash_delegation_locked(dp); |
6399 | list_add(&dp->dl_recall_lru, &reaplist); | ||
5256 | } | 6400 | } |
5257 | spin_unlock(&state_lock); | 6401 | spin_unlock(&state_lock); |
5258 | list_for_each_safe(pos, next, &reaplist) { | 6402 | list_for_each_safe(pos, next, &reaplist) { |
5259 | dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); | 6403 | dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); |
5260 | destroy_delegation(dp); | 6404 | list_del_init(&dp->dl_recall_lru); |
6405 | nfs4_put_stid(&dp->dl_stid); | ||
5261 | } | 6406 | } |
5262 | 6407 | ||
5263 | nfsd4_client_tracking_exit(net); | 6408 | nfsd4_client_tracking_exit(net); |
5264 | nfs4_state_destroy_net(net); | 6409 | nfs4_state_destroy_net(net); |
5265 | nfs4_unlock_state(); | ||
5266 | } | 6410 | } |
5267 | 6411 | ||
5268 | void | 6412 | void |
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 944275c8f56d..f9821ce6658a 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c | |||
@@ -181,28 +181,43 @@ static int zero_clientid(clientid_t *clid) | |||
181 | } | 181 | } |
182 | 182 | ||
183 | /** | 183 | /** |
184 | * defer_free - mark an allocation as deferred freed | 184 | * svcxdr_tmpalloc - allocate memory to be freed after compound processing |
185 | * @argp: NFSv4 compound argument structure to be freed with | 185 | * @argp: NFSv4 compound argument structure |
186 | * @release: release callback to free @p, typically kfree() | 186 | * @p: pointer to be freed (with kfree()) |
187 | * @p: pointer to be freed | ||
188 | * | 187 | * |
189 | * Marks @p to be freed when processing the compound operation | 188 | * Marks @p to be freed when processing the compound operation |
190 | * described in @argp finishes. | 189 | * described in @argp finishes. |
191 | */ | 190 | */ |
192 | static int | 191 | static void * |
193 | defer_free(struct nfsd4_compoundargs *argp, | 192 | svcxdr_tmpalloc(struct nfsd4_compoundargs *argp, u32 len) |
194 | void (*release)(const void *), void *p) | ||
195 | { | 193 | { |
196 | struct tmpbuf *tb; | 194 | struct svcxdr_tmpbuf *tb; |
197 | 195 | ||
198 | tb = kmalloc(sizeof(*tb), GFP_KERNEL); | 196 | tb = kmalloc(sizeof(*tb) + len, GFP_KERNEL); |
199 | if (!tb) | 197 | if (!tb) |
200 | return -ENOMEM; | 198 | return NULL; |
201 | tb->buf = p; | ||
202 | tb->release = release; | ||
203 | tb->next = argp->to_free; | 199 | tb->next = argp->to_free; |
204 | argp->to_free = tb; | 200 | argp->to_free = tb; |
205 | return 0; | 201 | return tb->buf; |
202 | } | ||
203 | |||
204 | /* | ||
205 | * For xdr strings that need to be passed to other kernel api's | ||
206 | * as null-terminated strings. | ||
207 | * | ||
208 | * Note null-terminating in place usually isn't safe since the | ||
209 | * buffer might end on a page boundary. | ||
210 | */ | ||
211 | static char * | ||
212 | svcxdr_dupstr(struct nfsd4_compoundargs *argp, void *buf, u32 len) | ||
213 | { | ||
214 | char *p = svcxdr_tmpalloc(argp, len + 1); | ||
215 | |||
216 | if (!p) | ||
217 | return NULL; | ||
218 | memcpy(p, buf, len); | ||
219 | p[len] = '\0'; | ||
220 | return p; | ||
206 | } | 221 | } |
207 | 222 | ||
208 | /** | 223 | /** |
@@ -217,19 +232,13 @@ defer_free(struct nfsd4_compoundargs *argp, | |||
217 | */ | 232 | */ |
218 | static char *savemem(struct nfsd4_compoundargs *argp, __be32 *p, int nbytes) | 233 | static char *savemem(struct nfsd4_compoundargs *argp, __be32 *p, int nbytes) |
219 | { | 234 | { |
220 | if (p == argp->tmp) { | 235 | void *ret; |
221 | p = kmemdup(argp->tmp, nbytes, GFP_KERNEL); | 236 | |
222 | if (!p) | 237 | ret = svcxdr_tmpalloc(argp, nbytes); |
223 | return NULL; | 238 | if (!ret) |
224 | } else { | ||
225 | BUG_ON(p != argp->tmpp); | ||
226 | argp->tmpp = NULL; | ||
227 | } | ||
228 | if (defer_free(argp, kfree, p)) { | ||
229 | kfree(p); | ||
230 | return NULL; | 239 | return NULL; |
231 | } else | 240 | memcpy(ret, p, nbytes); |
232 | return (char *)p; | 241 | return ret; |
233 | } | 242 | } |
234 | 243 | ||
235 | static __be32 | 244 | static __be32 |
@@ -292,12 +301,10 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, | |||
292 | if (nace > NFS4_ACL_MAX) | 301 | if (nace > NFS4_ACL_MAX) |
293 | return nfserr_fbig; | 302 | return nfserr_fbig; |
294 | 303 | ||
295 | *acl = nfs4_acl_new(nace); | 304 | *acl = svcxdr_tmpalloc(argp, nfs4_acl_bytes(nace)); |
296 | if (*acl == NULL) | 305 | if (*acl == NULL) |
297 | return nfserr_jukebox; | 306 | return nfserr_jukebox; |
298 | 307 | ||
299 | defer_free(argp, kfree, *acl); | ||
300 | |||
301 | (*acl)->naces = nace; | 308 | (*acl)->naces = nace; |
302 | for (ace = (*acl)->aces; ace < (*acl)->aces + nace; ace++) { | 309 | for (ace = (*acl)->aces; ace < (*acl)->aces + nace; ace++) { |
303 | READ_BUF(16); len += 16; | 310 | READ_BUF(16); len += 16; |
@@ -418,12 +425,10 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, | |||
418 | return nfserr_badlabel; | 425 | return nfserr_badlabel; |
419 | len += (XDR_QUADLEN(dummy32) << 2); | 426 | len += (XDR_QUADLEN(dummy32) << 2); |
420 | READMEM(buf, dummy32); | 427 | READMEM(buf, dummy32); |
421 | label->data = kzalloc(dummy32 + 1, GFP_KERNEL); | 428 | label->len = dummy32; |
429 | label->data = svcxdr_dupstr(argp, buf, dummy32); | ||
422 | if (!label->data) | 430 | if (!label->data) |
423 | return nfserr_jukebox; | 431 | return nfserr_jukebox; |
424 | label->len = dummy32; | ||
425 | defer_free(argp, kfree, label->data); | ||
426 | memcpy(label->data, buf, dummy32); | ||
427 | } | 432 | } |
428 | #endif | 433 | #endif |
429 | 434 | ||
@@ -598,20 +603,11 @@ nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create | |||
598 | switch (create->cr_type) { | 603 | switch (create->cr_type) { |
599 | case NF4LNK: | 604 | case NF4LNK: |
600 | READ_BUF(4); | 605 | READ_BUF(4); |
601 | create->cr_linklen = be32_to_cpup(p++); | 606 | create->cr_datalen = be32_to_cpup(p++); |
602 | READ_BUF(create->cr_linklen); | 607 | READ_BUF(create->cr_datalen); |
603 | /* | 608 | create->cr_data = svcxdr_dupstr(argp, p, create->cr_datalen); |
604 | * The VFS will want a null-terminated string, and | 609 | if (!create->cr_data) |
605 | * null-terminating in place isn't safe since this might | ||
606 | * end on a page boundary: | ||
607 | */ | ||
608 | create->cr_linkname = | ||
609 | kmalloc(create->cr_linklen + 1, GFP_KERNEL); | ||
610 | if (!create->cr_linkname) | ||
611 | return nfserr_jukebox; | 610 | return nfserr_jukebox; |
612 | memcpy(create->cr_linkname, p, create->cr_linklen); | ||
613 | create->cr_linkname[create->cr_linklen] = '\0'; | ||
614 | defer_free(argp, kfree, create->cr_linkname); | ||
615 | break; | 611 | break; |
616 | case NF4BLK: | 612 | case NF4BLK: |
617 | case NF4CHR: | 613 | case NF4CHR: |
@@ -1481,13 +1477,12 @@ nfsd4_decode_test_stateid(struct nfsd4_compoundargs *argp, struct nfsd4_test_sta | |||
1481 | INIT_LIST_HEAD(&test_stateid->ts_stateid_list); | 1477 | INIT_LIST_HEAD(&test_stateid->ts_stateid_list); |
1482 | 1478 | ||
1483 | for (i = 0; i < test_stateid->ts_num_ids; i++) { | 1479 | for (i = 0; i < test_stateid->ts_num_ids; i++) { |
1484 | stateid = kmalloc(sizeof(struct nfsd4_test_stateid_id), GFP_KERNEL); | 1480 | stateid = svcxdr_tmpalloc(argp, sizeof(*stateid)); |
1485 | if (!stateid) { | 1481 | if (!stateid) { |
1486 | status = nfserrno(-ENOMEM); | 1482 | status = nfserrno(-ENOMEM); |
1487 | goto out; | 1483 | goto out; |
1488 | } | 1484 | } |
1489 | 1485 | ||
1490 | defer_free(argp, kfree, stateid); | ||
1491 | INIT_LIST_HEAD(&stateid->ts_id_list); | 1486 | INIT_LIST_HEAD(&stateid->ts_id_list); |
1492 | list_add_tail(&stateid->ts_id_list, &test_stateid->ts_stateid_list); | 1487 | list_add_tail(&stateid->ts_id_list, &test_stateid->ts_stateid_list); |
1493 | 1488 | ||
@@ -1640,7 +1635,7 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp) | |||
1640 | goto xdr_error; | 1635 | goto xdr_error; |
1641 | 1636 | ||
1642 | if (argp->opcnt > ARRAY_SIZE(argp->iops)) { | 1637 | if (argp->opcnt > ARRAY_SIZE(argp->iops)) { |
1643 | argp->ops = kmalloc(argp->opcnt * sizeof(*argp->ops), GFP_KERNEL); | 1638 | argp->ops = kzalloc(argp->opcnt * sizeof(*argp->ops), GFP_KERNEL); |
1644 | if (!argp->ops) { | 1639 | if (!argp->ops) { |
1645 | argp->ops = argp->iops; | 1640 | argp->ops = argp->iops; |
1646 | dprintk("nfsd: couldn't allocate room for COMPOUND\n"); | 1641 | dprintk("nfsd: couldn't allocate room for COMPOUND\n"); |
@@ -3077,11 +3072,8 @@ static __be32 nfsd4_encode_splice_read( | |||
3077 | __be32 nfserr; | 3072 | __be32 nfserr; |
3078 | __be32 *p = xdr->p - 2; | 3073 | __be32 *p = xdr->p - 2; |
3079 | 3074 | ||
3080 | /* | 3075 | /* Make sure there will be room for padding if needed */ |
3081 | * Don't inline pages unless we know there's room for eof, | 3076 | if (xdr->end - xdr->p < 1) |
3082 | * count, and possible padding: | ||
3083 | */ | ||
3084 | if (xdr->end - xdr->p < 3) | ||
3085 | return nfserr_resource; | 3077 | return nfserr_resource; |
3086 | 3078 | ||
3087 | nfserr = nfsd_splice_read(read->rd_rqstp, file, | 3079 | nfserr = nfsd_splice_read(read->rd_rqstp, file, |
@@ -3147,9 +3139,7 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp, | |||
3147 | len = maxcount; | 3139 | len = maxcount; |
3148 | v = 0; | 3140 | v = 0; |
3149 | 3141 | ||
3150 | thislen = (void *)xdr->end - (void *)xdr->p; | 3142 | thislen = min_t(long, len, ((void *)xdr->end - (void *)xdr->p)); |
3151 | if (len < thislen) | ||
3152 | thislen = len; | ||
3153 | p = xdr_reserve_space(xdr, (thislen+3)&~3); | 3143 | p = xdr_reserve_space(xdr, (thislen+3)&~3); |
3154 | WARN_ON_ONCE(!p); | 3144 | WARN_ON_ONCE(!p); |
3155 | resp->rqstp->rq_vec[v].iov_base = p; | 3145 | resp->rqstp->rq_vec[v].iov_base = p; |
@@ -3216,10 +3206,8 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr, | |||
3216 | xdr_commit_encode(xdr); | 3206 | xdr_commit_encode(xdr); |
3217 | 3207 | ||
3218 | maxcount = svc_max_payload(resp->rqstp); | 3208 | maxcount = svc_max_payload(resp->rqstp); |
3219 | if (maxcount > xdr->buf->buflen - xdr->buf->len) | 3209 | maxcount = min_t(unsigned long, maxcount, (xdr->buf->buflen - xdr->buf->len)); |
3220 | maxcount = xdr->buf->buflen - xdr->buf->len; | 3210 | maxcount = min_t(unsigned long, maxcount, read->rd_length); |
3221 | if (maxcount > read->rd_length) | ||
3222 | maxcount = read->rd_length; | ||
3223 | 3211 | ||
3224 | if (!read->rd_filp) { | 3212 | if (!read->rd_filp) { |
3225 | err = nfsd_get_tmp_read_open(resp->rqstp, read->rd_fhp, | 3213 | err = nfsd_get_tmp_read_open(resp->rqstp, read->rd_fhp, |
@@ -3937,8 +3925,6 @@ status: | |||
3937 | * | 3925 | * |
3938 | * XDR note: do not encode rp->rp_buflen: the buffer contains the | 3926 | * XDR note: do not encode rp->rp_buflen: the buffer contains the |
3939 | * previously sent already encoded operation. | 3927 | * previously sent already encoded operation. |
3940 | * | ||
3941 | * called with nfs4_lock_state() held | ||
3942 | */ | 3928 | */ |
3943 | void | 3929 | void |
3944 | nfsd4_encode_replay(struct xdr_stream *xdr, struct nfsd4_op *op) | 3930 | nfsd4_encode_replay(struct xdr_stream *xdr, struct nfsd4_op *op) |
@@ -3977,9 +3963,8 @@ int nfsd4_release_compoundargs(void *rq, __be32 *p, void *resp) | |||
3977 | kfree(args->tmpp); | 3963 | kfree(args->tmpp); |
3978 | args->tmpp = NULL; | 3964 | args->tmpp = NULL; |
3979 | while (args->to_free) { | 3965 | while (args->to_free) { |
3980 | struct tmpbuf *tb = args->to_free; | 3966 | struct svcxdr_tmpbuf *tb = args->to_free; |
3981 | args->to_free = tb->next; | 3967 | args->to_free = tb->next; |
3982 | tb->release(tb->buf); | ||
3983 | kfree(tb); | 3968 | kfree(tb); |
3984 | } | 3969 | } |
3985 | return 1; | 3970 | return 1; |
@@ -4012,7 +3997,6 @@ nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compo | |||
4012 | /* | 3997 | /* |
4013 | * All that remains is to write the tag and operation count... | 3998 | * All that remains is to write the tag and operation count... |
4014 | */ | 3999 | */ |
4015 | struct nfsd4_compound_state *cs = &resp->cstate; | ||
4016 | struct xdr_buf *buf = resp->xdr.buf; | 4000 | struct xdr_buf *buf = resp->xdr.buf; |
4017 | 4001 | ||
4018 | WARN_ON_ONCE(buf->len != buf->head[0].iov_len + buf->page_len + | 4002 | WARN_ON_ONCE(buf->len != buf->head[0].iov_len + buf->page_len + |
@@ -4026,19 +4010,7 @@ nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compo | |||
4026 | p += XDR_QUADLEN(resp->taglen); | 4010 | p += XDR_QUADLEN(resp->taglen); |
4027 | *p++ = htonl(resp->opcnt); | 4011 | *p++ = htonl(resp->opcnt); |
4028 | 4012 | ||
4029 | if (nfsd4_has_session(cs)) { | 4013 | nfsd4_sequence_done(resp); |
4030 | struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); | ||
4031 | struct nfs4_client *clp = cs->session->se_client; | ||
4032 | if (cs->status != nfserr_replay_cache) { | ||
4033 | nfsd4_store_cache_entry(resp); | ||
4034 | cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE; | ||
4035 | } | ||
4036 | /* Renew the clientid on success and on replay */ | ||
4037 | spin_lock(&nn->client_lock); | ||
4038 | nfsd4_put_session(cs->session); | ||
4039 | spin_unlock(&nn->client_lock); | ||
4040 | put_client_renew(clp); | ||
4041 | } | ||
4042 | return 1; | 4014 | return 1; |
4043 | } | 4015 | } |
4044 | 4016 | ||
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c index 6040da8830ff..ff9567633245 100644 --- a/fs/nfsd/nfscache.c +++ b/fs/nfsd/nfscache.c | |||
@@ -221,7 +221,12 @@ static void | |||
221 | hash_refile(struct svc_cacherep *rp) | 221 | hash_refile(struct svc_cacherep *rp) |
222 | { | 222 | { |
223 | hlist_del_init(&rp->c_hash); | 223 | hlist_del_init(&rp->c_hash); |
224 | hlist_add_head(&rp->c_hash, cache_hash + hash_32(rp->c_xid, maskbits)); | 224 | /* |
225 | * No point in byte swapping c_xid since we're just using it to pick | ||
226 | * a hash bucket. | ||
227 | */ | ||
228 | hlist_add_head(&rp->c_hash, cache_hash + | ||
229 | hash_32((__force u32)rp->c_xid, maskbits)); | ||
225 | } | 230 | } |
226 | 231 | ||
227 | /* | 232 | /* |
@@ -356,7 +361,11 @@ nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum) | |||
356 | struct hlist_head *rh; | 361 | struct hlist_head *rh; |
357 | unsigned int entries = 0; | 362 | unsigned int entries = 0; |
358 | 363 | ||
359 | rh = &cache_hash[hash_32(rqstp->rq_xid, maskbits)]; | 364 | /* |
365 | * No point in byte swapping rq_xid since we're just using it to pick | ||
366 | * a hash bucket. | ||
367 | */ | ||
368 | rh = &cache_hash[hash_32((__force u32)rqstp->rq_xid, maskbits)]; | ||
360 | hlist_for_each_entry(rp, rh, c_hash) { | 369 | hlist_for_each_entry(rp, rh, c_hash) { |
361 | ++entries; | 370 | ++entries; |
362 | if (nfsd_cache_match(rqstp, csum, rp)) { | 371 | if (nfsd_cache_match(rqstp, csum, rp)) { |
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 51844048937f..4e042105fb6e 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c | |||
@@ -39,6 +39,7 @@ enum { | |||
39 | NFSD_Versions, | 39 | NFSD_Versions, |
40 | NFSD_Ports, | 40 | NFSD_Ports, |
41 | NFSD_MaxBlkSize, | 41 | NFSD_MaxBlkSize, |
42 | NFSD_MaxConnections, | ||
42 | NFSD_SupportedEnctypes, | 43 | NFSD_SupportedEnctypes, |
43 | /* | 44 | /* |
44 | * The below MUST come last. Otherwise we leave a hole in nfsd_files[] | 45 | * The below MUST come last. Otherwise we leave a hole in nfsd_files[] |
@@ -62,6 +63,7 @@ static ssize_t write_pool_threads(struct file *file, char *buf, size_t size); | |||
62 | static ssize_t write_versions(struct file *file, char *buf, size_t size); | 63 | static ssize_t write_versions(struct file *file, char *buf, size_t size); |
63 | static ssize_t write_ports(struct file *file, char *buf, size_t size); | 64 | static ssize_t write_ports(struct file *file, char *buf, size_t size); |
64 | static ssize_t write_maxblksize(struct file *file, char *buf, size_t size); | 65 | static ssize_t write_maxblksize(struct file *file, char *buf, size_t size); |
66 | static ssize_t write_maxconn(struct file *file, char *buf, size_t size); | ||
65 | #ifdef CONFIG_NFSD_V4 | 67 | #ifdef CONFIG_NFSD_V4 |
66 | static ssize_t write_leasetime(struct file *file, char *buf, size_t size); | 68 | static ssize_t write_leasetime(struct file *file, char *buf, size_t size); |
67 | static ssize_t write_gracetime(struct file *file, char *buf, size_t size); | 69 | static ssize_t write_gracetime(struct file *file, char *buf, size_t size); |
@@ -77,6 +79,7 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = { | |||
77 | [NFSD_Versions] = write_versions, | 79 | [NFSD_Versions] = write_versions, |
78 | [NFSD_Ports] = write_ports, | 80 | [NFSD_Ports] = write_ports, |
79 | [NFSD_MaxBlkSize] = write_maxblksize, | 81 | [NFSD_MaxBlkSize] = write_maxblksize, |
82 | [NFSD_MaxConnections] = write_maxconn, | ||
80 | #ifdef CONFIG_NFSD_V4 | 83 | #ifdef CONFIG_NFSD_V4 |
81 | [NFSD_Leasetime] = write_leasetime, | 84 | [NFSD_Leasetime] = write_leasetime, |
82 | [NFSD_Gracetime] = write_gracetime, | 85 | [NFSD_Gracetime] = write_gracetime, |
@@ -369,8 +372,7 @@ static ssize_t write_filehandle(struct file *file, char *buf, size_t size) | |||
369 | 372 | ||
370 | if (maxsize < NFS_FHSIZE) | 373 | if (maxsize < NFS_FHSIZE) |
371 | return -EINVAL; | 374 | return -EINVAL; |
372 | if (maxsize > NFS3_FHSIZE) | 375 | maxsize = min(maxsize, NFS3_FHSIZE); |
373 | maxsize = NFS3_FHSIZE; | ||
374 | 376 | ||
375 | if (qword_get(&mesg, mesg, size)>0) | 377 | if (qword_get(&mesg, mesg, size)>0) |
376 | return -EINVAL; | 378 | return -EINVAL; |
@@ -871,10 +873,8 @@ static ssize_t write_maxblksize(struct file *file, char *buf, size_t size) | |||
871 | /* force bsize into allowed range and | 873 | /* force bsize into allowed range and |
872 | * required alignment. | 874 | * required alignment. |
873 | */ | 875 | */ |
874 | if (bsize < 1024) | 876 | bsize = max_t(int, bsize, 1024); |
875 | bsize = 1024; | 877 | bsize = min_t(int, bsize, NFSSVC_MAXBLKSIZE); |
876 | if (bsize > NFSSVC_MAXBLKSIZE) | ||
877 | bsize = NFSSVC_MAXBLKSIZE; | ||
878 | bsize &= ~(1024-1); | 878 | bsize &= ~(1024-1); |
879 | mutex_lock(&nfsd_mutex); | 879 | mutex_lock(&nfsd_mutex); |
880 | if (nn->nfsd_serv) { | 880 | if (nn->nfsd_serv) { |
@@ -889,6 +889,44 @@ static ssize_t write_maxblksize(struct file *file, char *buf, size_t size) | |||
889 | nfsd_max_blksize); | 889 | nfsd_max_blksize); |
890 | } | 890 | } |
891 | 891 | ||
892 | /** | ||
893 | * write_maxconn - Set or report the current max number of connections | ||
894 | * | ||
895 | * Input: | ||
896 | * buf: ignored | ||
897 | * size: zero | ||
898 | * OR | ||
899 | * | ||
900 | * Input: | ||
901 | * buf: C string containing an unsigned | ||
902 | * integer value representing the new | ||
903 | * number of max connections | ||
904 | * size: non-zero length of C string in @buf | ||
905 | * Output: | ||
906 | * On success: passed-in buffer filled with '\n'-terminated C string | ||
907 | * containing numeric value of max_connections setting | ||
908 | * for this net namespace; | ||
909 | * return code is the size in bytes of the string | ||
910 | * On error: return code is zero or a negative errno value | ||
911 | */ | ||
912 | static ssize_t write_maxconn(struct file *file, char *buf, size_t size) | ||
913 | { | ||
914 | char *mesg = buf; | ||
915 | struct net *net = file->f_dentry->d_sb->s_fs_info; | ||
916 | struct nfsd_net *nn = net_generic(net, nfsd_net_id); | ||
917 | unsigned int maxconn = nn->max_connections; | ||
918 | |||
919 | if (size > 0) { | ||
920 | int rv = get_uint(&mesg, &maxconn); | ||
921 | |||
922 | if (rv) | ||
923 | return rv; | ||
924 | nn->max_connections = maxconn; | ||
925 | } | ||
926 | |||
927 | return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%u\n", maxconn); | ||
928 | } | ||
929 | |||
892 | #ifdef CONFIG_NFSD_V4 | 930 | #ifdef CONFIG_NFSD_V4 |
893 | static ssize_t __nfsd4_write_time(struct file *file, char *buf, size_t size, | 931 | static ssize_t __nfsd4_write_time(struct file *file, char *buf, size_t size, |
894 | time_t *time, struct nfsd_net *nn) | 932 | time_t *time, struct nfsd_net *nn) |
@@ -1064,6 +1102,7 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent) | |||
1064 | [NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR}, | 1102 | [NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR}, |
1065 | [NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO}, | 1103 | [NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO}, |
1066 | [NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO}, | 1104 | [NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO}, |
1105 | [NFSD_MaxConnections] = {"max_connections", &transaction_ops, S_IWUSR|S_IRUGO}, | ||
1067 | #if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE) | 1106 | #if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE) |
1068 | [NFSD_SupportedEnctypes] = {"supported_krb5_enctypes", &supported_enctypes_ops, S_IRUGO}, | 1107 | [NFSD_SupportedEnctypes] = {"supported_krb5_enctypes", &supported_enctypes_ops, S_IRUGO}, |
1069 | #endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */ | 1108 | #endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */ |
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index ec8393418154..e883a5868be6 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c | |||
@@ -162,7 +162,14 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp) | |||
162 | /* deprecated, convert to type 3 */ | 162 | /* deprecated, convert to type 3 */ |
163 | len = key_len(FSID_ENCODE_DEV)/4; | 163 | len = key_len(FSID_ENCODE_DEV)/4; |
164 | fh->fh_fsid_type = FSID_ENCODE_DEV; | 164 | fh->fh_fsid_type = FSID_ENCODE_DEV; |
165 | fh->fh_fsid[0] = new_encode_dev(MKDEV(ntohl(fh->fh_fsid[0]), ntohl(fh->fh_fsid[1]))); | 165 | /* |
166 | * struct knfsd_fh uses host-endian fields, which are | ||
167 | * sometimes used to hold net-endian values. This | ||
168 | * confuses sparse, so we must use __force here to | ||
169 | * keep it from complaining. | ||
170 | */ | ||
171 | fh->fh_fsid[0] = new_encode_dev(MKDEV(ntohl((__force __be32)fh->fh_fsid[0]), | ||
172 | ntohl((__force __be32)fh->fh_fsid[1]))); | ||
166 | fh->fh_fsid[1] = fh->fh_fsid[2]; | 173 | fh->fh_fsid[1] = fh->fh_fsid[2]; |
167 | } | 174 | } |
168 | data_left -= len; | 175 | data_left -= len; |
@@ -539,8 +546,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry, | |||
539 | dentry); | 546 | dentry); |
540 | 547 | ||
541 | fhp->fh_dentry = dget(dentry); /* our internal copy */ | 548 | fhp->fh_dentry = dget(dentry); /* our internal copy */ |
542 | fhp->fh_export = exp; | 549 | fhp->fh_export = exp_get(exp); |
543 | cache_get(&exp->h); | ||
544 | 550 | ||
545 | if (fhp->fh_handle.fh_version == 0xca) { | 551 | if (fhp->fh_handle.fh_version == 0xca) { |
546 | /* old style filehandle please */ | 552 | /* old style filehandle please */ |
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h index 2e89e70ac15c..08236d70c667 100644 --- a/fs/nfsd/nfsfh.h +++ b/fs/nfsd/nfsfh.h | |||
@@ -73,8 +73,15 @@ enum fsid_source { | |||
73 | extern enum fsid_source fsid_source(struct svc_fh *fhp); | 73 | extern enum fsid_source fsid_source(struct svc_fh *fhp); |
74 | 74 | ||
75 | 75 | ||
76 | /* This might look a little large to "inline" but in all calls except | 76 | /* |
77 | * This might look a little large to "inline" but in all calls except | ||
77 | * one, 'vers' is constant so moste of the function disappears. | 78 | * one, 'vers' is constant so moste of the function disappears. |
79 | * | ||
80 | * In some cases the values are considered to be host endian and in | ||
81 | * others, net endian. fsidv is always considered to be u32 as the | ||
82 | * callers don't know which it will be. So we must use __force to keep | ||
83 | * sparse from complaining. Since these values are opaque to the | ||
84 | * client, that shouldn't be a problem. | ||
78 | */ | 85 | */ |
79 | static inline void mk_fsid(int vers, u32 *fsidv, dev_t dev, ino_t ino, | 86 | static inline void mk_fsid(int vers, u32 *fsidv, dev_t dev, ino_t ino, |
80 | u32 fsid, unsigned char *uuid) | 87 | u32 fsid, unsigned char *uuid) |
@@ -82,7 +89,7 @@ static inline void mk_fsid(int vers, u32 *fsidv, dev_t dev, ino_t ino, | |||
82 | u32 *up; | 89 | u32 *up; |
83 | switch(vers) { | 90 | switch(vers) { |
84 | case FSID_DEV: | 91 | case FSID_DEV: |
85 | fsidv[0] = htonl((MAJOR(dev)<<16) | | 92 | fsidv[0] = (__force __u32)htonl((MAJOR(dev)<<16) | |
86 | MINOR(dev)); | 93 | MINOR(dev)); |
87 | fsidv[1] = ino_t_to_u32(ino); | 94 | fsidv[1] = ino_t_to_u32(ino); |
88 | break; | 95 | break; |
@@ -90,8 +97,8 @@ static inline void mk_fsid(int vers, u32 *fsidv, dev_t dev, ino_t ino, | |||
90 | fsidv[0] = fsid; | 97 | fsidv[0] = fsid; |
91 | break; | 98 | break; |
92 | case FSID_MAJOR_MINOR: | 99 | case FSID_MAJOR_MINOR: |
93 | fsidv[0] = htonl(MAJOR(dev)); | 100 | fsidv[0] = (__force __u32)htonl(MAJOR(dev)); |
94 | fsidv[1] = htonl(MINOR(dev)); | 101 | fsidv[1] = (__force __u32)htonl(MINOR(dev)); |
95 | fsidv[2] = ino_t_to_u32(ino); | 102 | fsidv[2] = ino_t_to_u32(ino); |
96 | break; | 103 | break; |
97 | 104 | ||
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index 54c6b3d3cc79..b8680738f588 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c | |||
@@ -403,12 +403,13 @@ nfsd_proc_symlink(struct svc_rqst *rqstp, struct nfsd_symlinkargs *argp, | |||
403 | 403 | ||
404 | fh_init(&newfh, NFS_FHSIZE); | 404 | fh_init(&newfh, NFS_FHSIZE); |
405 | /* | 405 | /* |
406 | * Create the link, look up new file and set attrs. | 406 | * Crazy hack: the request fits in a page, and already-decoded |
407 | * attributes follow argp->tname, so it's safe to just write a | ||
408 | * null to ensure it's null-terminated: | ||
407 | */ | 409 | */ |
410 | argp->tname[argp->tlen] = '\0'; | ||
408 | nfserr = nfsd_symlink(rqstp, &argp->ffh, argp->fname, argp->flen, | 411 | nfserr = nfsd_symlink(rqstp, &argp->ffh, argp->fname, argp->flen, |
409 | argp->tname, argp->tlen, | 412 | argp->tname, &newfh); |
410 | &newfh, &argp->attrs); | ||
411 | |||
412 | 413 | ||
413 | fh_put(&argp->ffh); | 414 | fh_put(&argp->ffh); |
414 | fh_put(&newfh); | 415 | fh_put(&newfh); |
@@ -716,6 +717,7 @@ nfserrno (int errno) | |||
716 | { nfserr_noent, -ENOENT }, | 717 | { nfserr_noent, -ENOENT }, |
717 | { nfserr_io, -EIO }, | 718 | { nfserr_io, -EIO }, |
718 | { nfserr_nxio, -ENXIO }, | 719 | { nfserr_nxio, -ENXIO }, |
720 | { nfserr_fbig, -E2BIG }, | ||
719 | { nfserr_acces, -EACCES }, | 721 | { nfserr_acces, -EACCES }, |
720 | { nfserr_exist, -EEXIST }, | 722 | { nfserr_exist, -EEXIST }, |
721 | { nfserr_xdev, -EXDEV }, | 723 | { nfserr_xdev, -EXDEV }, |
@@ -743,6 +745,7 @@ nfserrno (int errno) | |||
743 | { nfserr_notsupp, -EOPNOTSUPP }, | 745 | { nfserr_notsupp, -EOPNOTSUPP }, |
744 | { nfserr_toosmall, -ETOOSMALL }, | 746 | { nfserr_toosmall, -ETOOSMALL }, |
745 | { nfserr_serverfault, -ESERVERFAULT }, | 747 | { nfserr_serverfault, -ESERVERFAULT }, |
748 | { nfserr_serverfault, -ENFILE }, | ||
746 | }; | 749 | }; |
747 | int i; | 750 | int i; |
748 | 751 | ||
@@ -750,7 +753,7 @@ nfserrno (int errno) | |||
750 | if (nfs_errtbl[i].syserr == errno) | 753 | if (nfs_errtbl[i].syserr == errno) |
751 | return nfs_errtbl[i].nfserr; | 754 | return nfs_errtbl[i].nfserr; |
752 | } | 755 | } |
753 | printk (KERN_INFO "nfsd: non-standard errno: %d\n", errno); | 756 | WARN(1, "nfsd: non-standard errno: %d\n", errno); |
754 | return nfserr_io; | 757 | return nfserr_io; |
755 | } | 758 | } |
756 | 759 | ||
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 1879e43f2868..752d56bbe0ba 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c | |||
@@ -221,7 +221,8 @@ static int nfsd_startup_generic(int nrservs) | |||
221 | */ | 221 | */ |
222 | ret = nfsd_racache_init(2*nrservs); | 222 | ret = nfsd_racache_init(2*nrservs); |
223 | if (ret) | 223 | if (ret) |
224 | return ret; | 224 | goto dec_users; |
225 | |||
225 | ret = nfs4_state_start(); | 226 | ret = nfs4_state_start(); |
226 | if (ret) | 227 | if (ret) |
227 | goto out_racache; | 228 | goto out_racache; |
@@ -229,6 +230,8 @@ static int nfsd_startup_generic(int nrservs) | |||
229 | 230 | ||
230 | out_racache: | 231 | out_racache: |
231 | nfsd_racache_shutdown(); | 232 | nfsd_racache_shutdown(); |
233 | dec_users: | ||
234 | nfsd_users--; | ||
232 | return ret; | 235 | return ret; |
233 | } | 236 | } |
234 | 237 | ||
@@ -405,6 +408,7 @@ int nfsd_create_serv(struct net *net) | |||
405 | if (nn->nfsd_serv == NULL) | 408 | if (nn->nfsd_serv == NULL) |
406 | return -ENOMEM; | 409 | return -ENOMEM; |
407 | 410 | ||
411 | nn->nfsd_serv->sv_maxconn = nn->max_connections; | ||
408 | error = svc_bind(nn->nfsd_serv, net); | 412 | error = svc_bind(nn->nfsd_serv, net); |
409 | if (error < 0) { | 413 | if (error < 0) { |
410 | svc_destroy(nn->nfsd_serv); | 414 | svc_destroy(nn->nfsd_serv); |
@@ -469,8 +473,7 @@ int nfsd_set_nrthreads(int n, int *nthreads, struct net *net) | |||
469 | /* enforce a global maximum number of threads */ | 473 | /* enforce a global maximum number of threads */ |
470 | tot = 0; | 474 | tot = 0; |
471 | for (i = 0; i < n; i++) { | 475 | for (i = 0; i < n; i++) { |
472 | if (nthreads[i] > NFSD_MAXSERVS) | 476 | nthreads[i] = min(nthreads[i], NFSD_MAXSERVS); |
473 | nthreads[i] = NFSD_MAXSERVS; | ||
474 | tot += nthreads[i]; | 477 | tot += nthreads[i]; |
475 | } | 478 | } |
476 | if (tot > NFSD_MAXSERVS) { | 479 | if (tot > NFSD_MAXSERVS) { |
@@ -519,11 +522,11 @@ nfsd_svc(int nrservs, struct net *net) | |||
519 | 522 | ||
520 | mutex_lock(&nfsd_mutex); | 523 | mutex_lock(&nfsd_mutex); |
521 | dprintk("nfsd: creating service\n"); | 524 | dprintk("nfsd: creating service\n"); |
522 | if (nrservs <= 0) | 525 | |
523 | nrservs = 0; | 526 | nrservs = max(nrservs, 0); |
524 | if (nrservs > NFSD_MAXSERVS) | 527 | nrservs = min(nrservs, NFSD_MAXSERVS); |
525 | nrservs = NFSD_MAXSERVS; | ||
526 | error = 0; | 528 | error = 0; |
529 | |||
527 | if (nrservs == 0 && nn->nfsd_serv == NULL) | 530 | if (nrservs == 0 && nn->nfsd_serv == NULL) |
528 | goto out; | 531 | goto out; |
529 | 532 | ||
@@ -564,6 +567,7 @@ nfsd(void *vrqstp) | |||
564 | struct svc_rqst *rqstp = (struct svc_rqst *) vrqstp; | 567 | struct svc_rqst *rqstp = (struct svc_rqst *) vrqstp; |
565 | struct svc_xprt *perm_sock = list_entry(rqstp->rq_server->sv_permsocks.next, typeof(struct svc_xprt), xpt_list); | 568 | struct svc_xprt *perm_sock = list_entry(rqstp->rq_server->sv_permsocks.next, typeof(struct svc_xprt), xpt_list); |
566 | struct net *net = perm_sock->xpt_net; | 569 | struct net *net = perm_sock->xpt_net; |
570 | struct nfsd_net *nn = net_generic(net, nfsd_net_id); | ||
567 | int err; | 571 | int err; |
568 | 572 | ||
569 | /* Lock module and set up kernel thread */ | 573 | /* Lock module and set up kernel thread */ |
@@ -597,6 +601,9 @@ nfsd(void *vrqstp) | |||
597 | * The main request loop | 601 | * The main request loop |
598 | */ | 602 | */ |
599 | for (;;) { | 603 | for (;;) { |
604 | /* Update sv_maxconn if it has changed */ | ||
605 | rqstp->rq_server->sv_maxconn = nn->max_connections; | ||
606 | |||
600 | /* | 607 | /* |
601 | * Find a socket with data available and call its | 608 | * Find a socket with data available and call its |
602 | * recvfrom routine. | 609 | * recvfrom routine. |
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c index 1ac306b769df..412d7061f9e5 100644 --- a/fs/nfsd/nfsxdr.c +++ b/fs/nfsd/nfsxdr.c | |||
@@ -257,8 +257,7 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, | |||
257 | len = args->count = ntohl(*p++); | 257 | len = args->count = ntohl(*p++); |
258 | p++; /* totalcount - unused */ | 258 | p++; /* totalcount - unused */ |
259 | 259 | ||
260 | if (len > NFSSVC_MAXBLKSIZE_V2) | 260 | len = min_t(unsigned int, len, NFSSVC_MAXBLKSIZE_V2); |
261 | len = NFSSVC_MAXBLKSIZE_V2; | ||
262 | 261 | ||
263 | /* set up somewhere to store response. | 262 | /* set up somewhere to store response. |
264 | * We take pages, put them on reslist and include in iovec | 263 | * We take pages, put them on reslist and include in iovec |
@@ -268,7 +267,7 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, | |||
268 | struct page *p = *(rqstp->rq_next_page++); | 267 | struct page *p = *(rqstp->rq_next_page++); |
269 | 268 | ||
270 | rqstp->rq_vec[v].iov_base = page_address(p); | 269 | rqstp->rq_vec[v].iov_base = page_address(p); |
271 | rqstp->rq_vec[v].iov_len = len < PAGE_SIZE?len:PAGE_SIZE; | 270 | rqstp->rq_vec[v].iov_len = min_t(unsigned int, len, PAGE_SIZE); |
272 | len -= rqstp->rq_vec[v].iov_len; | 271 | len -= rqstp->rq_vec[v].iov_len; |
273 | v++; | 272 | v++; |
274 | } | 273 | } |
@@ -400,9 +399,7 @@ nfssvc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p, | |||
400 | return 0; | 399 | return 0; |
401 | args->cookie = ntohl(*p++); | 400 | args->cookie = ntohl(*p++); |
402 | args->count = ntohl(*p++); | 401 | args->count = ntohl(*p++); |
403 | if (args->count > PAGE_SIZE) | 402 | args->count = min_t(u32, args->count, PAGE_SIZE); |
404 | args->count = PAGE_SIZE; | ||
405 | |||
406 | args->buffer = page_address(*(rqstp->rq_next_page++)); | 403 | args->buffer = page_address(*(rqstp->rq_next_page++)); |
407 | 404 | ||
408 | return xdr_argsize_check(rqstp, p); | 405 | return xdr_argsize_check(rqstp, p); |
@@ -516,10 +513,11 @@ nfssvc_encode_entry(void *ccdv, const char *name, | |||
516 | } | 513 | } |
517 | if (cd->offset) | 514 | if (cd->offset) |
518 | *cd->offset = htonl(offset); | 515 | *cd->offset = htonl(offset); |
519 | if (namlen > NFS2_MAXNAMLEN) | ||
520 | namlen = NFS2_MAXNAMLEN;/* truncate filename */ | ||
521 | 516 | ||
517 | /* truncate filename */ | ||
518 | namlen = min(namlen, NFS2_MAXNAMLEN); | ||
522 | slen = XDR_QUADLEN(namlen); | 519 | slen = XDR_QUADLEN(namlen); |
520 | |||
523 | if ((buflen = cd->buflen - slen - 4) < 0) { | 521 | if ((buflen = cd->buflen - slen - 4) < 0) { |
524 | cd->common.err = nfserr_toosmall; | 522 | cd->common.err = nfserr_toosmall; |
525 | return -EINVAL; | 523 | return -EINVAL; |
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 374c66283ac5..4a89e00d7461 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h | |||
@@ -72,7 +72,13 @@ struct nfsd4_callback { | |||
72 | bool cb_done; | 72 | bool cb_done; |
73 | }; | 73 | }; |
74 | 74 | ||
75 | /* | ||
76 | * A core object that represents a "common" stateid. These are generally | ||
77 | * embedded within the different (more specific) stateid objects and contain | ||
78 | * fields that are of general use to any stateid. | ||
79 | */ | ||
75 | struct nfs4_stid { | 80 | struct nfs4_stid { |
81 | atomic_t sc_count; | ||
76 | #define NFS4_OPEN_STID 1 | 82 | #define NFS4_OPEN_STID 1 |
77 | #define NFS4_LOCK_STID 2 | 83 | #define NFS4_LOCK_STID 2 |
78 | #define NFS4_DELEG_STID 4 | 84 | #define NFS4_DELEG_STID 4 |
@@ -80,22 +86,43 @@ struct nfs4_stid { | |||
80 | #define NFS4_CLOSED_STID 8 | 86 | #define NFS4_CLOSED_STID 8 |
81 | /* For a deleg stateid kept around only to process free_stateid's: */ | 87 | /* For a deleg stateid kept around only to process free_stateid's: */ |
82 | #define NFS4_REVOKED_DELEG_STID 16 | 88 | #define NFS4_REVOKED_DELEG_STID 16 |
89 | #define NFS4_CLOSED_DELEG_STID 32 | ||
83 | unsigned char sc_type; | 90 | unsigned char sc_type; |
84 | stateid_t sc_stateid; | 91 | stateid_t sc_stateid; |
85 | struct nfs4_client *sc_client; | 92 | struct nfs4_client *sc_client; |
93 | struct nfs4_file *sc_file; | ||
94 | void (*sc_free)(struct nfs4_stid *); | ||
86 | }; | 95 | }; |
87 | 96 | ||
97 | /* | ||
98 | * Represents a delegation stateid. The nfs4_client holds references to these | ||
99 | * and they are put when it is being destroyed or when the delegation is | ||
100 | * returned by the client: | ||
101 | * | ||
102 | * o 1 reference as long as a delegation is still in force (taken when it's | ||
103 | * alloc'd, put when it's returned or revoked) | ||
104 | * | ||
105 | * o 1 reference as long as a recall rpc is in progress (taken when the lease | ||
106 | * is broken, put when the rpc exits) | ||
107 | * | ||
108 | * o 1 more ephemeral reference for each nfsd thread currently doing something | ||
109 | * with that delegation without holding the cl_lock | ||
110 | * | ||
111 | * If the server attempts to recall a delegation and the client doesn't do so | ||
112 | * before a timeout, the server may also revoke the delegation. In that case, | ||
113 | * the object will either be destroyed (v4.0) or moved to a per-client list of | ||
114 | * revoked delegations (v4.1+). | ||
115 | * | ||
116 | * This object is a superset of the nfs4_stid. | ||
117 | */ | ||
88 | struct nfs4_delegation { | 118 | struct nfs4_delegation { |
89 | struct nfs4_stid dl_stid; /* must be first field */ | 119 | struct nfs4_stid dl_stid; /* must be first field */ |
90 | struct list_head dl_perfile; | 120 | struct list_head dl_perfile; |
91 | struct list_head dl_perclnt; | 121 | struct list_head dl_perclnt; |
92 | struct list_head dl_recall_lru; /* delegation recalled */ | 122 | struct list_head dl_recall_lru; /* delegation recalled */ |
93 | atomic_t dl_count; /* ref count */ | ||
94 | struct nfs4_file *dl_file; | ||
95 | u32 dl_type; | 123 | u32 dl_type; |
96 | time_t dl_time; | 124 | time_t dl_time; |
97 | /* For recall: */ | 125 | /* For recall: */ |
98 | struct knfsd_fh dl_fh; | ||
99 | int dl_retries; | 126 | int dl_retries; |
100 | struct nfsd4_callback dl_recall; | 127 | struct nfsd4_callback dl_recall; |
101 | }; | 128 | }; |
@@ -194,6 +221,11 @@ struct nfsd4_conn { | |||
194 | unsigned char cn_flags; | 221 | unsigned char cn_flags; |
195 | }; | 222 | }; |
196 | 223 | ||
224 | /* | ||
225 | * Representation of a v4.1+ session. These are refcounted in a similar fashion | ||
226 | * to the nfs4_client. References are only taken when the server is actively | ||
227 | * working on the object (primarily during the processing of compounds). | ||
228 | */ | ||
197 | struct nfsd4_session { | 229 | struct nfsd4_session { |
198 | atomic_t se_ref; | 230 | atomic_t se_ref; |
199 | struct list_head se_hash; /* hash by sessionid */ | 231 | struct list_head se_hash; /* hash by sessionid */ |
@@ -212,8 +244,6 @@ struct nfsd4_session { | |||
212 | struct nfsd4_slot *se_slots[]; /* forward channel slots */ | 244 | struct nfsd4_slot *se_slots[]; /* forward channel slots */ |
213 | }; | 245 | }; |
214 | 246 | ||
215 | extern void nfsd4_put_session(struct nfsd4_session *ses); | ||
216 | |||
217 | /* formatted contents of nfs4_sessionid */ | 247 | /* formatted contents of nfs4_sessionid */ |
218 | struct nfsd4_sessionid { | 248 | struct nfsd4_sessionid { |
219 | clientid_t clientid; | 249 | clientid_t clientid; |
@@ -225,17 +255,35 @@ struct nfsd4_sessionid { | |||
225 | 255 | ||
226 | /* | 256 | /* |
227 | * struct nfs4_client - one per client. Clientids live here. | 257 | * struct nfs4_client - one per client. Clientids live here. |
228 | * o Each nfs4_client is hashed by clientid. | ||
229 | * | 258 | * |
230 | * o Each nfs4_clients is also hashed by name | 259 | * The initial object created by an NFS client using SETCLIENTID (for NFSv4.0) |
231 | * (the opaque quantity initially sent by the client to identify itself). | 260 | * or EXCHANGE_ID (for NFSv4.1+). These objects are refcounted and timestamped. |
261 | * Each nfsd_net_ns object contains a set of these and they are tracked via | ||
262 | * short and long form clientid. They are hashed and searched for under the | ||
263 | * per-nfsd_net client_lock spinlock. | ||
264 | * | ||
265 | * References to it are only held during the processing of compounds, and in | ||
266 | * certain other operations. In their "resting state" they have a refcount of | ||
267 | * 0. If they are not renewed within a lease period, they become eligible for | ||
268 | * destruction by the laundromat. | ||
269 | * | ||
270 | * These objects can also be destroyed prematurely by the fault injection code, | ||
271 | * or if the client sends certain forms of SETCLIENTID or EXCHANGE_ID updates. | ||
272 | * Care is taken *not* to do this however when the objects have an elevated | ||
273 | * refcount. | ||
274 | * | ||
275 | * o Each nfs4_client is hashed by clientid | ||
276 | * | ||
277 | * o Each nfs4_clients is also hashed by name (the opaque quantity initially | ||
278 | * sent by the client to identify itself). | ||
232 | * | 279 | * |
233 | * o cl_perclient list is used to ensure no dangling stateowner references | 280 | * o cl_perclient list is used to ensure no dangling stateowner references |
234 | * when we expire the nfs4_client | 281 | * when we expire the nfs4_client |
235 | */ | 282 | */ |
236 | struct nfs4_client { | 283 | struct nfs4_client { |
237 | struct list_head cl_idhash; /* hash by cl_clientid.id */ | 284 | struct list_head cl_idhash; /* hash by cl_clientid.id */ |
238 | struct rb_node cl_namenode; /* link into by-name trees */ | 285 | struct rb_node cl_namenode; /* link into by-name trees */ |
286 | struct list_head *cl_ownerstr_hashtbl; | ||
239 | struct list_head cl_openowners; | 287 | struct list_head cl_openowners; |
240 | struct idr cl_stateids; /* stateid lookup */ | 288 | struct idr cl_stateids; /* stateid lookup */ |
241 | struct list_head cl_delegations; | 289 | struct list_head cl_delegations; |
@@ -329,21 +377,43 @@ struct nfs4_replay { | |||
329 | unsigned int rp_buflen; | 377 | unsigned int rp_buflen; |
330 | char *rp_buf; | 378 | char *rp_buf; |
331 | struct knfsd_fh rp_openfh; | 379 | struct knfsd_fh rp_openfh; |
380 | struct mutex rp_mutex; | ||
332 | char rp_ibuf[NFSD4_REPLAY_ISIZE]; | 381 | char rp_ibuf[NFSD4_REPLAY_ISIZE]; |
333 | }; | 382 | }; |
334 | 383 | ||
384 | struct nfs4_stateowner; | ||
385 | |||
386 | struct nfs4_stateowner_operations { | ||
387 | void (*so_unhash)(struct nfs4_stateowner *); | ||
388 | void (*so_free)(struct nfs4_stateowner *); | ||
389 | }; | ||
390 | |||
391 | /* | ||
392 | * A core object that represents either an open or lock owner. The object and | ||
393 | * lock owner objects have one of these embedded within them. Refcounts and | ||
394 | * other fields common to both owner types are contained within these | ||
395 | * structures. | ||
396 | */ | ||
335 | struct nfs4_stateowner { | 397 | struct nfs4_stateowner { |
336 | struct list_head so_strhash; /* hash by op_name */ | 398 | struct list_head so_strhash; |
337 | struct list_head so_stateids; | 399 | struct list_head so_stateids; |
338 | struct nfs4_client * so_client; | 400 | struct nfs4_client *so_client; |
339 | /* after increment in ENCODE_SEQID_OP_TAIL, represents the next | 401 | const struct nfs4_stateowner_operations *so_ops; |
402 | /* after increment in nfsd4_bump_seqid, represents the next | ||
340 | * sequence id expected from the client: */ | 403 | * sequence id expected from the client: */ |
341 | u32 so_seqid; | 404 | atomic_t so_count; |
342 | struct xdr_netobj so_owner; /* open owner name */ | 405 | u32 so_seqid; |
343 | struct nfs4_replay so_replay; | 406 | struct xdr_netobj so_owner; /* open owner name */ |
344 | bool so_is_open_owner; | 407 | struct nfs4_replay so_replay; |
408 | bool so_is_open_owner; | ||
345 | }; | 409 | }; |
346 | 410 | ||
411 | /* | ||
412 | * When a file is opened, the client provides an open state owner opaque string | ||
413 | * that indicates the "owner" of that open. These objects are refcounted. | ||
414 | * References to it are held by each open state associated with it. This object | ||
415 | * is a superset of the nfs4_stateowner struct. | ||
416 | */ | ||
347 | struct nfs4_openowner { | 417 | struct nfs4_openowner { |
348 | struct nfs4_stateowner oo_owner; /* must be first field */ | 418 | struct nfs4_stateowner oo_owner; /* must be first field */ |
349 | struct list_head oo_perclient; | 419 | struct list_head oo_perclient; |
@@ -358,15 +428,17 @@ struct nfs4_openowner { | |||
358 | struct nfs4_ol_stateid *oo_last_closed_stid; | 428 | struct nfs4_ol_stateid *oo_last_closed_stid; |
359 | time_t oo_time; /* time of placement on so_close_lru */ | 429 | time_t oo_time; /* time of placement on so_close_lru */ |
360 | #define NFS4_OO_CONFIRMED 1 | 430 | #define NFS4_OO_CONFIRMED 1 |
361 | #define NFS4_OO_NEW 4 | ||
362 | unsigned char oo_flags; | 431 | unsigned char oo_flags; |
363 | }; | 432 | }; |
364 | 433 | ||
434 | /* | ||
435 | * Represents a generic "lockowner". Similar to an openowner. References to it | ||
436 | * are held by the lock stateids that are created on its behalf. This object is | ||
437 | * a superset of the nfs4_stateowner struct (or would be if it needed any extra | ||
438 | * fields). | ||
439 | */ | ||
365 | struct nfs4_lockowner { | 440 | struct nfs4_lockowner { |
366 | struct nfs4_stateowner lo_owner; /* must be first element */ | 441 | struct nfs4_stateowner lo_owner; /* must be first element */ |
367 | struct list_head lo_owner_ino_hash; /* hash by owner,file */ | ||
368 | struct list_head lo_perstateid; | ||
369 | struct list_head lo_list; /* for temporary uses */ | ||
370 | }; | 442 | }; |
371 | 443 | ||
372 | static inline struct nfs4_openowner * openowner(struct nfs4_stateowner *so) | 444 | static inline struct nfs4_openowner * openowner(struct nfs4_stateowner *so) |
@@ -379,9 +451,17 @@ static inline struct nfs4_lockowner * lockowner(struct nfs4_stateowner *so) | |||
379 | return container_of(so, struct nfs4_lockowner, lo_owner); | 451 | return container_of(so, struct nfs4_lockowner, lo_owner); |
380 | } | 452 | } |
381 | 453 | ||
382 | /* nfs4_file: a file opened by some number of (open) nfs4_stateowners. */ | 454 | /* |
455 | * nfs4_file: a file opened by some number of (open) nfs4_stateowners. | ||
456 | * | ||
457 | * These objects are global. nfsd only keeps one instance of a nfs4_file per | ||
458 | * inode (though it may keep multiple file descriptors open per inode). These | ||
459 | * are tracked in the file_hashtbl which is protected by the state_lock | ||
460 | * spinlock. | ||
461 | */ | ||
383 | struct nfs4_file { | 462 | struct nfs4_file { |
384 | atomic_t fi_ref; | 463 | atomic_t fi_ref; |
464 | spinlock_t fi_lock; | ||
385 | struct hlist_node fi_hash; /* hash by "struct inode *" */ | 465 | struct hlist_node fi_hash; /* hash by "struct inode *" */ |
386 | struct list_head fi_stateids; | 466 | struct list_head fi_stateids; |
387 | struct list_head fi_delegations; | 467 | struct list_head fi_delegations; |
@@ -395,49 +475,36 @@ struct nfs4_file { | |||
395 | * + 1 to both of the above if NFS4_SHARE_ACCESS_BOTH is set. | 475 | * + 1 to both of the above if NFS4_SHARE_ACCESS_BOTH is set. |
396 | */ | 476 | */ |
397 | atomic_t fi_access[2]; | 477 | atomic_t fi_access[2]; |
478 | u32 fi_share_deny; | ||
398 | struct file *fi_deleg_file; | 479 | struct file *fi_deleg_file; |
399 | struct file_lock *fi_lease; | 480 | struct file_lock *fi_lease; |
400 | atomic_t fi_delegees; | 481 | atomic_t fi_delegees; |
401 | struct inode *fi_inode; | 482 | struct knfsd_fh fi_fhandle; |
402 | bool fi_had_conflict; | 483 | bool fi_had_conflict; |
403 | }; | 484 | }; |
404 | 485 | ||
405 | /* XXX: for first cut may fall back on returning file that doesn't work | 486 | /* |
406 | * at all? */ | 487 | * A generic struct representing either a open or lock stateid. The nfs4_client |
407 | static inline struct file *find_writeable_file(struct nfs4_file *f) | 488 | * holds a reference to each of these objects, and they in turn hold a |
408 | { | 489 | * reference to their respective stateowners. The client's reference is |
409 | if (f->fi_fds[O_WRONLY]) | 490 | * released in response to a close or unlock (depending on whether it's an open |
410 | return f->fi_fds[O_WRONLY]; | 491 | * or lock stateid) or when the client is being destroyed. |
411 | return f->fi_fds[O_RDWR]; | 492 | * |
412 | } | 493 | * In the case of v4.0 open stateids, these objects are preserved for a little |
413 | 494 | * while after close in order to handle CLOSE replays. Those are eventually | |
414 | static inline struct file *find_readable_file(struct nfs4_file *f) | 495 | * reclaimed via a LRU scheme by the laundromat. |
415 | { | 496 | * |
416 | if (f->fi_fds[O_RDONLY]) | 497 | * This object is a superset of the nfs4_stid. "ol" stands for "Open or Lock". |
417 | return f->fi_fds[O_RDONLY]; | 498 | * Better suggestions welcome. |
418 | return f->fi_fds[O_RDWR]; | 499 | */ |
419 | } | ||
420 | |||
421 | static inline struct file *find_any_file(struct nfs4_file *f) | ||
422 | { | ||
423 | if (f->fi_fds[O_RDWR]) | ||
424 | return f->fi_fds[O_RDWR]; | ||
425 | else if (f->fi_fds[O_WRONLY]) | ||
426 | return f->fi_fds[O_WRONLY]; | ||
427 | else | ||
428 | return f->fi_fds[O_RDONLY]; | ||
429 | } | ||
430 | |||
431 | /* "ol" stands for "Open or Lock". Better suggestions welcome. */ | ||
432 | struct nfs4_ol_stateid { | 500 | struct nfs4_ol_stateid { |
433 | struct nfs4_stid st_stid; /* must be first field */ | 501 | struct nfs4_stid st_stid; /* must be first field */ |
434 | struct list_head st_perfile; | 502 | struct list_head st_perfile; |
435 | struct list_head st_perstateowner; | 503 | struct list_head st_perstateowner; |
436 | struct list_head st_lockowners; | 504 | struct list_head st_locks; |
437 | struct nfs4_stateowner * st_stateowner; | 505 | struct nfs4_stateowner * st_stateowner; |
438 | struct nfs4_file * st_file; | 506 | unsigned char st_access_bmap; |
439 | unsigned long st_access_bmap; | 507 | unsigned char st_deny_bmap; |
440 | unsigned long st_deny_bmap; | ||
441 | struct nfs4_ol_stateid * st_openstp; | 508 | struct nfs4_ol_stateid * st_openstp; |
442 | }; | 509 | }; |
443 | 510 | ||
@@ -456,15 +523,16 @@ struct nfsd_net; | |||
456 | extern __be32 nfs4_preprocess_stateid_op(struct net *net, | 523 | extern __be32 nfs4_preprocess_stateid_op(struct net *net, |
457 | struct nfsd4_compound_state *cstate, | 524 | struct nfsd4_compound_state *cstate, |
458 | stateid_t *stateid, int flags, struct file **filp); | 525 | stateid_t *stateid, int flags, struct file **filp); |
459 | extern void nfs4_lock_state(void); | 526 | void nfs4_put_stid(struct nfs4_stid *s); |
460 | extern void nfs4_unlock_state(void); | ||
461 | void nfs4_remove_reclaim_record(struct nfs4_client_reclaim *, struct nfsd_net *); | 527 | void nfs4_remove_reclaim_record(struct nfs4_client_reclaim *, struct nfsd_net *); |
462 | extern void nfs4_release_reclaim(struct nfsd_net *); | 528 | extern void nfs4_release_reclaim(struct nfsd_net *); |
463 | extern struct nfs4_client_reclaim *nfsd4_find_reclaim_client(const char *recdir, | 529 | extern struct nfs4_client_reclaim *nfsd4_find_reclaim_client(const char *recdir, |
464 | struct nfsd_net *nn); | 530 | struct nfsd_net *nn); |
465 | extern __be32 nfs4_check_open_reclaim(clientid_t *clid, bool sessions, struct nfsd_net *nn); | 531 | extern __be32 nfs4_check_open_reclaim(clientid_t *clid, |
532 | struct nfsd4_compound_state *cstate, struct nfsd_net *nn); | ||
466 | extern int set_callback_cred(void); | 533 | extern int set_callback_cred(void); |
467 | extern void nfsd4_init_callback(struct nfsd4_callback *); | 534 | void nfsd4_run_cb_null(struct work_struct *w); |
535 | void nfsd4_run_cb_recall(struct work_struct *w); | ||
468 | extern void nfsd4_probe_callback(struct nfs4_client *clp); | 536 | extern void nfsd4_probe_callback(struct nfs4_client *clp); |
469 | extern void nfsd4_probe_callback_sync(struct nfs4_client *clp); | 537 | extern void nfsd4_probe_callback_sync(struct nfs4_client *clp); |
470 | extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *); | 538 | extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *); |
@@ -472,11 +540,10 @@ extern void nfsd4_cb_recall(struct nfs4_delegation *dp); | |||
472 | extern int nfsd4_create_callback_queue(void); | 540 | extern int nfsd4_create_callback_queue(void); |
473 | extern void nfsd4_destroy_callback_queue(void); | 541 | extern void nfsd4_destroy_callback_queue(void); |
474 | extern void nfsd4_shutdown_callback(struct nfs4_client *); | 542 | extern void nfsd4_shutdown_callback(struct nfs4_client *); |
475 | extern void nfs4_put_delegation(struct nfs4_delegation *dp); | 543 | extern void nfsd4_prepare_cb_recall(struct nfs4_delegation *dp); |
476 | extern struct nfs4_client_reclaim *nfs4_client_to_reclaim(const char *name, | 544 | extern struct nfs4_client_reclaim *nfs4_client_to_reclaim(const char *name, |
477 | struct nfsd_net *nn); | 545 | struct nfsd_net *nn); |
478 | extern bool nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn); | 546 | extern bool nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn); |
479 | extern void put_client_renew(struct nfs4_client *clp); | ||
480 | 547 | ||
481 | /* nfs4recover operations */ | 548 | /* nfs4recover operations */ |
482 | extern int nfsd4_client_tracking_init(struct net *net); | 549 | extern int nfsd4_client_tracking_init(struct net *net); |
@@ -490,19 +557,24 @@ extern void nfsd4_record_grace_done(struct nfsd_net *nn, time_t boot_time); | |||
490 | #ifdef CONFIG_NFSD_FAULT_INJECTION | 557 | #ifdef CONFIG_NFSD_FAULT_INJECTION |
491 | int nfsd_fault_inject_init(void); | 558 | int nfsd_fault_inject_init(void); |
492 | void nfsd_fault_inject_cleanup(void); | 559 | void nfsd_fault_inject_cleanup(void); |
493 | u64 nfsd_for_n_state(u64, u64 (*)(struct nfs4_client *, u64)); | 560 | |
494 | struct nfs4_client *nfsd_find_client(struct sockaddr_storage *, size_t); | 561 | u64 nfsd_inject_print_clients(void); |
495 | 562 | u64 nfsd_inject_forget_client(struct sockaddr_storage *, size_t); | |
496 | u64 nfsd_forget_client(struct nfs4_client *, u64); | 563 | u64 nfsd_inject_forget_clients(u64); |
497 | u64 nfsd_forget_client_locks(struct nfs4_client*, u64); | 564 | |
498 | u64 nfsd_forget_client_openowners(struct nfs4_client *, u64); | 565 | u64 nfsd_inject_print_locks(void); |
499 | u64 nfsd_forget_client_delegations(struct nfs4_client *, u64); | 566 | u64 nfsd_inject_forget_client_locks(struct sockaddr_storage *, size_t); |
500 | u64 nfsd_recall_client_delegations(struct nfs4_client *, u64); | 567 | u64 nfsd_inject_forget_locks(u64); |
501 | 568 | ||
502 | u64 nfsd_print_client(struct nfs4_client *, u64); | 569 | u64 nfsd_inject_print_openowners(void); |
503 | u64 nfsd_print_client_locks(struct nfs4_client *, u64); | 570 | u64 nfsd_inject_forget_client_openowners(struct sockaddr_storage *, size_t); |
504 | u64 nfsd_print_client_openowners(struct nfs4_client *, u64); | 571 | u64 nfsd_inject_forget_openowners(u64); |
505 | u64 nfsd_print_client_delegations(struct nfs4_client *, u64); | 572 | |
573 | u64 nfsd_inject_print_delegations(void); | ||
574 | u64 nfsd_inject_forget_client_delegations(struct sockaddr_storage *, size_t); | ||
575 | u64 nfsd_inject_forget_delegations(u64); | ||
576 | u64 nfsd_inject_recall_client_delegations(struct sockaddr_storage *, size_t); | ||
577 | u64 nfsd_inject_recall_delegations(u64); | ||
506 | #else /* CONFIG_NFSD_FAULT_INJECTION */ | 578 | #else /* CONFIG_NFSD_FAULT_INJECTION */ |
507 | static inline int nfsd_fault_inject_init(void) { return 0; } | 579 | static inline int nfsd_fault_inject_init(void) { return 0; } |
508 | static inline void nfsd_fault_inject_cleanup(void) {} | 580 | static inline void nfsd_fault_inject_cleanup(void) {} |
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 140c496f612c..f501a9b5c9df 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c | |||
@@ -189,8 +189,7 @@ nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp, | |||
189 | dprintk("nfsd: nfsd_lookup(fh %s, %.*s)\n", SVCFH_fmt(fhp), len,name); | 189 | dprintk("nfsd: nfsd_lookup(fh %s, %.*s)\n", SVCFH_fmt(fhp), len,name); |
190 | 190 | ||
191 | dparent = fhp->fh_dentry; | 191 | dparent = fhp->fh_dentry; |
192 | exp = fhp->fh_export; | 192 | exp = exp_get(fhp->fh_export); |
193 | exp_get(exp); | ||
194 | 193 | ||
195 | /* Lookup the name, but don't follow links */ | 194 | /* Lookup the name, but don't follow links */ |
196 | if (isdotent(name, len)) { | 195 | if (isdotent(name, len)) { |
@@ -464,7 +463,7 @@ out_put_write_access: | |||
464 | if (size_change) | 463 | if (size_change) |
465 | put_write_access(inode); | 464 | put_write_access(inode); |
466 | if (!err) | 465 | if (!err) |
467 | commit_metadata(fhp); | 466 | err = nfserrno(commit_metadata(fhp)); |
468 | out: | 467 | out: |
469 | return err; | 468 | return err; |
470 | } | 469 | } |
@@ -820,7 +819,8 @@ static int nfsd_direct_splice_actor(struct pipe_inode_info *pipe, | |||
820 | return __splice_from_pipe(pipe, sd, nfsd_splice_actor); | 819 | return __splice_from_pipe(pipe, sd, nfsd_splice_actor); |
821 | } | 820 | } |
822 | 821 | ||
823 | __be32 nfsd_finish_read(struct file *file, unsigned long *count, int host_err) | 822 | static __be32 |
823 | nfsd_finish_read(struct file *file, unsigned long *count, int host_err) | ||
824 | { | 824 | { |
825 | if (host_err >= 0) { | 825 | if (host_err >= 0) { |
826 | nfsdstats.io_read += host_err; | 826 | nfsdstats.io_read += host_err; |
@@ -831,7 +831,7 @@ __be32 nfsd_finish_read(struct file *file, unsigned long *count, int host_err) | |||
831 | return nfserrno(host_err); | 831 | return nfserrno(host_err); |
832 | } | 832 | } |
833 | 833 | ||
834 | int nfsd_splice_read(struct svc_rqst *rqstp, | 834 | __be32 nfsd_splice_read(struct svc_rqst *rqstp, |
835 | struct file *file, loff_t offset, unsigned long *count) | 835 | struct file *file, loff_t offset, unsigned long *count) |
836 | { | 836 | { |
837 | struct splice_desc sd = { | 837 | struct splice_desc sd = { |
@@ -847,7 +847,7 @@ int nfsd_splice_read(struct svc_rqst *rqstp, | |||
847 | return nfsd_finish_read(file, count, host_err); | 847 | return nfsd_finish_read(file, count, host_err); |
848 | } | 848 | } |
849 | 849 | ||
850 | int nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen, | 850 | __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen, |
851 | unsigned long *count) | 851 | unsigned long *count) |
852 | { | 852 | { |
853 | mm_segment_t oldfs; | 853 | mm_segment_t oldfs; |
@@ -1121,7 +1121,8 @@ nfsd_create_setattr(struct svc_rqst *rqstp, struct svc_fh *resfhp, | |||
1121 | iap->ia_valid &= ~(ATTR_UID|ATTR_GID); | 1121 | iap->ia_valid &= ~(ATTR_UID|ATTR_GID); |
1122 | if (iap->ia_valid) | 1122 | if (iap->ia_valid) |
1123 | return nfsd_setattr(rqstp, resfhp, iap, 0, (time_t)0); | 1123 | return nfsd_setattr(rqstp, resfhp, iap, 0, (time_t)0); |
1124 | return 0; | 1124 | /* Callers expect file metadata to be committed here */ |
1125 | return nfserrno(commit_metadata(resfhp)); | ||
1125 | } | 1126 | } |
1126 | 1127 | ||
1127 | /* HPUX client sometimes creates a file in mode 000, and sets size to 0. | 1128 | /* HPUX client sometimes creates a file in mode 000, and sets size to 0. |
@@ -1253,9 +1254,10 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, | |||
1253 | err = nfsd_create_setattr(rqstp, resfhp, iap); | 1254 | err = nfsd_create_setattr(rqstp, resfhp, iap); |
1254 | 1255 | ||
1255 | /* | 1256 | /* |
1256 | * nfsd_setattr already committed the child. Transactional filesystems | 1257 | * nfsd_create_setattr already committed the child. Transactional |
1257 | * had a chance to commit changes for both parent and child | 1258 | * filesystems had a chance to commit changes for both parent and |
1258 | * simultaneously making the following commit_metadata a noop. | 1259 | * child * simultaneously making the following commit_metadata a |
1260 | * noop. | ||
1259 | */ | 1261 | */ |
1260 | err2 = nfserrno(commit_metadata(fhp)); | 1262 | err2 = nfserrno(commit_metadata(fhp)); |
1261 | if (err2) | 1263 | if (err2) |
@@ -1426,7 +1428,8 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, | |||
1426 | err = nfsd_create_setattr(rqstp, resfhp, iap); | 1428 | err = nfsd_create_setattr(rqstp, resfhp, iap); |
1427 | 1429 | ||
1428 | /* | 1430 | /* |
1429 | * nfsd_setattr already committed the child (and possibly also the parent). | 1431 | * nfsd_create_setattr already committed the child |
1432 | * (and possibly also the parent). | ||
1430 | */ | 1433 | */ |
1431 | if (!err) | 1434 | if (!err) |
1432 | err = nfserrno(commit_metadata(fhp)); | 1435 | err = nfserrno(commit_metadata(fhp)); |
@@ -1504,16 +1507,15 @@ out_nfserr: | |||
1504 | __be32 | 1507 | __be32 |
1505 | nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp, | 1508 | nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp, |
1506 | char *fname, int flen, | 1509 | char *fname, int flen, |
1507 | char *path, int plen, | 1510 | char *path, |
1508 | struct svc_fh *resfhp, | 1511 | struct svc_fh *resfhp) |
1509 | struct iattr *iap) | ||
1510 | { | 1512 | { |
1511 | struct dentry *dentry, *dnew; | 1513 | struct dentry *dentry, *dnew; |
1512 | __be32 err, cerr; | 1514 | __be32 err, cerr; |
1513 | int host_err; | 1515 | int host_err; |
1514 | 1516 | ||
1515 | err = nfserr_noent; | 1517 | err = nfserr_noent; |
1516 | if (!flen || !plen) | 1518 | if (!flen || path[0] == '\0') |
1517 | goto out; | 1519 | goto out; |
1518 | err = nfserr_exist; | 1520 | err = nfserr_exist; |
1519 | if (isdotent(fname, flen)) | 1521 | if (isdotent(fname, flen)) |
@@ -1534,18 +1536,7 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp, | |||
1534 | if (IS_ERR(dnew)) | 1536 | if (IS_ERR(dnew)) |
1535 | goto out_nfserr; | 1537 | goto out_nfserr; |
1536 | 1538 | ||
1537 | if (unlikely(path[plen] != 0)) { | 1539 | host_err = vfs_symlink(dentry->d_inode, dnew, path); |
1538 | char *path_alloced = kmalloc(plen+1, GFP_KERNEL); | ||
1539 | if (path_alloced == NULL) | ||
1540 | host_err = -ENOMEM; | ||
1541 | else { | ||
1542 | strncpy(path_alloced, path, plen); | ||
1543 | path_alloced[plen] = 0; | ||
1544 | host_err = vfs_symlink(dentry->d_inode, dnew, path_alloced); | ||
1545 | kfree(path_alloced); | ||
1546 | } | ||
1547 | } else | ||
1548 | host_err = vfs_symlink(dentry->d_inode, dnew, path); | ||
1549 | err = nfserrno(host_err); | 1540 | err = nfserrno(host_err); |
1550 | if (!err) | 1541 | if (!err) |
1551 | err = nfserrno(commit_metadata(fhp)); | 1542 | err = nfserrno(commit_metadata(fhp)); |
@@ -2093,8 +2084,7 @@ nfsd_racache_init(int cache_size) | |||
2093 | if (raparm_hash[0].pb_head) | 2084 | if (raparm_hash[0].pb_head) |
2094 | return 0; | 2085 | return 0; |
2095 | nperbucket = DIV_ROUND_UP(cache_size, RAPARM_HASH_SIZE); | 2086 | nperbucket = DIV_ROUND_UP(cache_size, RAPARM_HASH_SIZE); |
2096 | if (nperbucket < 2) | 2087 | nperbucket = max(2, nperbucket); |
2097 | nperbucket = 2; | ||
2098 | cache_size = nperbucket * RAPARM_HASH_SIZE; | 2088 | cache_size = nperbucket * RAPARM_HASH_SIZE; |
2099 | 2089 | ||
2100 | dprintk("nfsd: allocating %d readahead buffers.\n", cache_size); | 2090 | dprintk("nfsd: allocating %d readahead buffers.\n", cache_size); |
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h index 91b6ae3f658b..c2ff3f14e5f6 100644 --- a/fs/nfsd/vfs.h +++ b/fs/nfsd/vfs.h | |||
@@ -74,9 +74,9 @@ struct raparms; | |||
74 | __be32 nfsd_get_tmp_read_open(struct svc_rqst *, struct svc_fh *, | 74 | __be32 nfsd_get_tmp_read_open(struct svc_rqst *, struct svc_fh *, |
75 | struct file **, struct raparms **); | 75 | struct file **, struct raparms **); |
76 | void nfsd_put_tmp_read_open(struct file *, struct raparms *); | 76 | void nfsd_put_tmp_read_open(struct file *, struct raparms *); |
77 | int nfsd_splice_read(struct svc_rqst *, | 77 | __be32 nfsd_splice_read(struct svc_rqst *, |
78 | struct file *, loff_t, unsigned long *); | 78 | struct file *, loff_t, unsigned long *); |
79 | int nfsd_readv(struct file *, loff_t, struct kvec *, int, | 79 | __be32 nfsd_readv(struct file *, loff_t, struct kvec *, int, |
80 | unsigned long *); | 80 | unsigned long *); |
81 | __be32 nfsd_read(struct svc_rqst *, struct svc_fh *, | 81 | __be32 nfsd_read(struct svc_rqst *, struct svc_fh *, |
82 | loff_t, struct kvec *, int, unsigned long *); | 82 | loff_t, struct kvec *, int, unsigned long *); |
@@ -85,8 +85,8 @@ __be32 nfsd_write(struct svc_rqst *, struct svc_fh *,struct file *, | |||
85 | __be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *, | 85 | __be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *, |
86 | char *, int *); | 86 | char *, int *); |
87 | __be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *, | 87 | __be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *, |
88 | char *name, int len, char *path, int plen, | 88 | char *name, int len, char *path, |
89 | struct svc_fh *res, struct iattr *); | 89 | struct svc_fh *res); |
90 | __be32 nfsd_link(struct svc_rqst *, struct svc_fh *, | 90 | __be32 nfsd_link(struct svc_rqst *, struct svc_fh *, |
91 | char *, int, struct svc_fh *); | 91 | char *, int, struct svc_fh *); |
92 | __be32 nfsd_rename(struct svc_rqst *, | 92 | __be32 nfsd_rename(struct svc_rqst *, |
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h index 18cbb6d9c8a9..465e7799742a 100644 --- a/fs/nfsd/xdr4.h +++ b/fs/nfsd/xdr4.h | |||
@@ -55,6 +55,7 @@ struct nfsd4_compound_state { | |||
55 | struct svc_fh current_fh; | 55 | struct svc_fh current_fh; |
56 | struct svc_fh save_fh; | 56 | struct svc_fh save_fh; |
57 | struct nfs4_stateowner *replay_owner; | 57 | struct nfs4_stateowner *replay_owner; |
58 | struct nfs4_client *clp; | ||
58 | /* For sessions DRC */ | 59 | /* For sessions DRC */ |
59 | struct nfsd4_session *session; | 60 | struct nfsd4_session *session; |
60 | struct nfsd4_slot *slot; | 61 | struct nfsd4_slot *slot; |
@@ -107,8 +108,8 @@ struct nfsd4_create { | |||
107 | u32 cr_type; /* request */ | 108 | u32 cr_type; /* request */ |
108 | union { /* request */ | 109 | union { /* request */ |
109 | struct { | 110 | struct { |
110 | u32 namelen; | 111 | u32 datalen; |
111 | char *name; | 112 | char *data; |
112 | } link; /* NF4LNK */ | 113 | } link; /* NF4LNK */ |
113 | struct { | 114 | struct { |
114 | u32 specdata1; | 115 | u32 specdata1; |
@@ -121,8 +122,8 @@ struct nfsd4_create { | |||
121 | struct nfs4_acl *cr_acl; | 122 | struct nfs4_acl *cr_acl; |
122 | struct xdr_netobj cr_label; | 123 | struct xdr_netobj cr_label; |
123 | }; | 124 | }; |
124 | #define cr_linklen u.link.namelen | 125 | #define cr_datalen u.link.datalen |
125 | #define cr_linkname u.link.name | 126 | #define cr_data u.link.data |
126 | #define cr_specdata1 u.dev.specdata1 | 127 | #define cr_specdata1 u.dev.specdata1 |
127 | #define cr_specdata2 u.dev.specdata2 | 128 | #define cr_specdata2 u.dev.specdata2 |
128 | 129 | ||
@@ -478,6 +479,14 @@ struct nfsd4_op { | |||
478 | 479 | ||
479 | bool nfsd4_cache_this_op(struct nfsd4_op *); | 480 | bool nfsd4_cache_this_op(struct nfsd4_op *); |
480 | 481 | ||
482 | /* | ||
483 | * Memory needed just for the duration of processing one compound: | ||
484 | */ | ||
485 | struct svcxdr_tmpbuf { | ||
486 | struct svcxdr_tmpbuf *next; | ||
487 | char buf[]; | ||
488 | }; | ||
489 | |||
481 | struct nfsd4_compoundargs { | 490 | struct nfsd4_compoundargs { |
482 | /* scratch variables for XDR decode */ | 491 | /* scratch variables for XDR decode */ |
483 | __be32 * p; | 492 | __be32 * p; |
@@ -486,11 +495,7 @@ struct nfsd4_compoundargs { | |||
486 | int pagelen; | 495 | int pagelen; |
487 | __be32 tmp[8]; | 496 | __be32 tmp[8]; |
488 | __be32 * tmpp; | 497 | __be32 * tmpp; |
489 | struct tmpbuf { | 498 | struct svcxdr_tmpbuf *to_free; |
490 | struct tmpbuf *next; | ||
491 | void (*release)(const void *); | ||
492 | void *buf; | ||
493 | } *to_free; | ||
494 | 499 | ||
495 | struct svc_rqst *rqstp; | 500 | struct svc_rqst *rqstp; |
496 | 501 | ||
@@ -574,7 +579,6 @@ extern __be32 nfsd4_setclientid(struct svc_rqst *rqstp, | |||
574 | extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, | 579 | extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, |
575 | struct nfsd4_compound_state *, | 580 | struct nfsd4_compound_state *, |
576 | struct nfsd4_setclientid_confirm *setclientid_confirm); | 581 | struct nfsd4_setclientid_confirm *setclientid_confirm); |
577 | extern void nfsd4_store_cache_entry(struct nfsd4_compoundres *resp); | ||
578 | extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp, | 582 | extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp, |
579 | struct nfsd4_compound_state *, struct nfsd4_exchange_id *); | 583 | struct nfsd4_compound_state *, struct nfsd4_exchange_id *); |
580 | extern __be32 nfsd4_backchannel_ctl(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_backchannel_ctl *); | 584 | extern __be32 nfsd4_backchannel_ctl(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_backchannel_ctl *); |
@@ -585,6 +589,7 @@ extern __be32 nfsd4_create_session(struct svc_rqst *, | |||
585 | extern __be32 nfsd4_sequence(struct svc_rqst *, | 589 | extern __be32 nfsd4_sequence(struct svc_rqst *, |
586 | struct nfsd4_compound_state *, | 590 | struct nfsd4_compound_state *, |
587 | struct nfsd4_sequence *); | 591 | struct nfsd4_sequence *); |
592 | extern void nfsd4_sequence_done(struct nfsd4_compoundres *resp); | ||
588 | extern __be32 nfsd4_destroy_session(struct svc_rqst *, | 593 | extern __be32 nfsd4_destroy_session(struct svc_rqst *, |
589 | struct nfsd4_compound_state *, | 594 | struct nfsd4_compound_state *, |
590 | struct nfsd4_destroy_session *); | 595 | struct nfsd4_destroy_session *); |
@@ -594,7 +599,9 @@ extern __be32 nfsd4_process_open1(struct nfsd4_compound_state *, | |||
594 | struct nfsd4_open *open, struct nfsd_net *nn); | 599 | struct nfsd4_open *open, struct nfsd_net *nn); |
595 | extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp, | 600 | extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp, |
596 | struct svc_fh *current_fh, struct nfsd4_open *open); | 601 | struct svc_fh *current_fh, struct nfsd4_open *open); |
597 | extern void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status); | 602 | extern void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate); |
603 | extern void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate, | ||
604 | struct nfsd4_open *open, __be32 status); | ||
598 | extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp, | 605 | extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp, |
599 | struct nfsd4_compound_state *, struct nfsd4_open_confirm *oc); | 606 | struct nfsd4_compound_state *, struct nfsd4_open_confirm *oc); |
600 | extern __be32 nfsd4_close(struct svc_rqst *rqstp, | 607 | extern __be32 nfsd4_close(struct svc_rqst *rqstp, |
@@ -625,6 +632,7 @@ extern __be32 nfsd4_test_stateid(struct svc_rqst *rqstp, | |||
625 | extern __be32 nfsd4_free_stateid(struct svc_rqst *rqstp, | 632 | extern __be32 nfsd4_free_stateid(struct svc_rqst *rqstp, |
626 | struct nfsd4_compound_state *, struct nfsd4_free_stateid *free_stateid); | 633 | struct nfsd4_compound_state *, struct nfsd4_free_stateid *free_stateid); |
627 | extern void nfsd4_bump_seqid(struct nfsd4_compound_state *, __be32 nfserr); | 634 | extern void nfsd4_bump_seqid(struct nfsd4_compound_state *, __be32 nfserr); |
635 | |||
628 | #endif | 636 | #endif |
629 | 637 | ||
630 | /* | 638 | /* |
diff --git a/fs/proc/Makefile b/fs/proc/Makefile index 239493ec718e..7151ea428041 100644 --- a/fs/proc/Makefile +++ b/fs/proc/Makefile | |||
@@ -23,6 +23,7 @@ proc-y += version.o | |||
23 | proc-y += softirqs.o | 23 | proc-y += softirqs.o |
24 | proc-y += namespaces.o | 24 | proc-y += namespaces.o |
25 | proc-y += self.o | 25 | proc-y += self.o |
26 | proc-y += thread_self.o | ||
26 | proc-$(CONFIG_PROC_SYSCTL) += proc_sysctl.o | 27 | proc-$(CONFIG_PROC_SYSCTL) += proc_sysctl.o |
27 | proc-$(CONFIG_NET) += proc_net.o | 28 | proc-$(CONFIG_NET) += proc_net.o |
28 | proc-$(CONFIG_PROC_KCORE) += kcore.o | 29 | proc-$(CONFIG_PROC_KCORE) += kcore.o |
diff --git a/fs/proc/base.c b/fs/proc/base.c index 043c83cb51f9..baf852b648ad 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -2814,7 +2814,7 @@ retry: | |||
2814 | return iter; | 2814 | return iter; |
2815 | } | 2815 | } |
2816 | 2816 | ||
2817 | #define TGID_OFFSET (FIRST_PROCESS_ENTRY + 1) | 2817 | #define TGID_OFFSET (FIRST_PROCESS_ENTRY + 2) |
2818 | 2818 | ||
2819 | /* for the /proc/ directory itself, after non-process stuff has been done */ | 2819 | /* for the /proc/ directory itself, after non-process stuff has been done */ |
2820 | int proc_pid_readdir(struct file *file, struct dir_context *ctx) | 2820 | int proc_pid_readdir(struct file *file, struct dir_context *ctx) |
@@ -2826,14 +2826,19 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx) | |||
2826 | if (pos >= PID_MAX_LIMIT + TGID_OFFSET) | 2826 | if (pos >= PID_MAX_LIMIT + TGID_OFFSET) |
2827 | return 0; | 2827 | return 0; |
2828 | 2828 | ||
2829 | if (pos == TGID_OFFSET - 1) { | 2829 | if (pos == TGID_OFFSET - 2) { |
2830 | struct inode *inode = ns->proc_self->d_inode; | 2830 | struct inode *inode = ns->proc_self->d_inode; |
2831 | if (!dir_emit(ctx, "self", 4, inode->i_ino, DT_LNK)) | 2831 | if (!dir_emit(ctx, "self", 4, inode->i_ino, DT_LNK)) |
2832 | return 0; | 2832 | return 0; |
2833 | iter.tgid = 0; | 2833 | ctx->pos = pos = pos + 1; |
2834 | } else { | 2834 | } |
2835 | iter.tgid = pos - TGID_OFFSET; | 2835 | if (pos == TGID_OFFSET - 1) { |
2836 | struct inode *inode = ns->proc_thread_self->d_inode; | ||
2837 | if (!dir_emit(ctx, "thread-self", 11, inode->i_ino, DT_LNK)) | ||
2838 | return 0; | ||
2839 | ctx->pos = pos = pos + 1; | ||
2836 | } | 2840 | } |
2841 | iter.tgid = pos - TGID_OFFSET; | ||
2837 | iter.task = NULL; | 2842 | iter.task = NULL; |
2838 | for (iter = next_tgid(ns, iter); | 2843 | for (iter = next_tgid(ns, iter); |
2839 | iter.task; | 2844 | iter.task; |
@@ -2862,6 +2867,9 @@ static const struct pid_entry tid_base_stuff[] = { | |||
2862 | DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations), | 2867 | DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations), |
2863 | DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations), | 2868 | DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations), |
2864 | DIR("ns", S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations), | 2869 | DIR("ns", S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations), |
2870 | #ifdef CONFIG_NET | ||
2871 | DIR("net", S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations), | ||
2872 | #endif | ||
2865 | REG("environ", S_IRUSR, proc_environ_operations), | 2873 | REG("environ", S_IRUSR, proc_environ_operations), |
2866 | ONE("auxv", S_IRUSR, proc_pid_auxv), | 2874 | ONE("auxv", S_IRUSR, proc_pid_auxv), |
2867 | ONE("status", S_IRUGO, proc_pid_status), | 2875 | ONE("status", S_IRUGO, proc_pid_status), |
diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 0adbc02d60e3..333080d7a671 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c | |||
@@ -442,6 +442,7 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) | |||
442 | int proc_fill_super(struct super_block *s) | 442 | int proc_fill_super(struct super_block *s) |
443 | { | 443 | { |
444 | struct inode *root_inode; | 444 | struct inode *root_inode; |
445 | int ret; | ||
445 | 446 | ||
446 | s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC; | 447 | s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC; |
447 | s->s_blocksize = 1024; | 448 | s->s_blocksize = 1024; |
@@ -463,5 +464,9 @@ int proc_fill_super(struct super_block *s) | |||
463 | return -ENOMEM; | 464 | return -ENOMEM; |
464 | } | 465 | } |
465 | 466 | ||
466 | return proc_setup_self(s); | 467 | ret = proc_setup_self(s); |
468 | if (ret) { | ||
469 | return ret; | ||
470 | } | ||
471 | return proc_setup_thread_self(s); | ||
467 | } | 472 | } |
diff --git a/fs/proc/internal.h b/fs/proc/internal.h index a024cf7b260f..7da13e49128a 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h | |||
@@ -231,6 +231,12 @@ static inline int proc_net_init(void) { return 0; } | |||
231 | extern int proc_setup_self(struct super_block *); | 231 | extern int proc_setup_self(struct super_block *); |
232 | 232 | ||
233 | /* | 233 | /* |
234 | * proc_thread_self.c | ||
235 | */ | ||
236 | extern int proc_setup_thread_self(struct super_block *); | ||
237 | extern void proc_thread_self_init(void); | ||
238 | |||
239 | /* | ||
234 | * proc_sysctl.c | 240 | * proc_sysctl.c |
235 | */ | 241 | */ |
236 | #ifdef CONFIG_PROC_SYSCTL | 242 | #ifdef CONFIG_PROC_SYSCTL |
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c index 4677bb7dc7c2..39481028ec08 100644 --- a/fs/proc/proc_net.c +++ b/fs/proc/proc_net.c | |||
@@ -113,9 +113,11 @@ static struct net *get_proc_task_net(struct inode *dir) | |||
113 | rcu_read_lock(); | 113 | rcu_read_lock(); |
114 | task = pid_task(proc_pid(dir), PIDTYPE_PID); | 114 | task = pid_task(proc_pid(dir), PIDTYPE_PID); |
115 | if (task != NULL) { | 115 | if (task != NULL) { |
116 | ns = task_nsproxy(task); | 116 | task_lock(task); |
117 | ns = task->nsproxy; | ||
117 | if (ns != NULL) | 118 | if (ns != NULL) |
118 | net = get_net(ns->net_ns); | 119 | net = get_net(ns->net_ns); |
120 | task_unlock(task); | ||
119 | } | 121 | } |
120 | rcu_read_unlock(); | 122 | rcu_read_unlock(); |
121 | 123 | ||
@@ -224,7 +226,7 @@ static struct pernet_operations __net_initdata proc_net_ns_ops = { | |||
224 | 226 | ||
225 | int __init proc_net_init(void) | 227 | int __init proc_net_init(void) |
226 | { | 228 | { |
227 | proc_symlink("net", NULL, "self/net"); | 229 | proc_symlink("net", NULL, "thread-self/net"); |
228 | 230 | ||
229 | return register_pernet_subsys(&proc_net_ns_ops); | 231 | return register_pernet_subsys(&proc_net_ns_ops); |
230 | } | 232 | } |
diff --git a/fs/proc/root.c b/fs/proc/root.c index 574bafc41f0b..6296c7626963 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c | |||
@@ -149,6 +149,8 @@ static void proc_kill_sb(struct super_block *sb) | |||
149 | ns = (struct pid_namespace *)sb->s_fs_info; | 149 | ns = (struct pid_namespace *)sb->s_fs_info; |
150 | if (ns->proc_self) | 150 | if (ns->proc_self) |
151 | dput(ns->proc_self); | 151 | dput(ns->proc_self); |
152 | if (ns->proc_thread_self) | ||
153 | dput(ns->proc_thread_self); | ||
152 | kill_anon_super(sb); | 154 | kill_anon_super(sb); |
153 | put_pid_ns(ns); | 155 | put_pid_ns(ns); |
154 | } | 156 | } |
@@ -170,7 +172,8 @@ void __init proc_root_init(void) | |||
170 | return; | 172 | return; |
171 | 173 | ||
172 | proc_self_init(); | 174 | proc_self_init(); |
173 | proc_symlink("mounts", NULL, "self/mounts"); | 175 | proc_thread_self_init(); |
176 | proc_symlink("mounts", NULL, "thread-self/mounts"); | ||
174 | 177 | ||
175 | proc_net_init(); | 178 | proc_net_init(); |
176 | 179 | ||
diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c new file mode 100644 index 000000000000..59075b509df3 --- /dev/null +++ b/fs/proc/thread_self.c | |||
@@ -0,0 +1,85 @@ | |||
1 | #include <linux/sched.h> | ||
2 | #include <linux/namei.h> | ||
3 | #include <linux/slab.h> | ||
4 | #include <linux/pid_namespace.h> | ||
5 | #include "internal.h" | ||
6 | |||
7 | /* | ||
8 | * /proc/thread-self: | ||
9 | */ | ||
10 | static int proc_thread_self_readlink(struct dentry *dentry, char __user *buffer, | ||
11 | int buflen) | ||
12 | { | ||
13 | struct pid_namespace *ns = dentry->d_sb->s_fs_info; | ||
14 | pid_t tgid = task_tgid_nr_ns(current, ns); | ||
15 | pid_t pid = task_pid_nr_ns(current, ns); | ||
16 | char tmp[PROC_NUMBUF + 6 + PROC_NUMBUF]; | ||
17 | if (!pid) | ||
18 | return -ENOENT; | ||
19 | sprintf(tmp, "%d/task/%d", tgid, pid); | ||
20 | return readlink_copy(buffer, buflen, tmp); | ||
21 | } | ||
22 | |||
23 | static void *proc_thread_self_follow_link(struct dentry *dentry, struct nameidata *nd) | ||
24 | { | ||
25 | struct pid_namespace *ns = dentry->d_sb->s_fs_info; | ||
26 | pid_t tgid = task_tgid_nr_ns(current, ns); | ||
27 | pid_t pid = task_pid_nr_ns(current, ns); | ||
28 | char *name = ERR_PTR(-ENOENT); | ||
29 | if (pid) { | ||
30 | name = kmalloc(PROC_NUMBUF + 6 + PROC_NUMBUF, GFP_KERNEL); | ||
31 | if (!name) | ||
32 | name = ERR_PTR(-ENOMEM); | ||
33 | else | ||
34 | sprintf(name, "%d/task/%d", tgid, pid); | ||
35 | } | ||
36 | nd_set_link(nd, name); | ||
37 | return NULL; | ||
38 | } | ||
39 | |||
40 | static const struct inode_operations proc_thread_self_inode_operations = { | ||
41 | .readlink = proc_thread_self_readlink, | ||
42 | .follow_link = proc_thread_self_follow_link, | ||
43 | .put_link = kfree_put_link, | ||
44 | }; | ||
45 | |||
46 | static unsigned thread_self_inum; | ||
47 | |||
48 | int proc_setup_thread_self(struct super_block *s) | ||
49 | { | ||
50 | struct inode *root_inode = s->s_root->d_inode; | ||
51 | struct pid_namespace *ns = s->s_fs_info; | ||
52 | struct dentry *thread_self; | ||
53 | |||
54 | mutex_lock(&root_inode->i_mutex); | ||
55 | thread_self = d_alloc_name(s->s_root, "thread-self"); | ||
56 | if (thread_self) { | ||
57 | struct inode *inode = new_inode_pseudo(s); | ||
58 | if (inode) { | ||
59 | inode->i_ino = thread_self_inum; | ||
60 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; | ||
61 | inode->i_mode = S_IFLNK | S_IRWXUGO; | ||
62 | inode->i_uid = GLOBAL_ROOT_UID; | ||
63 | inode->i_gid = GLOBAL_ROOT_GID; | ||
64 | inode->i_op = &proc_thread_self_inode_operations; | ||
65 | d_add(thread_self, inode); | ||
66 | } else { | ||
67 | dput(thread_self); | ||
68 | thread_self = ERR_PTR(-ENOMEM); | ||
69 | } | ||
70 | } else { | ||
71 | thread_self = ERR_PTR(-ENOMEM); | ||
72 | } | ||
73 | mutex_unlock(&root_inode->i_mutex); | ||
74 | if (IS_ERR(thread_self)) { | ||
75 | pr_err("proc_fill_super: can't allocate /proc/thread-self\n"); | ||
76 | return PTR_ERR(thread_self); | ||
77 | } | ||
78 | ns->proc_thread_self = thread_self; | ||
79 | return 0; | ||
80 | } | ||
81 | |||
82 | void __init proc_thread_self_init(void) | ||
83 | { | ||
84 | proc_alloc_inum(&thread_self_inum); | ||
85 | } | ||
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c index 1a81373947f3..73ca1740d839 100644 --- a/fs/proc_namespace.c +++ b/fs/proc_namespace.c | |||
@@ -232,17 +232,15 @@ static int mounts_open_common(struct inode *inode, struct file *file, | |||
232 | if (!task) | 232 | if (!task) |
233 | goto err; | 233 | goto err; |
234 | 234 | ||
235 | rcu_read_lock(); | 235 | task_lock(task); |
236 | nsp = task_nsproxy(task); | 236 | nsp = task->nsproxy; |
237 | if (!nsp || !nsp->mnt_ns) { | 237 | if (!nsp || !nsp->mnt_ns) { |
238 | rcu_read_unlock(); | 238 | task_unlock(task); |
239 | put_task_struct(task); | 239 | put_task_struct(task); |
240 | goto err; | 240 | goto err; |
241 | } | 241 | } |
242 | ns = nsp->mnt_ns; | 242 | ns = nsp->mnt_ns; |
243 | get_mnt_ns(ns); | 243 | get_mnt_ns(ns); |
244 | rcu_read_unlock(); | ||
245 | task_lock(task); | ||
246 | if (!task->fs) { | 244 | if (!task->fs) { |
247 | task_unlock(task); | 245 | task_unlock(task); |
248 | put_task_struct(task); | 246 | put_task_struct(task); |
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 202f0a7171e8..1d9f0f1ff52d 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h | |||
@@ -133,6 +133,7 @@ struct ttm_tt { | |||
133 | * struct ttm_dma_tt | 133 | * struct ttm_dma_tt |
134 | * | 134 | * |
135 | * @ttm: Base ttm_tt struct. | 135 | * @ttm: Base ttm_tt struct. |
136 | * @cpu_address: The CPU address of the pages | ||
136 | * @dma_address: The DMA (bus) addresses of the pages | 137 | * @dma_address: The DMA (bus) addresses of the pages |
137 | * @pages_list: used by some page allocation backend | 138 | * @pages_list: used by some page allocation backend |
138 | * | 139 | * |
@@ -142,6 +143,7 @@ struct ttm_tt { | |||
142 | */ | 143 | */ |
143 | struct ttm_dma_tt { | 144 | struct ttm_dma_tt { |
144 | struct ttm_tt ttm; | 145 | struct ttm_tt ttm; |
146 | void **cpu_address; | ||
145 | dma_addr_t *dma_address; | 147 | dma_addr_t *dma_address; |
146 | struct list_head pages_list; | 148 | struct list_head pages_list; |
147 | }; | 149 | }; |
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 5320153c311b..807cbc46d73e 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
@@ -364,6 +364,17 @@ extern bool osc_sb_apei_support_acked; | |||
364 | #define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010 | 364 | #define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010 |
365 | #define OSC_PCI_CONTROL_MASKS 0x0000001f | 365 | #define OSC_PCI_CONTROL_MASKS 0x0000001f |
366 | 366 | ||
367 | #define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002 | ||
368 | #define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004 | ||
369 | #define ACPI_GSB_ACCESS_ATTRIB_BYTE 0x00000006 | ||
370 | #define ACPI_GSB_ACCESS_ATTRIB_WORD 0x00000008 | ||
371 | #define ACPI_GSB_ACCESS_ATTRIB_BLOCK 0x0000000A | ||
372 | #define ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE 0x0000000B | ||
373 | #define ACPI_GSB_ACCESS_ATTRIB_WORD_CALL 0x0000000C | ||
374 | #define ACPI_GSB_ACCESS_ATTRIB_BLOCK_CALL 0x0000000D | ||
375 | #define ACPI_GSB_ACCESS_ATTRIB_RAW_BYTES 0x0000000E | ||
376 | #define ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS 0x0000000F | ||
377 | |||
367 | extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, | 378 | extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, |
368 | u32 *mask, u32 req); | 379 | u32 *mask, u32 req); |
369 | 380 | ||
diff --git a/include/linux/cred.h b/include/linux/cred.h index f61d6c8f5ef3..b2d0820837c4 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h | |||
@@ -259,6 +259,15 @@ static inline void put_cred(const struct cred *_cred) | |||
259 | rcu_dereference_protected(current->cred, 1) | 259 | rcu_dereference_protected(current->cred, 1) |
260 | 260 | ||
261 | /** | 261 | /** |
262 | * current_real_cred - Access the current task's objective credentials | ||
263 | * | ||
264 | * Access the objective credentials of the current task. RCU-safe, | ||
265 | * since nobody else can modify it. | ||
266 | */ | ||
267 | #define current_real_cred() \ | ||
268 | rcu_dereference_protected(current->real_cred, 1) | ||
269 | |||
270 | /** | ||
262 | * __task_cred - Access a task's objective credentials | 271 | * __task_cred - Access a task's objective credentials |
263 | * @task: The task to query | 272 | * @task: The task to query |
264 | * | 273 | * |
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 06c6faa9e5cc..28672e87e910 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h | |||
@@ -571,40 +571,6 @@ do { \ | |||
571 | __trace_printk(ip, fmt, ##args); \ | 571 | __trace_printk(ip, fmt, ##args); \ |
572 | } while (0) | 572 | } while (0) |
573 | 573 | ||
574 | /** | ||
575 | * tracepoint_string - register constant persistent string to trace system | ||
576 | * @str - a constant persistent string that will be referenced in tracepoints | ||
577 | * | ||
578 | * If constant strings are being used in tracepoints, it is faster and | ||
579 | * more efficient to just save the pointer to the string and reference | ||
580 | * that with a printf "%s" instead of saving the string in the ring buffer | ||
581 | * and wasting space and time. | ||
582 | * | ||
583 | * The problem with the above approach is that userspace tools that read | ||
584 | * the binary output of the trace buffers do not have access to the string. | ||
585 | * Instead they just show the address of the string which is not very | ||
586 | * useful to users. | ||
587 | * | ||
588 | * With tracepoint_string(), the string will be registered to the tracing | ||
589 | * system and exported to userspace via the debugfs/tracing/printk_formats | ||
590 | * file that maps the string address to the string text. This way userspace | ||
591 | * tools that read the binary buffers have a way to map the pointers to | ||
592 | * the ASCII strings they represent. | ||
593 | * | ||
594 | * The @str used must be a constant string and persistent as it would not | ||
595 | * make sense to show a string that no longer exists. But it is still fine | ||
596 | * to be used with modules, because when modules are unloaded, if they | ||
597 | * had tracepoints, the ring buffers are cleared too. As long as the string | ||
598 | * does not change during the life of the module, it is fine to use | ||
599 | * tracepoint_string() within a module. | ||
600 | */ | ||
601 | #define tracepoint_string(str) \ | ||
602 | ({ \ | ||
603 | static const char *___tp_str __tracepoint_string = str; \ | ||
604 | ___tp_str; \ | ||
605 | }) | ||
606 | #define __tracepoint_string __attribute__((section("__tracepoint_str"))) | ||
607 | |||
608 | #ifdef CONFIG_PERF_EVENTS | 574 | #ifdef CONFIG_PERF_EVENTS |
609 | struct perf_event; | 575 | struct perf_event; |
610 | 576 | ||
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index b556e0ab946f..ea507665896c 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
@@ -577,4 +577,16 @@ static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node | |||
577 | } | 577 | } |
578 | #endif /* CONFIG_OF */ | 578 | #endif /* CONFIG_OF */ |
579 | 579 | ||
580 | #ifdef CONFIG_I2C_ACPI | ||
581 | int acpi_i2c_install_space_handler(struct i2c_adapter *adapter); | ||
582 | void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter); | ||
583 | void acpi_i2c_register_devices(struct i2c_adapter *adap); | ||
584 | #else | ||
585 | static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { } | ||
586 | static inline void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter) | ||
587 | { } | ||
588 | static inline int acpi_i2c_install_space_handler(struct i2c_adapter *adapter) | ||
589 | { return 0; } | ||
590 | #endif | ||
591 | |||
580 | #endif /* _LINUX_I2C_H */ | 592 | #endif /* _LINUX_I2C_H */ |
diff --git a/include/linux/i2c/s6000.h b/include/linux/i2c/s6000.h deleted file mode 100644 index d9b34bfdae76..000000000000 --- a/include/linux/i2c/s6000.h +++ /dev/null | |||
@@ -1,10 +0,0 @@ | |||
1 | #ifndef __LINUX_I2C_S6000_H | ||
2 | #define __LINUX_I2C_S6000_H | ||
3 | |||
4 | struct s6_i2c_platform_data { | ||
5 | const char *clock; /* the clock to use */ | ||
6 | int bus_num; /* the bus number to register */ | ||
7 | }; | ||
8 | |||
9 | #endif | ||
10 | |||
diff --git a/include/linux/mount.h b/include/linux/mount.h index 839bac270904..b0c1e6574e7f 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h | |||
@@ -42,13 +42,20 @@ struct mnt_namespace; | |||
42 | * flag, consider how it interacts with shared mounts. | 42 | * flag, consider how it interacts with shared mounts. |
43 | */ | 43 | */ |
44 | #define MNT_SHARED_MASK (MNT_UNBINDABLE) | 44 | #define MNT_SHARED_MASK (MNT_UNBINDABLE) |
45 | #define MNT_PROPAGATION_MASK (MNT_SHARED | MNT_UNBINDABLE) | 45 | #define MNT_USER_SETTABLE_MASK (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC \ |
46 | | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME \ | ||
47 | | MNT_READONLY) | ||
48 | #define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME ) | ||
46 | 49 | ||
47 | #define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \ | 50 | #define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \ |
48 | MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED) | 51 | MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED) |
49 | 52 | ||
50 | #define MNT_INTERNAL 0x4000 | 53 | #define MNT_INTERNAL 0x4000 |
51 | 54 | ||
55 | #define MNT_LOCK_ATIME 0x040000 | ||
56 | #define MNT_LOCK_NOEXEC 0x080000 | ||
57 | #define MNT_LOCK_NOSUID 0x100000 | ||
58 | #define MNT_LOCK_NODEV 0x200000 | ||
52 | #define MNT_LOCK_READONLY 0x400000 | 59 | #define MNT_LOCK_READONLY 0x400000 |
53 | #define MNT_LOCKED 0x800000 | 60 | #define MNT_LOCKED 0x800000 |
54 | #define MNT_DOOMED 0x1000000 | 61 | #define MNT_DOOMED 0x1000000 |
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h index b4ec59d159ac..35fa08fd7739 100644 --- a/include/linux/nsproxy.h +++ b/include/linux/nsproxy.h | |||
@@ -40,32 +40,28 @@ extern struct nsproxy init_nsproxy; | |||
40 | * the namespaces access rules are: | 40 | * the namespaces access rules are: |
41 | * | 41 | * |
42 | * 1. only current task is allowed to change tsk->nsproxy pointer or | 42 | * 1. only current task is allowed to change tsk->nsproxy pointer or |
43 | * any pointer on the nsproxy itself | 43 | * any pointer on the nsproxy itself. Current must hold the task_lock |
44 | * when changing tsk->nsproxy. | ||
44 | * | 45 | * |
45 | * 2. when accessing (i.e. reading) current task's namespaces - no | 46 | * 2. when accessing (i.e. reading) current task's namespaces - no |
46 | * precautions should be taken - just dereference the pointers | 47 | * precautions should be taken - just dereference the pointers |
47 | * | 48 | * |
48 | * 3. the access to other task namespaces is performed like this | 49 | * 3. the access to other task namespaces is performed like this |
49 | * rcu_read_lock(); | 50 | * task_lock(task); |
50 | * nsproxy = task_nsproxy(tsk); | 51 | * nsproxy = task->nsproxy; |
51 | * if (nsproxy != NULL) { | 52 | * if (nsproxy != NULL) { |
52 | * / * | 53 | * / * |
53 | * * work with the namespaces here | 54 | * * work with the namespaces here |
54 | * * e.g. get the reference on one of them | 55 | * * e.g. get the reference on one of them |
55 | * * / | 56 | * * / |
56 | * } / * | 57 | * } / * |
57 | * * NULL task_nsproxy() means that this task is | 58 | * * NULL task->nsproxy means that this task is |
58 | * * almost dead (zombie) | 59 | * * almost dead (zombie) |
59 | * * / | 60 | * * / |
60 | * rcu_read_unlock(); | 61 | * task_unlock(task); |
61 | * | 62 | * |
62 | */ | 63 | */ |
63 | 64 | ||
64 | static inline struct nsproxy *task_nsproxy(struct task_struct *tsk) | ||
65 | { | ||
66 | return rcu_dereference(tsk->nsproxy); | ||
67 | } | ||
68 | |||
69 | int copy_namespaces(unsigned long flags, struct task_struct *tsk); | 65 | int copy_namespaces(unsigned long flags, struct task_struct *tsk); |
70 | void exit_task_namespaces(struct task_struct *tsk); | 66 | void exit_task_namespaces(struct task_struct *tsk); |
71 | void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new); | 67 | void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new); |
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index 7246ef3d4455..1997ffc295a7 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h | |||
@@ -33,6 +33,7 @@ struct pid_namespace { | |||
33 | #ifdef CONFIG_PROC_FS | 33 | #ifdef CONFIG_PROC_FS |
34 | struct vfsmount *proc_mnt; | 34 | struct vfsmount *proc_mnt; |
35 | struct dentry *proc_self; | 35 | struct dentry *proc_self; |
36 | struct dentry *proc_thread_self; | ||
36 | #endif | 37 | #endif |
37 | #ifdef CONFIG_BSD_PROCESS_ACCT | 38 | #ifdef CONFIG_BSD_PROCESS_ACCT |
38 | struct bsd_acct_struct *bacct; | 39 | struct bsd_acct_struct *bacct; |
diff --git a/include/linux/sched.h b/include/linux/sched.h index db2f6474e95e..857ba40426ba 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -2360,8 +2360,10 @@ static inline int on_sig_stack(unsigned long sp) | |||
2360 | 2360 | ||
2361 | static inline int sas_ss_flags(unsigned long sp) | 2361 | static inline int sas_ss_flags(unsigned long sp) |
2362 | { | 2362 | { |
2363 | return (current->sas_ss_size == 0 ? SS_DISABLE | 2363 | if (!current->sas_ss_size) |
2364 | : on_sig_stack(sp) ? SS_ONSTACK : 0); | 2364 | return SS_DISABLE; |
2365 | |||
2366 | return on_sig_stack(sp) ? SS_ONSTACK : 0; | ||
2365 | } | 2367 | } |
2366 | 2368 | ||
2367 | static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig) | 2369 | static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig) |
diff --git a/include/linux/signal.h b/include/linux/signal.h index c9e65360c49a..750196fcc0a5 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h | |||
@@ -280,9 +280,8 @@ struct ksignal { | |||
280 | int sig; | 280 | int sig; |
281 | }; | 281 | }; |
282 | 282 | ||
283 | extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie); | 283 | extern int get_signal(struct ksignal *ksig); |
284 | extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping); | 284 | extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping); |
285 | extern void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka, struct pt_regs *regs, int stepping); | ||
286 | extern void exit_signals(struct task_struct *tsk); | 285 | extern void exit_signals(struct task_struct *tsk); |
287 | extern void kernel_sigaction(int, __sighandler_t); | 286 | extern void kernel_sigaction(int, __sighandler_t); |
288 | 287 | ||
@@ -301,18 +300,6 @@ static inline void disallow_signal(int sig) | |||
301 | kernel_sigaction(sig, SIG_IGN); | 300 | kernel_sigaction(sig, SIG_IGN); |
302 | } | 301 | } |
303 | 302 | ||
304 | /* | ||
305 | * Eventually that'll replace get_signal_to_deliver(); macro for now, | ||
306 | * to avoid nastiness with include order. | ||
307 | */ | ||
308 | #define get_signal(ksig) \ | ||
309 | ({ \ | ||
310 | struct ksignal *p = (ksig); \ | ||
311 | p->sig = get_signal_to_deliver(&p->info, &p->ka, \ | ||
312 | signal_pt_regs(), NULL);\ | ||
313 | p->sig > 0; \ | ||
314 | }) | ||
315 | |||
316 | extern struct kmem_cache *sighand_cachep; | 303 | extern struct kmem_cache *sighand_cachep; |
317 | 304 | ||
318 | int unhandled_signal(struct task_struct *tsk, int sig); | 305 | int unhandled_signal(struct task_struct *tsk, int sig); |
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 1bc7cd05b22e..cf61ecd148e0 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h | |||
@@ -236,7 +236,7 @@ struct svc_rqst { | |||
236 | struct svc_cred rq_cred; /* auth info */ | 236 | struct svc_cred rq_cred; /* auth info */ |
237 | void * rq_xprt_ctxt; /* transport specific context ptr */ | 237 | void * rq_xprt_ctxt; /* transport specific context ptr */ |
238 | struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */ | 238 | struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */ |
239 | int rq_usedeferral; /* use deferral */ | 239 | bool rq_usedeferral; /* use deferral */ |
240 | 240 | ||
241 | size_t rq_xprt_hlen; /* xprt header len */ | 241 | size_t rq_xprt_hlen; /* xprt header len */ |
242 | struct xdr_buf rq_arg; | 242 | struct xdr_buf rq_arg; |
@@ -277,7 +277,7 @@ struct svc_rqst { | |||
277 | struct auth_domain * rq_gssclient; /* "gss/"-style peer info */ | 277 | struct auth_domain * rq_gssclient; /* "gss/"-style peer info */ |
278 | int rq_cachetype; | 278 | int rq_cachetype; |
279 | struct svc_cacherep * rq_cacherep; /* cache info */ | 279 | struct svc_cacherep * rq_cacherep; /* cache info */ |
280 | int rq_splice_ok; /* turned off in gss privacy | 280 | bool rq_splice_ok; /* turned off in gss privacy |
281 | * to prevent encrypting page | 281 | * to prevent encrypting page |
282 | * cache pages */ | 282 | * cache pages */ |
283 | wait_queue_head_t rq_wait; /* synchronization */ | 283 | wait_queue_head_t rq_wait; /* synchronization */ |
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 5cf99a016368..975da754c778 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h | |||
@@ -174,8 +174,7 @@ struct svcxprt_rdma { | |||
174 | * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. */ | 174 | * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. */ |
175 | #define RPCRDMA_ORD (64/4) | 175 | #define RPCRDMA_ORD (64/4) |
176 | #define RPCRDMA_SQ_DEPTH_MULT 8 | 176 | #define RPCRDMA_SQ_DEPTH_MULT 8 |
177 | #define RPCRDMA_MAX_THREADS 16 | 177 | #define RPCRDMA_MAX_REQUESTS 32 |
178 | #define RPCRDMA_MAX_REQUESTS 16 | ||
179 | #define RPCRDMA_MAX_REQ_SIZE 4096 | 178 | #define RPCRDMA_MAX_REQ_SIZE 4096 |
180 | 179 | ||
181 | /* svc_rdma_marshal.c */ | 180 | /* svc_rdma_marshal.c */ |
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 7235040a19b2..ce6e4182a5b2 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h | |||
@@ -25,6 +25,7 @@ struct svc_xprt_ops { | |||
25 | void (*xpo_detach)(struct svc_xprt *); | 25 | void (*xpo_detach)(struct svc_xprt *); |
26 | void (*xpo_free)(struct svc_xprt *); | 26 | void (*xpo_free)(struct svc_xprt *); |
27 | int (*xpo_secure_port)(struct svc_rqst *); | 27 | int (*xpo_secure_port)(struct svc_rqst *); |
28 | void (*xpo_adjust_wspace)(struct svc_xprt *); | ||
28 | }; | 29 | }; |
29 | 30 | ||
30 | struct svc_xprt_class { | 31 | struct svc_xprt_class { |
@@ -33,6 +34,7 @@ struct svc_xprt_class { | |||
33 | struct svc_xprt_ops *xcl_ops; | 34 | struct svc_xprt_ops *xcl_ops; |
34 | struct list_head xcl_list; | 35 | struct list_head xcl_list; |
35 | u32 xcl_max_payload; | 36 | u32 xcl_max_payload; |
37 | int xcl_ident; | ||
36 | }; | 38 | }; |
37 | 39 | ||
38 | /* | 40 | /* |
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 6f8ab7da27c4..84d497297c5f 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h | |||
@@ -133,10 +133,6 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) | |||
133 | 133 | ||
134 | /** | 134 | /** |
135 | * tracehook_signal_handler - signal handler setup is complete | 135 | * tracehook_signal_handler - signal handler setup is complete |
136 | * @sig: number of signal being delivered | ||
137 | * @info: siginfo_t of signal being delivered | ||
138 | * @ka: sigaction setting that chose the handler | ||
139 | * @regs: user register state | ||
140 | * @stepping: nonzero if debugger single-step or block-step in use | 136 | * @stepping: nonzero if debugger single-step or block-step in use |
141 | * | 137 | * |
142 | * Called by the arch code after a signal handler has been set up. | 138 | * Called by the arch code after a signal handler has been set up. |
@@ -146,9 +142,7 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) | |||
146 | * Called without locks, shortly before returning to user mode | 142 | * Called without locks, shortly before returning to user mode |
147 | * (or handling more signals). | 143 | * (or handling more signals). |
148 | */ | 144 | */ |
149 | static inline void tracehook_signal_handler(int sig, siginfo_t *info, | 145 | static inline void tracehook_signal_handler(int stepping) |
150 | const struct k_sigaction *ka, | ||
151 | struct pt_regs *regs, int stepping) | ||
152 | { | 146 | { |
153 | if (stepping) | 147 | if (stepping) |
154 | ptrace_notify(SIGTRAP); | 148 | ptrace_notify(SIGTRAP); |
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 2e2a5f7717e5..b1293f15f592 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
@@ -249,6 +249,50 @@ extern void syscall_unregfunc(void); | |||
249 | 249 | ||
250 | #endif /* CONFIG_TRACEPOINTS */ | 250 | #endif /* CONFIG_TRACEPOINTS */ |
251 | 251 | ||
252 | #ifdef CONFIG_TRACING | ||
253 | /** | ||
254 | * tracepoint_string - register constant persistent string to trace system | ||
255 | * @str - a constant persistent string that will be referenced in tracepoints | ||
256 | * | ||
257 | * If constant strings are being used in tracepoints, it is faster and | ||
258 | * more efficient to just save the pointer to the string and reference | ||
259 | * that with a printf "%s" instead of saving the string in the ring buffer | ||
260 | * and wasting space and time. | ||
261 | * | ||
262 | * The problem with the above approach is that userspace tools that read | ||
263 | * the binary output of the trace buffers do not have access to the string. | ||
264 | * Instead they just show the address of the string which is not very | ||
265 | * useful to users. | ||
266 | * | ||
267 | * With tracepoint_string(), the string will be registered to the tracing | ||
268 | * system and exported to userspace via the debugfs/tracing/printk_formats | ||
269 | * file that maps the string address to the string text. This way userspace | ||
270 | * tools that read the binary buffers have a way to map the pointers to | ||
271 | * the ASCII strings they represent. | ||
272 | * | ||
273 | * The @str used must be a constant string and persistent as it would not | ||
274 | * make sense to show a string that no longer exists. But it is still fine | ||
275 | * to be used with modules, because when modules are unloaded, if they | ||
276 | * had tracepoints, the ring buffers are cleared too. As long as the string | ||
277 | * does not change during the life of the module, it is fine to use | ||
278 | * tracepoint_string() within a module. | ||
279 | */ | ||
280 | #define tracepoint_string(str) \ | ||
281 | ({ \ | ||
282 | static const char *___tp_str __tracepoint_string = str; \ | ||
283 | ___tp_str; \ | ||
284 | }) | ||
285 | #define __tracepoint_string __attribute__((section("__tracepoint_str"))) | ||
286 | #else | ||
287 | /* | ||
288 | * tracepoint_string() is used to save the string address for userspace | ||
289 | * tracing tools. When tracing isn't configured, there's no need to save | ||
290 | * anything. | ||
291 | */ | ||
292 | # define tracepoint_string(str) str | ||
293 | # define __tracepoint_string | ||
294 | #endif | ||
295 | |||
252 | /* | 296 | /* |
253 | * The need for the DECLARE_TRACE_NOARGS() is to handle the prototype | 297 | * The need for the DECLARE_TRACE_NOARGS() is to handle the prototype |
254 | * (void). "void" is a special value in a function prototype and can | 298 | * (void). "void" is a special value in a function prototype and can |
diff --git a/include/net/netlabel.h b/include/net/netlabel.h index a4fc39bb3e4f..7b5a300de7f5 100644 --- a/include/net/netlabel.h +++ b/include/net/netlabel.h | |||
@@ -524,10 +524,10 @@ static inline int netlbl_catmap_setrng(struct netlbl_lsm_catmap **catmap, | |||
524 | { | 524 | { |
525 | return 0; | 525 | return 0; |
526 | } | 526 | } |
527 | static int netlbl_catmap_setlong(struct netlbl_lsm_catmap **catmap, | 527 | static inline int netlbl_catmap_setlong(struct netlbl_lsm_catmap **catmap, |
528 | u32 offset, | 528 | u32 offset, |
529 | unsigned long bitmap, | 529 | unsigned long bitmap, |
530 | gfp_t flags) | 530 | gfp_t flags) |
531 | { | 531 | { |
532 | return 0; | 532 | return 0; |
533 | } | 533 | } |
diff --git a/include/trace/events/ipi.h b/include/trace/events/ipi.h new file mode 100644 index 000000000000..834a7362a610 --- /dev/null +++ b/include/trace/events/ipi.h | |||
@@ -0,0 +1,89 @@ | |||
1 | #undef TRACE_SYSTEM | ||
2 | #define TRACE_SYSTEM ipi | ||
3 | |||
4 | #if !defined(_TRACE_IPI_H) || defined(TRACE_HEADER_MULTI_READ) | ||
5 | #define _TRACE_IPI_H | ||
6 | |||
7 | #include <linux/tracepoint.h> | ||
8 | |||
9 | /** | ||
10 | * ipi_raise - called when a smp cross call is made | ||
11 | * | ||
12 | * @mask: mask of recipient CPUs for the IPI | ||
13 | * @reason: string identifying the IPI purpose | ||
14 | * | ||
15 | * It is necessary for @reason to be a static string declared with | ||
16 | * __tracepoint_string. | ||
17 | */ | ||
18 | TRACE_EVENT(ipi_raise, | ||
19 | |||
20 | TP_PROTO(const struct cpumask *mask, const char *reason), | ||
21 | |||
22 | TP_ARGS(mask, reason), | ||
23 | |||
24 | TP_STRUCT__entry( | ||
25 | __bitmask(target_cpus, nr_cpumask_bits) | ||
26 | __field(const char *, reason) | ||
27 | ), | ||
28 | |||
29 | TP_fast_assign( | ||
30 | __assign_bitmask(target_cpus, cpumask_bits(mask), nr_cpumask_bits); | ||
31 | __entry->reason = reason; | ||
32 | ), | ||
33 | |||
34 | TP_printk("target_mask=%s (%s)", __get_bitmask(target_cpus), __entry->reason) | ||
35 | ); | ||
36 | |||
37 | DECLARE_EVENT_CLASS(ipi_handler, | ||
38 | |||
39 | TP_PROTO(const char *reason), | ||
40 | |||
41 | TP_ARGS(reason), | ||
42 | |||
43 | TP_STRUCT__entry( | ||
44 | __field(const char *, reason) | ||
45 | ), | ||
46 | |||
47 | TP_fast_assign( | ||
48 | __entry->reason = reason; | ||
49 | ), | ||
50 | |||
51 | TP_printk("(%s)", __entry->reason) | ||
52 | ); | ||
53 | |||
54 | /** | ||
55 | * ipi_entry - called immediately before the IPI handler | ||
56 | * | ||
57 | * @reason: string identifying the IPI purpose | ||
58 | * | ||
59 | * It is necessary for @reason to be a static string declared with | ||
60 | * __tracepoint_string, ideally the same as used with trace_ipi_raise | ||
61 | * for that IPI. | ||
62 | */ | ||
63 | DEFINE_EVENT(ipi_handler, ipi_entry, | ||
64 | |||
65 | TP_PROTO(const char *reason), | ||
66 | |||
67 | TP_ARGS(reason) | ||
68 | ); | ||
69 | |||
70 | /** | ||
71 | * ipi_exit - called immediately after the IPI handler returns | ||
72 | * | ||
73 | * @reason: string identifying the IPI purpose | ||
74 | * | ||
75 | * It is necessary for @reason to be a static string declared with | ||
76 | * __tracepoint_string, ideally the same as used with trace_ipi_raise for | ||
77 | * that IPI. | ||
78 | */ | ||
79 | DEFINE_EVENT(ipi_handler, ipi_exit, | ||
80 | |||
81 | TP_PROTO(const char *reason), | ||
82 | |||
83 | TP_ARGS(reason) | ||
84 | ); | ||
85 | |||
86 | #endif /* _TRACE_IPI_H */ | ||
87 | |||
88 | /* This part must be outside protection */ | ||
89 | #include <trace/define_trace.h> | ||
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h index 2a5769fdf8ba..0d7608dc1a34 100644 --- a/include/uapi/drm/nouveau_drm.h +++ b/include/uapi/drm/nouveau_drm.h | |||
@@ -25,6 +25,16 @@ | |||
25 | #ifndef __NOUVEAU_DRM_H__ | 25 | #ifndef __NOUVEAU_DRM_H__ |
26 | #define __NOUVEAU_DRM_H__ | 26 | #define __NOUVEAU_DRM_H__ |
27 | 27 | ||
28 | #define DRM_NOUVEAU_EVENT_NVIF 0x80000000 | ||
29 | |||
30 | /* reserved object handles when using deprecated object APIs - these | ||
31 | * are here so that libdrm can allow interoperability with the new | ||
32 | * object APIs | ||
33 | */ | ||
34 | #define NOUVEAU_ABI16_CLIENT 0xffffffff | ||
35 | #define NOUVEAU_ABI16_DEVICE 0xdddddddd | ||
36 | #define NOUVEAU_ABI16_CHAN(n) (0xcccc0000 | (n)) | ||
37 | |||
28 | #define NOUVEAU_GEM_DOMAIN_CPU (1 << 0) | 38 | #define NOUVEAU_GEM_DOMAIN_CPU (1 << 0) |
29 | #define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1) | 39 | #define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1) |
30 | #define NOUVEAU_GEM_DOMAIN_GART (1 << 2) | 40 | #define NOUVEAU_GEM_DOMAIN_GART (1 << 2) |
@@ -123,6 +133,7 @@ struct drm_nouveau_gem_cpu_fini { | |||
123 | #define DRM_NOUVEAU_GROBJ_ALLOC 0x04 /* deprecated */ | 133 | #define DRM_NOUVEAU_GROBJ_ALLOC 0x04 /* deprecated */ |
124 | #define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 /* deprecated */ | 134 | #define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 /* deprecated */ |
125 | #define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */ | 135 | #define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */ |
136 | #define DRM_NOUVEAU_NVIF 0x07 | ||
126 | #define DRM_NOUVEAU_GEM_NEW 0x40 | 137 | #define DRM_NOUVEAU_GEM_NEW 0x40 |
127 | #define DRM_NOUVEAU_GEM_PUSHBUF 0x41 | 138 | #define DRM_NOUVEAU_GEM_PUSHBUF 0x41 |
128 | #define DRM_NOUVEAU_GEM_CPU_PREP 0x42 | 139 | #define DRM_NOUVEAU_GEM_CPU_PREP 0x42 |
diff --git a/ipc/namespace.c b/ipc/namespace.c index 59451c1e214d..b54468e48e32 100644 --- a/ipc/namespace.c +++ b/ipc/namespace.c | |||
@@ -154,11 +154,11 @@ static void *ipcns_get(struct task_struct *task) | |||
154 | struct ipc_namespace *ns = NULL; | 154 | struct ipc_namespace *ns = NULL; |
155 | struct nsproxy *nsproxy; | 155 | struct nsproxy *nsproxy; |
156 | 156 | ||
157 | rcu_read_lock(); | 157 | task_lock(task); |
158 | nsproxy = task_nsproxy(task); | 158 | nsproxy = task->nsproxy; |
159 | if (nsproxy) | 159 | if (nsproxy) |
160 | ns = get_ipc_ns(nsproxy->ipc_ns); | 160 | ns = get_ipc_ns(nsproxy->ipc_ns); |
161 | rcu_read_unlock(); | 161 | task_unlock(task); |
162 | 162 | ||
163 | return ns; | 163 | return ns; |
164 | } | 164 | } |
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c index 8e7811086b82..ef42d0ab3115 100644 --- a/kernel/nsproxy.c +++ b/kernel/nsproxy.c | |||
@@ -204,20 +204,13 @@ void switch_task_namespaces(struct task_struct *p, struct nsproxy *new) | |||
204 | 204 | ||
205 | might_sleep(); | 205 | might_sleep(); |
206 | 206 | ||
207 | task_lock(p); | ||
207 | ns = p->nsproxy; | 208 | ns = p->nsproxy; |
209 | p->nsproxy = new; | ||
210 | task_unlock(p); | ||
208 | 211 | ||
209 | rcu_assign_pointer(p->nsproxy, new); | 212 | if (ns && atomic_dec_and_test(&ns->count)) |
210 | |||
211 | if (ns && atomic_dec_and_test(&ns->count)) { | ||
212 | /* | ||
213 | * wait for others to get what they want from this nsproxy. | ||
214 | * | ||
215 | * cannot release this nsproxy via the call_rcu() since | ||
216 | * put_mnt_ns() will want to sleep | ||
217 | */ | ||
218 | synchronize_rcu(); | ||
219 | free_nsproxy(ns); | 213 | free_nsproxy(ns); |
220 | } | ||
221 | } | 214 | } |
222 | 215 | ||
223 | void exit_task_namespaces(struct task_struct *p) | 216 | void exit_task_namespaces(struct task_struct *p) |
diff --git a/kernel/signal.c b/kernel/signal.c index 40b76e351e64..8f0876f9f6dd 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -2170,8 +2170,7 @@ static int ptrace_signal(int signr, siginfo_t *info) | |||
2170 | return signr; | 2170 | return signr; |
2171 | } | 2171 | } |
2172 | 2172 | ||
2173 | int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, | 2173 | int get_signal(struct ksignal *ksig) |
2174 | struct pt_regs *regs, void *cookie) | ||
2175 | { | 2174 | { |
2176 | struct sighand_struct *sighand = current->sighand; | 2175 | struct sighand_struct *sighand = current->sighand; |
2177 | struct signal_struct *signal = current->signal; | 2176 | struct signal_struct *signal = current->signal; |
@@ -2241,13 +2240,13 @@ relock: | |||
2241 | goto relock; | 2240 | goto relock; |
2242 | } | 2241 | } |
2243 | 2242 | ||
2244 | signr = dequeue_signal(current, ¤t->blocked, info); | 2243 | signr = dequeue_signal(current, ¤t->blocked, &ksig->info); |
2245 | 2244 | ||
2246 | if (!signr) | 2245 | if (!signr) |
2247 | break; /* will return 0 */ | 2246 | break; /* will return 0 */ |
2248 | 2247 | ||
2249 | if (unlikely(current->ptrace) && signr != SIGKILL) { | 2248 | if (unlikely(current->ptrace) && signr != SIGKILL) { |
2250 | signr = ptrace_signal(signr, info); | 2249 | signr = ptrace_signal(signr, &ksig->info); |
2251 | if (!signr) | 2250 | if (!signr) |
2252 | continue; | 2251 | continue; |
2253 | } | 2252 | } |
@@ -2255,13 +2254,13 @@ relock: | |||
2255 | ka = &sighand->action[signr-1]; | 2254 | ka = &sighand->action[signr-1]; |
2256 | 2255 | ||
2257 | /* Trace actually delivered signals. */ | 2256 | /* Trace actually delivered signals. */ |
2258 | trace_signal_deliver(signr, info, ka); | 2257 | trace_signal_deliver(signr, &ksig->info, ka); |
2259 | 2258 | ||
2260 | if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ | 2259 | if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ |
2261 | continue; | 2260 | continue; |
2262 | if (ka->sa.sa_handler != SIG_DFL) { | 2261 | if (ka->sa.sa_handler != SIG_DFL) { |
2263 | /* Run the handler. */ | 2262 | /* Run the handler. */ |
2264 | *return_ka = *ka; | 2263 | ksig->ka = *ka; |
2265 | 2264 | ||
2266 | if (ka->sa.sa_flags & SA_ONESHOT) | 2265 | if (ka->sa.sa_flags & SA_ONESHOT) |
2267 | ka->sa.sa_handler = SIG_DFL; | 2266 | ka->sa.sa_handler = SIG_DFL; |
@@ -2311,7 +2310,7 @@ relock: | |||
2311 | spin_lock_irq(&sighand->siglock); | 2310 | spin_lock_irq(&sighand->siglock); |
2312 | } | 2311 | } |
2313 | 2312 | ||
2314 | if (likely(do_signal_stop(info->si_signo))) { | 2313 | if (likely(do_signal_stop(ksig->info.si_signo))) { |
2315 | /* It released the siglock. */ | 2314 | /* It released the siglock. */ |
2316 | goto relock; | 2315 | goto relock; |
2317 | } | 2316 | } |
@@ -2332,7 +2331,7 @@ relock: | |||
2332 | 2331 | ||
2333 | if (sig_kernel_coredump(signr)) { | 2332 | if (sig_kernel_coredump(signr)) { |
2334 | if (print_fatal_signals) | 2333 | if (print_fatal_signals) |
2335 | print_fatal_signal(info->si_signo); | 2334 | print_fatal_signal(ksig->info.si_signo); |
2336 | proc_coredump_connector(current); | 2335 | proc_coredump_connector(current); |
2337 | /* | 2336 | /* |
2338 | * If it was able to dump core, this kills all | 2337 | * If it was able to dump core, this kills all |
@@ -2342,34 +2341,32 @@ relock: | |||
2342 | * first and our do_group_exit call below will use | 2341 | * first and our do_group_exit call below will use |
2343 | * that value and ignore the one we pass it. | 2342 | * that value and ignore the one we pass it. |
2344 | */ | 2343 | */ |
2345 | do_coredump(info); | 2344 | do_coredump(&ksig->info); |
2346 | } | 2345 | } |
2347 | 2346 | ||
2348 | /* | 2347 | /* |
2349 | * Death signals, no core dump. | 2348 | * Death signals, no core dump. |
2350 | */ | 2349 | */ |
2351 | do_group_exit(info->si_signo); | 2350 | do_group_exit(ksig->info.si_signo); |
2352 | /* NOTREACHED */ | 2351 | /* NOTREACHED */ |
2353 | } | 2352 | } |
2354 | spin_unlock_irq(&sighand->siglock); | 2353 | spin_unlock_irq(&sighand->siglock); |
2355 | return signr; | 2354 | |
2355 | ksig->sig = signr; | ||
2356 | return ksig->sig > 0; | ||
2356 | } | 2357 | } |
2357 | 2358 | ||
2358 | /** | 2359 | /** |
2359 | * signal_delivered - | 2360 | * signal_delivered - |
2360 | * @sig: number of signal being delivered | 2361 | * @ksig: kernel signal struct |
2361 | * @info: siginfo_t of signal being delivered | ||
2362 | * @ka: sigaction setting that chose the handler | ||
2363 | * @regs: user register state | ||
2364 | * @stepping: nonzero if debugger single-step or block-step in use | 2362 | * @stepping: nonzero if debugger single-step or block-step in use |
2365 | * | 2363 | * |
2366 | * This function should be called when a signal has successfully been | 2364 | * This function should be called when a signal has successfully been |
2367 | * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask | 2365 | * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask |
2368 | * is always blocked, and the signal itself is blocked unless %SA_NODEFER | 2366 | * is always blocked, and the signal itself is blocked unless %SA_NODEFER |
2369 | * is set in @ka->sa.sa_flags. Tracing is notified. | 2367 | * is set in @ksig->ka.sa.sa_flags. Tracing is notified. |
2370 | */ | 2368 | */ |
2371 | void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka, | 2369 | static void signal_delivered(struct ksignal *ksig, int stepping) |
2372 | struct pt_regs *regs, int stepping) | ||
2373 | { | 2370 | { |
2374 | sigset_t blocked; | 2371 | sigset_t blocked; |
2375 | 2372 | ||
@@ -2379,11 +2376,11 @@ void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka, | |||
2379 | simply clear the restore sigmask flag. */ | 2376 | simply clear the restore sigmask flag. */ |
2380 | clear_restore_sigmask(); | 2377 | clear_restore_sigmask(); |
2381 | 2378 | ||
2382 | sigorsets(&blocked, ¤t->blocked, &ka->sa.sa_mask); | 2379 | sigorsets(&blocked, ¤t->blocked, &ksig->ka.sa.sa_mask); |
2383 | if (!(ka->sa.sa_flags & SA_NODEFER)) | 2380 | if (!(ksig->ka.sa.sa_flags & SA_NODEFER)) |
2384 | sigaddset(&blocked, sig); | 2381 | sigaddset(&blocked, ksig->sig); |
2385 | set_current_blocked(&blocked); | 2382 | set_current_blocked(&blocked); |
2386 | tracehook_signal_handler(sig, info, ka, regs, stepping); | 2383 | tracehook_signal_handler(stepping); |
2387 | } | 2384 | } |
2388 | 2385 | ||
2389 | void signal_setup_done(int failed, struct ksignal *ksig, int stepping) | 2386 | void signal_setup_done(int failed, struct ksignal *ksig, int stepping) |
@@ -2391,8 +2388,7 @@ void signal_setup_done(int failed, struct ksignal *ksig, int stepping) | |||
2391 | if (failed) | 2388 | if (failed) |
2392 | force_sigsegv(ksig->sig, current); | 2389 | force_sigsegv(ksig->sig, current); |
2393 | else | 2390 | else |
2394 | signal_delivered(ksig->sig, &ksig->info, &ksig->ka, | 2391 | signal_delivered(ksig, stepping); |
2395 | signal_pt_regs(), stepping); | ||
2396 | } | 2392 | } |
2397 | 2393 | ||
2398 | /* | 2394 | /* |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 925f629658d6..afb04b9b818a 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -1968,7 +1968,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta) | |||
1968 | 1968 | ||
1969 | /** | 1969 | /** |
1970 | * rb_update_event - update event type and data | 1970 | * rb_update_event - update event type and data |
1971 | * @event: the even to update | 1971 | * @event: the event to update |
1972 | * @type: the type of event | 1972 | * @type: the type of event |
1973 | * @length: the size of the event field in the ring buffer | 1973 | * @length: the size of the event field in the ring buffer |
1974 | * | 1974 | * |
@@ -3341,21 +3341,16 @@ static void rb_iter_reset(struct ring_buffer_iter *iter) | |||
3341 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 3341 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; |
3342 | 3342 | ||
3343 | /* Iterator usage is expected to have record disabled */ | 3343 | /* Iterator usage is expected to have record disabled */ |
3344 | if (list_empty(&cpu_buffer->reader_page->list)) { | 3344 | iter->head_page = cpu_buffer->reader_page; |
3345 | iter->head_page = rb_set_head_page(cpu_buffer); | 3345 | iter->head = cpu_buffer->reader_page->read; |
3346 | if (unlikely(!iter->head_page)) | 3346 | |
3347 | return; | 3347 | iter->cache_reader_page = iter->head_page; |
3348 | iter->head = iter->head_page->read; | 3348 | iter->cache_read = iter->head; |
3349 | } else { | 3349 | |
3350 | iter->head_page = cpu_buffer->reader_page; | ||
3351 | iter->head = cpu_buffer->reader_page->read; | ||
3352 | } | ||
3353 | if (iter->head) | 3350 | if (iter->head) |
3354 | iter->read_stamp = cpu_buffer->read_stamp; | 3351 | iter->read_stamp = cpu_buffer->read_stamp; |
3355 | else | 3352 | else |
3356 | iter->read_stamp = iter->head_page->page->time_stamp; | 3353 | iter->read_stamp = iter->head_page->page->time_stamp; |
3357 | iter->cache_reader_page = cpu_buffer->reader_page; | ||
3358 | iter->cache_read = cpu_buffer->read; | ||
3359 | } | 3354 | } |
3360 | 3355 | ||
3361 | /** | 3356 | /** |
@@ -3748,12 +3743,14 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | |||
3748 | return NULL; | 3743 | return NULL; |
3749 | 3744 | ||
3750 | /* | 3745 | /* |
3751 | * We repeat when a time extend is encountered. | 3746 | * We repeat when a time extend is encountered or we hit |
3752 | * Since the time extend is always attached to a data event, | 3747 | * the end of the page. Since the time extend is always attached |
3753 | * we should never loop more than once. | 3748 | * to a data event, we should never loop more than three times. |
3754 | * (We never hit the following condition more than twice). | 3749 | * Once for going to next page, once on time extend, and |
3750 | * finally once to get the event. | ||
3751 | * (We never hit the following condition more than thrice). | ||
3755 | */ | 3752 | */ |
3756 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) | 3753 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) |
3757 | return NULL; | 3754 | return NULL; |
3758 | 3755 | ||
3759 | if (rb_per_cpu_empty(cpu_buffer)) | 3756 | if (rb_per_cpu_empty(cpu_buffer)) |
diff --git a/kernel/utsname.c b/kernel/utsname.c index fd393124e507..883aaaa7de8a 100644 --- a/kernel/utsname.c +++ b/kernel/utsname.c | |||
@@ -93,13 +93,13 @@ static void *utsns_get(struct task_struct *task) | |||
93 | struct uts_namespace *ns = NULL; | 93 | struct uts_namespace *ns = NULL; |
94 | struct nsproxy *nsproxy; | 94 | struct nsproxy *nsproxy; |
95 | 95 | ||
96 | rcu_read_lock(); | 96 | task_lock(task); |
97 | nsproxy = task_nsproxy(task); | 97 | nsproxy = task->nsproxy; |
98 | if (nsproxy) { | 98 | if (nsproxy) { |
99 | ns = nsproxy->uts_ns; | 99 | ns = nsproxy->uts_ns; |
100 | get_uts_ns(ns); | 100 | get_uts_ns(ns); |
101 | } | 101 | } |
102 | rcu_read_unlock(); | 102 | task_unlock(task); |
103 | 103 | ||
104 | return ns; | 104 | return ns; |
105 | } | 105 | } |
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 85b62691f4f2..7c6b51a58968 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c | |||
@@ -373,9 +373,11 @@ struct net *get_net_ns_by_pid(pid_t pid) | |||
373 | tsk = find_task_by_vpid(pid); | 373 | tsk = find_task_by_vpid(pid); |
374 | if (tsk) { | 374 | if (tsk) { |
375 | struct nsproxy *nsproxy; | 375 | struct nsproxy *nsproxy; |
376 | nsproxy = task_nsproxy(tsk); | 376 | task_lock(tsk); |
377 | nsproxy = tsk->nsproxy; | ||
377 | if (nsproxy) | 378 | if (nsproxy) |
378 | net = get_net(nsproxy->net_ns); | 379 | net = get_net(nsproxy->net_ns); |
380 | task_unlock(tsk); | ||
379 | } | 381 | } |
380 | rcu_read_unlock(); | 382 | rcu_read_unlock(); |
381 | return net; | 383 | return net; |
@@ -632,11 +634,11 @@ static void *netns_get(struct task_struct *task) | |||
632 | struct net *net = NULL; | 634 | struct net *net = NULL; |
633 | struct nsproxy *nsproxy; | 635 | struct nsproxy *nsproxy; |
634 | 636 | ||
635 | rcu_read_lock(); | 637 | task_lock(task); |
636 | nsproxy = task_nsproxy(task); | 638 | nsproxy = task->nsproxy; |
637 | if (nsproxy) | 639 | if (nsproxy) |
638 | net = get_net(nsproxy->net_ns); | 640 | net = get_net(nsproxy->net_ns); |
639 | rcu_read_unlock(); | 641 | task_unlock(task); |
640 | 642 | ||
641 | return net; | 643 | return net; |
642 | } | 644 | } |
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 4ce5eccec1f6..c548ab213f76 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c | |||
@@ -886,7 +886,7 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs | |||
886 | u32 priv_len, maj_stat; | 886 | u32 priv_len, maj_stat; |
887 | int pad, saved_len, remaining_len, offset; | 887 | int pad, saved_len, remaining_len, offset; |
888 | 888 | ||
889 | rqstp->rq_splice_ok = 0; | 889 | rqstp->rq_splice_ok = false; |
890 | 890 | ||
891 | priv_len = svc_getnl(&buf->head[0]); | 891 | priv_len = svc_getnl(&buf->head[0]); |
892 | if (rqstp->rq_deferred) { | 892 | if (rqstp->rq_deferred) { |
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 5de6801cd924..1db5007ddbce 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
@@ -1086,9 +1086,9 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) | |||
1086 | goto err_short_len; | 1086 | goto err_short_len; |
1087 | 1087 | ||
1088 | /* Will be turned off only in gss privacy case: */ | 1088 | /* Will be turned off only in gss privacy case: */ |
1089 | rqstp->rq_splice_ok = 1; | 1089 | rqstp->rq_splice_ok = true; |
1090 | /* Will be turned off only when NFSv4 Sessions are used */ | 1090 | /* Will be turned off only when NFSv4 Sessions are used */ |
1091 | rqstp->rq_usedeferral = 1; | 1091 | rqstp->rq_usedeferral = true; |
1092 | rqstp->rq_dropme = false; | 1092 | rqstp->rq_dropme = false; |
1093 | 1093 | ||
1094 | /* Setup reply header */ | 1094 | /* Setup reply header */ |
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index b4737fbdec13..6666c6745858 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
@@ -23,6 +23,7 @@ static int svc_deferred_recv(struct svc_rqst *rqstp); | |||
23 | static struct cache_deferred_req *svc_defer(struct cache_req *req); | 23 | static struct cache_deferred_req *svc_defer(struct cache_req *req); |
24 | static void svc_age_temp_xprts(unsigned long closure); | 24 | static void svc_age_temp_xprts(unsigned long closure); |
25 | static void svc_delete_xprt(struct svc_xprt *xprt); | 25 | static void svc_delete_xprt(struct svc_xprt *xprt); |
26 | static void svc_xprt_do_enqueue(struct svc_xprt *xprt); | ||
26 | 27 | ||
27 | /* apparently the "standard" is that clients close | 28 | /* apparently the "standard" is that clients close |
28 | * idle connections after 5 minutes, servers after | 29 | * idle connections after 5 minutes, servers after |
@@ -222,11 +223,12 @@ static void svc_xprt_received(struct svc_xprt *xprt) | |||
222 | if (!test_bit(XPT_BUSY, &xprt->xpt_flags)) | 223 | if (!test_bit(XPT_BUSY, &xprt->xpt_flags)) |
223 | return; | 224 | return; |
224 | /* As soon as we clear busy, the xprt could be closed and | 225 | /* As soon as we clear busy, the xprt could be closed and |
225 | * 'put', so we need a reference to call svc_xprt_enqueue with: | 226 | * 'put', so we need a reference to call svc_xprt_do_enqueue with: |
226 | */ | 227 | */ |
227 | svc_xprt_get(xprt); | 228 | svc_xprt_get(xprt); |
229 | smp_mb__before_atomic(); | ||
228 | clear_bit(XPT_BUSY, &xprt->xpt_flags); | 230 | clear_bit(XPT_BUSY, &xprt->xpt_flags); |
229 | svc_xprt_enqueue(xprt); | 231 | svc_xprt_do_enqueue(xprt); |
230 | svc_xprt_put(xprt); | 232 | svc_xprt_put(xprt); |
231 | } | 233 | } |
232 | 234 | ||
@@ -335,12 +337,7 @@ static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt) | |||
335 | return false; | 337 | return false; |
336 | } | 338 | } |
337 | 339 | ||
338 | /* | 340 | static void svc_xprt_do_enqueue(struct svc_xprt *xprt) |
339 | * Queue up a transport with data pending. If there are idle nfsd | ||
340 | * processes, wake 'em up. | ||
341 | * | ||
342 | */ | ||
343 | void svc_xprt_enqueue(struct svc_xprt *xprt) | ||
344 | { | 341 | { |
345 | struct svc_pool *pool; | 342 | struct svc_pool *pool; |
346 | struct svc_rqst *rqstp; | 343 | struct svc_rqst *rqstp; |
@@ -398,6 +395,18 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) | |||
398 | out_unlock: | 395 | out_unlock: |
399 | spin_unlock_bh(&pool->sp_lock); | 396 | spin_unlock_bh(&pool->sp_lock); |
400 | } | 397 | } |
398 | |||
399 | /* | ||
400 | * Queue up a transport with data pending. If there are idle nfsd | ||
401 | * processes, wake 'em up. | ||
402 | * | ||
403 | */ | ||
404 | void svc_xprt_enqueue(struct svc_xprt *xprt) | ||
405 | { | ||
406 | if (test_bit(XPT_BUSY, &xprt->xpt_flags)) | ||
407 | return; | ||
408 | svc_xprt_do_enqueue(xprt); | ||
409 | } | ||
401 | EXPORT_SYMBOL_GPL(svc_xprt_enqueue); | 410 | EXPORT_SYMBOL_GPL(svc_xprt_enqueue); |
402 | 411 | ||
403 | /* | 412 | /* |
@@ -439,6 +448,8 @@ void svc_reserve(struct svc_rqst *rqstp, int space) | |||
439 | atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved); | 448 | atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved); |
440 | rqstp->rq_reserved = space; | 449 | rqstp->rq_reserved = space; |
441 | 450 | ||
451 | if (xprt->xpt_ops->xpo_adjust_wspace) | ||
452 | xprt->xpt_ops->xpo_adjust_wspace(xprt); | ||
442 | svc_xprt_enqueue(xprt); | 453 | svc_xprt_enqueue(xprt); |
443 | } | 454 | } |
444 | } | 455 | } |
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index b507cd327d9b..c24a8ff33f8f 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -446,15 +446,43 @@ static void svc_write_space(struct sock *sk) | |||
446 | } | 446 | } |
447 | } | 447 | } |
448 | 448 | ||
449 | static int svc_tcp_has_wspace(struct svc_xprt *xprt) | ||
450 | { | ||
451 | struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); | ||
452 | struct svc_serv *serv = svsk->sk_xprt.xpt_server; | ||
453 | int required; | ||
454 | |||
455 | if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) | ||
456 | return 1; | ||
457 | required = atomic_read(&xprt->xpt_reserved) + serv->sv_max_mesg; | ||
458 | if (sk_stream_wspace(svsk->sk_sk) >= required || | ||
459 | (sk_stream_min_wspace(svsk->sk_sk) == 0 && | ||
460 | atomic_read(&xprt->xpt_reserved) == 0)) | ||
461 | return 1; | ||
462 | set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); | ||
463 | return 0; | ||
464 | } | ||
465 | |||
449 | static void svc_tcp_write_space(struct sock *sk) | 466 | static void svc_tcp_write_space(struct sock *sk) |
450 | { | 467 | { |
468 | struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data); | ||
451 | struct socket *sock = sk->sk_socket; | 469 | struct socket *sock = sk->sk_socket; |
452 | 470 | ||
453 | if (sk_stream_is_writeable(sk) && sock) | 471 | if (!sk_stream_is_writeable(sk) || !sock) |
472 | return; | ||
473 | if (!svsk || svc_tcp_has_wspace(&svsk->sk_xprt)) | ||
454 | clear_bit(SOCK_NOSPACE, &sock->flags); | 474 | clear_bit(SOCK_NOSPACE, &sock->flags); |
455 | svc_write_space(sk); | 475 | svc_write_space(sk); |
456 | } | 476 | } |
457 | 477 | ||
478 | static void svc_tcp_adjust_wspace(struct svc_xprt *xprt) | ||
479 | { | ||
480 | struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); | ||
481 | |||
482 | if (svc_tcp_has_wspace(xprt)) | ||
483 | clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); | ||
484 | } | ||
485 | |||
458 | /* | 486 | /* |
459 | * See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo | 487 | * See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo |
460 | */ | 488 | */ |
@@ -692,6 +720,7 @@ static struct svc_xprt_class svc_udp_class = { | |||
692 | .xcl_owner = THIS_MODULE, | 720 | .xcl_owner = THIS_MODULE, |
693 | .xcl_ops = &svc_udp_ops, | 721 | .xcl_ops = &svc_udp_ops, |
694 | .xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP, | 722 | .xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP, |
723 | .xcl_ident = XPRT_TRANSPORT_UDP, | ||
695 | }; | 724 | }; |
696 | 725 | ||
697 | static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv) | 726 | static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv) |
@@ -1197,23 +1226,6 @@ static void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp) | |||
1197 | svc_putnl(resv, 0); | 1226 | svc_putnl(resv, 0); |
1198 | } | 1227 | } |
1199 | 1228 | ||
1200 | static int svc_tcp_has_wspace(struct svc_xprt *xprt) | ||
1201 | { | ||
1202 | struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); | ||
1203 | struct svc_serv *serv = svsk->sk_xprt.xpt_server; | ||
1204 | int required; | ||
1205 | |||
1206 | if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) | ||
1207 | return 1; | ||
1208 | required = atomic_read(&xprt->xpt_reserved) + serv->sv_max_mesg; | ||
1209 | if (sk_stream_wspace(svsk->sk_sk) >= required || | ||
1210 | (sk_stream_min_wspace(svsk->sk_sk) == 0 && | ||
1211 | atomic_read(&xprt->xpt_reserved) == 0)) | ||
1212 | return 1; | ||
1213 | set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); | ||
1214 | return 0; | ||
1215 | } | ||
1216 | |||
1217 | static struct svc_xprt *svc_tcp_create(struct svc_serv *serv, | 1229 | static struct svc_xprt *svc_tcp_create(struct svc_serv *serv, |
1218 | struct net *net, | 1230 | struct net *net, |
1219 | struct sockaddr *sa, int salen, | 1231 | struct sockaddr *sa, int salen, |
@@ -1285,6 +1297,7 @@ static struct svc_xprt_ops svc_tcp_ops = { | |||
1285 | .xpo_has_wspace = svc_tcp_has_wspace, | 1297 | .xpo_has_wspace = svc_tcp_has_wspace, |
1286 | .xpo_accept = svc_tcp_accept, | 1298 | .xpo_accept = svc_tcp_accept, |
1287 | .xpo_secure_port = svc_sock_secure_port, | 1299 | .xpo_secure_port = svc_sock_secure_port, |
1300 | .xpo_adjust_wspace = svc_tcp_adjust_wspace, | ||
1288 | }; | 1301 | }; |
1289 | 1302 | ||
1290 | static struct svc_xprt_class svc_tcp_class = { | 1303 | static struct svc_xprt_class svc_tcp_class = { |
@@ -1292,6 +1305,7 @@ static struct svc_xprt_class svc_tcp_class = { | |||
1292 | .xcl_owner = THIS_MODULE, | 1305 | .xcl_owner = THIS_MODULE, |
1293 | .xcl_ops = &svc_tcp_ops, | 1306 | .xcl_ops = &svc_tcp_ops, |
1294 | .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP, | 1307 | .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP, |
1308 | .xcl_ident = XPRT_TRANSPORT_TCP, | ||
1295 | }; | 1309 | }; |
1296 | 1310 | ||
1297 | void svc_init_xprt_sock(void) | 1311 | void svc_init_xprt_sock(void) |
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 23fb4e75e245..290af97bf6f9 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c | |||
@@ -509,7 +509,8 @@ void xdr_commit_encode(struct xdr_stream *xdr) | |||
509 | } | 509 | } |
510 | EXPORT_SYMBOL_GPL(xdr_commit_encode); | 510 | EXPORT_SYMBOL_GPL(xdr_commit_encode); |
511 | 511 | ||
512 | __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr, size_t nbytes) | 512 | static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr, |
513 | size_t nbytes) | ||
513 | { | 514 | { |
514 | static __be32 *p; | 515 | static __be32 *p; |
515 | int space_left; | 516 | int space_left; |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index c3b2b3369e52..51c63165073c 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -1306,7 +1306,7 @@ struct rpc_xprt *xprt_create_transport(struct xprt_create *args) | |||
1306 | } | 1306 | } |
1307 | } | 1307 | } |
1308 | spin_unlock(&xprt_list_lock); | 1308 | spin_unlock(&xprt_list_lock); |
1309 | printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident); | 1309 | dprintk("RPC: transport (%d) not supported\n", args->ident); |
1310 | return ERR_PTR(-EIO); | 1310 | return ERR_PTR(-EIO); |
1311 | 1311 | ||
1312 | found: | 1312 | found: |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 8f92a61ee2df..e0110270d650 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <linux/sunrpc/debug.h> | 43 | #include <linux/sunrpc/debug.h> |
44 | #include <linux/sunrpc/rpc_rdma.h> | 44 | #include <linux/sunrpc/rpc_rdma.h> |
45 | #include <linux/spinlock.h> | 45 | #include <linux/spinlock.h> |
46 | #include <linux/highmem.h> | ||
46 | #include <asm/unaligned.h> | 47 | #include <asm/unaligned.h> |
47 | #include <rdma/ib_verbs.h> | 48 | #include <rdma/ib_verbs.h> |
48 | #include <rdma/rdma_cm.h> | 49 | #include <rdma/rdma_cm.h> |
@@ -435,6 +436,32 @@ static int rdma_read_chunks(struct svcxprt_rdma *xprt, | |||
435 | return ret; | 436 | return ret; |
436 | } | 437 | } |
437 | 438 | ||
439 | /* | ||
440 | * To avoid a separate RDMA READ just for a handful of zero bytes, | ||
441 | * RFC 5666 section 3.7 allows the client to omit the XDR zero pad | ||
442 | * in chunk lists. | ||
443 | */ | ||
444 | static void | ||
445 | rdma_fix_xdr_pad(struct xdr_buf *buf) | ||
446 | { | ||
447 | unsigned int page_len = buf->page_len; | ||
448 | unsigned int size = (XDR_QUADLEN(page_len) << 2) - page_len; | ||
449 | unsigned int offset, pg_no; | ||
450 | char *p; | ||
451 | |||
452 | if (size == 0) | ||
453 | return; | ||
454 | |||
455 | pg_no = page_len >> PAGE_SHIFT; | ||
456 | offset = page_len & ~PAGE_MASK; | ||
457 | p = page_address(buf->pages[pg_no]); | ||
458 | memset(p + offset, 0, size); | ||
459 | |||
460 | buf->page_len += size; | ||
461 | buf->buflen += size; | ||
462 | buf->len += size; | ||
463 | } | ||
464 | |||
438 | static int rdma_read_complete(struct svc_rqst *rqstp, | 465 | static int rdma_read_complete(struct svc_rqst *rqstp, |
439 | struct svc_rdma_op_ctxt *head) | 466 | struct svc_rdma_op_ctxt *head) |
440 | { | 467 | { |
@@ -449,6 +476,7 @@ static int rdma_read_complete(struct svc_rqst *rqstp, | |||
449 | rqstp->rq_pages[page_no] = head->pages[page_no]; | 476 | rqstp->rq_pages[page_no] = head->pages[page_no]; |
450 | } | 477 | } |
451 | /* Point rq_arg.pages past header */ | 478 | /* Point rq_arg.pages past header */ |
479 | rdma_fix_xdr_pad(&head->arg); | ||
452 | rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count]; | 480 | rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count]; |
453 | rqstp->rq_arg.page_len = head->arg.page_len; | 481 | rqstp->rq_arg.page_len = head->arg.page_len; |
454 | rqstp->rq_arg.page_base = head->arg.page_base; | 482 | rqstp->rq_arg.page_base = head->arg.page_base; |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 49fd21a5c215..9f1b50689c0f 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c | |||
@@ -192,6 +192,8 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, | |||
192 | xdr_sge_no++; | 192 | xdr_sge_no++; |
193 | BUG_ON(xdr_sge_no > vec->count); | 193 | BUG_ON(xdr_sge_no > vec->count); |
194 | bc -= sge_bytes; | 194 | bc -= sge_bytes; |
195 | if (sge_no == xprt->sc_max_sge) | ||
196 | break; | ||
195 | } | 197 | } |
196 | 198 | ||
197 | /* Prepare WRITE WR */ | 199 | /* Prepare WRITE WR */ |
@@ -209,7 +211,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, | |||
209 | atomic_inc(&rdma_stat_write); | 211 | atomic_inc(&rdma_stat_write); |
210 | if (svc_rdma_send(xprt, &write_wr)) | 212 | if (svc_rdma_send(xprt, &write_wr)) |
211 | goto err; | 213 | goto err; |
212 | return 0; | 214 | return write_len - bc; |
213 | err: | 215 | err: |
214 | svc_rdma_unmap_dma(ctxt); | 216 | svc_rdma_unmap_dma(ctxt); |
215 | svc_rdma_put_context(ctxt, 0); | 217 | svc_rdma_put_context(ctxt, 0); |
@@ -225,7 +227,6 @@ static int send_write_chunks(struct svcxprt_rdma *xprt, | |||
225 | { | 227 | { |
226 | u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len; | 228 | u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len; |
227 | int write_len; | 229 | int write_len; |
228 | int max_write; | ||
229 | u32 xdr_off; | 230 | u32 xdr_off; |
230 | int chunk_off; | 231 | int chunk_off; |
231 | int chunk_no; | 232 | int chunk_no; |
@@ -239,8 +240,6 @@ static int send_write_chunks(struct svcxprt_rdma *xprt, | |||
239 | res_ary = (struct rpcrdma_write_array *) | 240 | res_ary = (struct rpcrdma_write_array *) |
240 | &rdma_resp->rm_body.rm_chunks[1]; | 241 | &rdma_resp->rm_body.rm_chunks[1]; |
241 | 242 | ||
242 | max_write = xprt->sc_max_sge * PAGE_SIZE; | ||
243 | |||
244 | /* Write chunks start at the pagelist */ | 243 | /* Write chunks start at the pagelist */ |
245 | for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0; | 244 | for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0; |
246 | xfer_len && chunk_no < arg_ary->wc_nchunks; | 245 | xfer_len && chunk_no < arg_ary->wc_nchunks; |
@@ -260,23 +259,21 @@ static int send_write_chunks(struct svcxprt_rdma *xprt, | |||
260 | write_len); | 259 | write_len); |
261 | chunk_off = 0; | 260 | chunk_off = 0; |
262 | while (write_len) { | 261 | while (write_len) { |
263 | int this_write; | ||
264 | this_write = min(write_len, max_write); | ||
265 | ret = send_write(xprt, rqstp, | 262 | ret = send_write(xprt, rqstp, |
266 | ntohl(arg_ch->rs_handle), | 263 | ntohl(arg_ch->rs_handle), |
267 | rs_offset + chunk_off, | 264 | rs_offset + chunk_off, |
268 | xdr_off, | 265 | xdr_off, |
269 | this_write, | 266 | write_len, |
270 | vec); | 267 | vec); |
271 | if (ret) { | 268 | if (ret <= 0) { |
272 | dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n", | 269 | dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n", |
273 | ret); | 270 | ret); |
274 | return -EIO; | 271 | return -EIO; |
275 | } | 272 | } |
276 | chunk_off += this_write; | 273 | chunk_off += ret; |
277 | xdr_off += this_write; | 274 | xdr_off += ret; |
278 | xfer_len -= this_write; | 275 | xfer_len -= ret; |
279 | write_len -= this_write; | 276 | write_len -= ret; |
280 | } | 277 | } |
281 | } | 278 | } |
282 | /* Update the req with the number of chunks actually used */ | 279 | /* Update the req with the number of chunks actually used */ |
@@ -293,7 +290,6 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt, | |||
293 | { | 290 | { |
294 | u32 xfer_len = rqstp->rq_res.len; | 291 | u32 xfer_len = rqstp->rq_res.len; |
295 | int write_len; | 292 | int write_len; |
296 | int max_write; | ||
297 | u32 xdr_off; | 293 | u32 xdr_off; |
298 | int chunk_no; | 294 | int chunk_no; |
299 | int chunk_off; | 295 | int chunk_off; |
@@ -311,8 +307,6 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt, | |||
311 | res_ary = (struct rpcrdma_write_array *) | 307 | res_ary = (struct rpcrdma_write_array *) |
312 | &rdma_resp->rm_body.rm_chunks[2]; | 308 | &rdma_resp->rm_body.rm_chunks[2]; |
313 | 309 | ||
314 | max_write = xprt->sc_max_sge * PAGE_SIZE; | ||
315 | |||
316 | /* xdr offset starts at RPC message */ | 310 | /* xdr offset starts at RPC message */ |
317 | nchunks = ntohl(arg_ary->wc_nchunks); | 311 | nchunks = ntohl(arg_ary->wc_nchunks); |
318 | for (xdr_off = 0, chunk_no = 0; | 312 | for (xdr_off = 0, chunk_no = 0; |
@@ -330,24 +324,21 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt, | |||
330 | write_len); | 324 | write_len); |
331 | chunk_off = 0; | 325 | chunk_off = 0; |
332 | while (write_len) { | 326 | while (write_len) { |
333 | int this_write; | ||
334 | |||
335 | this_write = min(write_len, max_write); | ||
336 | ret = send_write(xprt, rqstp, | 327 | ret = send_write(xprt, rqstp, |
337 | ntohl(ch->rs_handle), | 328 | ntohl(ch->rs_handle), |
338 | rs_offset + chunk_off, | 329 | rs_offset + chunk_off, |
339 | xdr_off, | 330 | xdr_off, |
340 | this_write, | 331 | write_len, |
341 | vec); | 332 | vec); |
342 | if (ret) { | 333 | if (ret <= 0) { |
343 | dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n", | 334 | dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n", |
344 | ret); | 335 | ret); |
345 | return -EIO; | 336 | return -EIO; |
346 | } | 337 | } |
347 | chunk_off += this_write; | 338 | chunk_off += ret; |
348 | xdr_off += this_write; | 339 | xdr_off += ret; |
349 | xfer_len -= this_write; | 340 | xfer_len -= ret; |
350 | write_len -= this_write; | 341 | write_len -= ret; |
351 | } | 342 | } |
352 | } | 343 | } |
353 | /* Update the req with the number of chunks actually used */ | 344 | /* Update the req with the number of chunks actually used */ |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index e7323fbbd348..374feb44afea 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
@@ -92,6 +92,7 @@ struct svc_xprt_class svc_rdma_class = { | |||
92 | .xcl_owner = THIS_MODULE, | 92 | .xcl_owner = THIS_MODULE, |
93 | .xcl_ops = &svc_rdma_ops, | 93 | .xcl_ops = &svc_rdma_ops, |
94 | .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP, | 94 | .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP, |
95 | .xcl_ident = XPRT_TRANSPORT_RDMA, | ||
95 | }; | 96 | }; |
96 | 97 | ||
97 | struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt) | 98 | struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt) |
@@ -942,23 +943,8 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) | |||
942 | 943 | ||
943 | ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr); | 944 | ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr); |
944 | if (ret) { | 945 | if (ret) { |
945 | /* | 946 | dprintk("svcrdma: failed to create QP, ret=%d\n", ret); |
946 | * XXX: This is a hack. We need a xx_request_qp interface | 947 | goto errout; |
947 | * that will adjust the qp_attr's with a best-effort | ||
948 | * number | ||
949 | */ | ||
950 | qp_attr.cap.max_send_sge -= 2; | ||
951 | qp_attr.cap.max_recv_sge -= 2; | ||
952 | ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, | ||
953 | &qp_attr); | ||
954 | if (ret) { | ||
955 | dprintk("svcrdma: failed to create QP, ret=%d\n", ret); | ||
956 | goto errout; | ||
957 | } | ||
958 | newxprt->sc_max_sge = qp_attr.cap.max_send_sge; | ||
959 | newxprt->sc_max_sge = qp_attr.cap.max_recv_sge; | ||
960 | newxprt->sc_sq_depth = qp_attr.cap.max_send_wr; | ||
961 | newxprt->sc_max_requests = qp_attr.cap.max_recv_wr; | ||
962 | } | 948 | } |
963 | newxprt->sc_qp = newxprt->sc_cm_id->qp; | 949 | newxprt->sc_qp = newxprt->sc_cm_id->qp; |
964 | 950 | ||
diff --git a/security/selinux/netif.c b/security/selinux/netif.c index 3c3de4ca0ebc..50ce177d71a0 100644 --- a/security/selinux/netif.c +++ b/security/selinux/netif.c | |||
@@ -272,7 +272,7 @@ static struct notifier_block sel_netif_netdev_notifier = { | |||
272 | 272 | ||
273 | static __init int sel_netif_init(void) | 273 | static __init int sel_netif_init(void) |
274 | { | 274 | { |
275 | int i, err; | 275 | int i; |
276 | 276 | ||
277 | if (!selinux_enabled) | 277 | if (!selinux_enabled) |
278 | return 0; | 278 | return 0; |
@@ -282,7 +282,7 @@ static __init int sel_netif_init(void) | |||
282 | 282 | ||
283 | register_netdevice_notifier(&sel_netif_netdev_notifier); | 283 | register_netdevice_notifier(&sel_netif_netdev_notifier); |
284 | 284 | ||
285 | return err; | 285 | return 0; |
286 | } | 286 | } |
287 | 287 | ||
288 | __initcall(sel_netif_init); | 288 | __initcall(sel_netif_init); |
diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c index ddf315260839..da923f89d2a9 100644 --- a/security/selinux/netnode.c +++ b/security/selinux/netnode.c | |||
@@ -303,7 +303,6 @@ void sel_netnode_flush(void) | |||
303 | static __init int sel_netnode_init(void) | 303 | static __init int sel_netnode_init(void) |
304 | { | 304 | { |
305 | int iter; | 305 | int iter; |
306 | int ret; | ||
307 | 306 | ||
308 | if (!selinux_enabled) | 307 | if (!selinux_enabled) |
309 | return 0; | 308 | return 0; |
@@ -313,7 +312,7 @@ static __init int sel_netnode_init(void) | |||
313 | sel_netnode_hash[iter].size = 0; | 312 | sel_netnode_hash[iter].size = 0; |
314 | } | 313 | } |
315 | 314 | ||
316 | return ret; | 315 | return 0; |
317 | } | 316 | } |
318 | 317 | ||
319 | __initcall(sel_netnode_init); | 318 | __initcall(sel_netnode_init); |
diff --git a/security/selinux/netport.c b/security/selinux/netport.c index 73ac6784d091..3311cc393cb4 100644 --- a/security/selinux/netport.c +++ b/security/selinux/netport.c | |||
@@ -237,7 +237,6 @@ void sel_netport_flush(void) | |||
237 | static __init int sel_netport_init(void) | 237 | static __init int sel_netport_init(void) |
238 | { | 238 | { |
239 | int iter; | 239 | int iter; |
240 | int ret; | ||
241 | 240 | ||
242 | if (!selinux_enabled) | 241 | if (!selinux_enabled) |
243 | return 0; | 242 | return 0; |
@@ -247,7 +246,7 @@ static __init int sel_netport_init(void) | |||
247 | sel_netport_hash[iter].size = 0; | 246 | sel_netport_hash[iter].size = 0; |
248 | } | 247 | } |
249 | 248 | ||
250 | return ret; | 249 | return 0; |
251 | } | 250 | } |
252 | 251 | ||
253 | __initcall(sel_netport_init); | 252 | __initcall(sel_netport_init); |
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 6fd2a4402069..36ff2e4c7b6f 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile | |||
@@ -5,6 +5,7 @@ TARGETS += kcmp | |||
5 | TARGETS += memfd | 5 | TARGETS += memfd |
6 | TARGETS += memory-hotplug | 6 | TARGETS += memory-hotplug |
7 | TARGETS += mqueue | 7 | TARGETS += mqueue |
8 | TARGETS += mount | ||
8 | TARGETS += net | 9 | TARGETS += net |
9 | TARGETS += ptrace | 10 | TARGETS += ptrace |
10 | TARGETS += timers | 11 | TARGETS += timers |
diff --git a/tools/testing/selftests/mount/Makefile b/tools/testing/selftests/mount/Makefile new file mode 100644 index 000000000000..337d853c2b72 --- /dev/null +++ b/tools/testing/selftests/mount/Makefile | |||
@@ -0,0 +1,17 @@ | |||
1 | # Makefile for mount selftests. | ||
2 | |||
3 | all: unprivileged-remount-test | ||
4 | |||
5 | unprivileged-remount-test: unprivileged-remount-test.c | ||
6 | gcc -Wall -O2 unprivileged-remount-test.c -o unprivileged-remount-test | ||
7 | |||
8 | # Allow specific tests to be selected. | ||
9 | test_unprivileged_remount: unprivileged-remount-test | ||
10 | @if [ -f /proc/self/uid_map ] ; then ./unprivileged-remount-test ; fi | ||
11 | |||
12 | run_tests: all test_unprivileged_remount | ||
13 | |||
14 | clean: | ||
15 | rm -f unprivileged-remount-test | ||
16 | |||
17 | .PHONY: all test_unprivileged_remount | ||
diff --git a/tools/testing/selftests/mount/unprivileged-remount-test.c b/tools/testing/selftests/mount/unprivileged-remount-test.c new file mode 100644 index 000000000000..1b3ff2fda4d0 --- /dev/null +++ b/tools/testing/selftests/mount/unprivileged-remount-test.c | |||
@@ -0,0 +1,242 @@ | |||
1 | #define _GNU_SOURCE | ||
2 | #include <sched.h> | ||
3 | #include <stdio.h> | ||
4 | #include <errno.h> | ||
5 | #include <string.h> | ||
6 | #include <sys/types.h> | ||
7 | #include <sys/mount.h> | ||
8 | #include <sys/wait.h> | ||
9 | #include <stdlib.h> | ||
10 | #include <unistd.h> | ||
11 | #include <fcntl.h> | ||
12 | #include <grp.h> | ||
13 | #include <stdbool.h> | ||
14 | #include <stdarg.h> | ||
15 | |||
16 | #ifndef CLONE_NEWNS | ||
17 | # define CLONE_NEWNS 0x00020000 | ||
18 | #endif | ||
19 | #ifndef CLONE_NEWUTS | ||
20 | # define CLONE_NEWUTS 0x04000000 | ||
21 | #endif | ||
22 | #ifndef CLONE_NEWIPC | ||
23 | # define CLONE_NEWIPC 0x08000000 | ||
24 | #endif | ||
25 | #ifndef CLONE_NEWNET | ||
26 | # define CLONE_NEWNET 0x40000000 | ||
27 | #endif | ||
28 | #ifndef CLONE_NEWUSER | ||
29 | # define CLONE_NEWUSER 0x10000000 | ||
30 | #endif | ||
31 | #ifndef CLONE_NEWPID | ||
32 | # define CLONE_NEWPID 0x20000000 | ||
33 | #endif | ||
34 | |||
35 | #ifndef MS_RELATIME | ||
36 | #define MS_RELATIME (1 << 21) | ||
37 | #endif | ||
38 | #ifndef MS_STRICTATIME | ||
39 | #define MS_STRICTATIME (1 << 24) | ||
40 | #endif | ||
41 | |||
42 | static void die(char *fmt, ...) | ||
43 | { | ||
44 | va_list ap; | ||
45 | va_start(ap, fmt); | ||
46 | vfprintf(stderr, fmt, ap); | ||
47 | va_end(ap); | ||
48 | exit(EXIT_FAILURE); | ||
49 | } | ||
50 | |||
51 | static void write_file(char *filename, char *fmt, ...) | ||
52 | { | ||
53 | char buf[4096]; | ||
54 | int fd; | ||
55 | ssize_t written; | ||
56 | int buf_len; | ||
57 | va_list ap; | ||
58 | |||
59 | va_start(ap, fmt); | ||
60 | buf_len = vsnprintf(buf, sizeof(buf), fmt, ap); | ||
61 | va_end(ap); | ||
62 | if (buf_len < 0) { | ||
63 | die("vsnprintf failed: %s\n", | ||
64 | strerror(errno)); | ||
65 | } | ||
66 | if (buf_len >= sizeof(buf)) { | ||
67 | die("vsnprintf output truncated\n"); | ||
68 | } | ||
69 | |||
70 | fd = open(filename, O_WRONLY); | ||
71 | if (fd < 0) { | ||
72 | die("open of %s failed: %s\n", | ||
73 | filename, strerror(errno)); | ||
74 | } | ||
75 | written = write(fd, buf, buf_len); | ||
76 | if (written != buf_len) { | ||
77 | if (written >= 0) { | ||
78 | die("short write to %s\n", filename); | ||
79 | } else { | ||
80 | die("write to %s failed: %s\n", | ||
81 | filename, strerror(errno)); | ||
82 | } | ||
83 | } | ||
84 | if (close(fd) != 0) { | ||
85 | die("close of %s failed: %s\n", | ||
86 | filename, strerror(errno)); | ||
87 | } | ||
88 | } | ||
89 | |||
90 | static void create_and_enter_userns(void) | ||
91 | { | ||
92 | uid_t uid; | ||
93 | gid_t gid; | ||
94 | |||
95 | uid = getuid(); | ||
96 | gid = getgid(); | ||
97 | |||
98 | if (unshare(CLONE_NEWUSER) !=0) { | ||
99 | die("unshare(CLONE_NEWUSER) failed: %s\n", | ||
100 | strerror(errno)); | ||
101 | } | ||
102 | |||
103 | write_file("/proc/self/uid_map", "0 %d 1", uid); | ||
104 | write_file("/proc/self/gid_map", "0 %d 1", gid); | ||
105 | |||
106 | if (setgroups(0, NULL) != 0) { | ||
107 | die("setgroups failed: %s\n", | ||
108 | strerror(errno)); | ||
109 | } | ||
110 | if (setgid(0) != 0) { | ||
111 | die ("setgid(0) failed %s\n", | ||
112 | strerror(errno)); | ||
113 | } | ||
114 | if (setuid(0) != 0) { | ||
115 | die("setuid(0) failed %s\n", | ||
116 | strerror(errno)); | ||
117 | } | ||
118 | } | ||
119 | |||
120 | static | ||
121 | bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags) | ||
122 | { | ||
123 | pid_t child; | ||
124 | |||
125 | child = fork(); | ||
126 | if (child == -1) { | ||
127 | die("fork failed: %s\n", | ||
128 | strerror(errno)); | ||
129 | } | ||
130 | if (child != 0) { /* parent */ | ||
131 | pid_t pid; | ||
132 | int status; | ||
133 | pid = waitpid(child, &status, 0); | ||
134 | if (pid == -1) { | ||
135 | die("waitpid failed: %s\n", | ||
136 | strerror(errno)); | ||
137 | } | ||
138 | if (pid != child) { | ||
139 | die("waited for %d got %d\n", | ||
140 | child, pid); | ||
141 | } | ||
142 | if (!WIFEXITED(status)) { | ||
143 | die("child did not terminate cleanly\n"); | ||
144 | } | ||
145 | return WEXITSTATUS(status) == EXIT_SUCCESS ? true : false; | ||
146 | } | ||
147 | |||
148 | create_and_enter_userns(); | ||
149 | if (unshare(CLONE_NEWNS) != 0) { | ||
150 | die("unshare(CLONE_NEWNS) failed: %s\n", | ||
151 | strerror(errno)); | ||
152 | } | ||
153 | |||
154 | if (mount("testing", "/tmp", "ramfs", mount_flags, NULL) != 0) { | ||
155 | die("mount of /tmp failed: %s\n", | ||
156 | strerror(errno)); | ||
157 | } | ||
158 | |||
159 | create_and_enter_userns(); | ||
160 | |||
161 | if (unshare(CLONE_NEWNS) != 0) { | ||
162 | die("unshare(CLONE_NEWNS) failed: %s\n", | ||
163 | strerror(errno)); | ||
164 | } | ||
165 | |||
166 | if (mount("/tmp", "/tmp", "none", | ||
167 | MS_REMOUNT | MS_BIND | remount_flags, NULL) != 0) { | ||
168 | /* system("cat /proc/self/mounts"); */ | ||
169 | die("remount of /tmp failed: %s\n", | ||
170 | strerror(errno)); | ||
171 | } | ||
172 | |||
173 | if (mount("/tmp", "/tmp", "none", | ||
174 | MS_REMOUNT | MS_BIND | invalid_flags, NULL) == 0) { | ||
175 | /* system("cat /proc/self/mounts"); */ | ||
176 | die("remount of /tmp with invalid flags " | ||
177 | "succeeded unexpectedly\n"); | ||
178 | } | ||
179 | exit(EXIT_SUCCESS); | ||
180 | } | ||
181 | |||
/* Remount with exactly the original flags and no invalid-flag probe. */
static bool test_unpriv_remount_simple(int mount_flags)
{
	const int no_invalid_probe = 0;

	return test_unpriv_remount(mount_flags, mount_flags,
				   no_invalid_probe);
}
186 | |||
/*
 * Remount with the original atime flags, then verify that switching
 * to a different atime mode (invalid_flags) is rejected.
 */
static bool test_unpriv_remount_atime(int mount_flags, int invalid_flags)
{
	return test_unpriv_remount(mount_flags, mount_flags,
				   invalid_flags);
}
191 | |||
/*
 * Exercise unprivileged remount restrictions: a user-namespace root may
 * remount a mount it does not own only if it preserves the flags that
 * were locked by the original (more privileged) mount.
 *
 * Fixes copy-pasted diagnostics: the MS_NOATIME and the *|MS_NODIRATIME
 * cases previously all reported "MS_RELATIME malfunctions", which
 * misidentified the failing flag combination.
 */
int main(int argc, char **argv)
{
	/* Each locked flag must survive an identical unprivileged remount. */
	if (!test_unpriv_remount_simple(MS_RDONLY|MS_NODEV)) {
		die("MS_RDONLY malfunctions\n");
	}
	if (!test_unpriv_remount_simple(MS_NODEV)) {
		die("MS_NODEV malfunctions\n");
	}
	if (!test_unpriv_remount_simple(MS_NOSUID|MS_NODEV)) {
		die("MS_NOSUID malfunctions\n");
	}
	if (!test_unpriv_remount_simple(MS_NOEXEC|MS_NODEV)) {
		die("MS_NOEXEC malfunctions\n");
	}
	/* Changing from one atime mode to another must be refused. */
	if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODEV,
				       MS_NOATIME|MS_NODEV))
	{
		die("MS_RELATIME malfunctions\n");
	}
	if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODEV,
				       MS_NOATIME|MS_NODEV))
	{
		die("MS_STRICTATIME malfunctions\n");
	}
	if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODEV,
				       MS_STRICTATIME|MS_NODEV))
	{
		die("MS_NOATIME malfunctions\n");
	}
	if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME|MS_NODEV,
				       MS_NOATIME|MS_NODEV))
	{
		die("MS_RELATIME|MS_NODIRATIME malfunctions\n");
	}
	if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME|MS_NODEV,
				       MS_NOATIME|MS_NODEV))
	{
		die("MS_STRICTATIME|MS_NODIRATIME malfunctions\n");
	}
	if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME|MS_NODEV,
				       MS_STRICTATIME|MS_NODEV))
	{
		die("MS_NOATIME|MS_NODIRATIME malfunctions\n");
	}
	/* Omitting atime flags on remount keeps the mount's current
	 * (locked) atime mode rather than resetting it. */
	if (!test_unpriv_remount(MS_STRICTATIME|MS_NODEV, MS_NODEV,
				 MS_NOATIME|MS_NODEV))
	{
		die("Default atime malfunctions\n");
	}
	return EXIT_SUCCESS;
}