diff options
205 files changed, 3130 insertions, 1342 deletions
diff --git a/Documentation/hid/uhid.txt b/Documentation/hid/uhid.txt index 54c8f9706a95..c8656dd029a9 100644 --- a/Documentation/hid/uhid.txt +++ b/Documentation/hid/uhid.txt | |||
| @@ -1,28 +1,13 @@ | |||
| 1 | UHID - User-space I/O driver support for HID subsystem | 1 | UHID - User-space I/O driver support for HID subsystem |
| 2 | ======================================================== | 2 | ======================================================== |
| 3 | 3 | ||
| 4 | The HID subsystem needs two kinds of drivers. In this document we call them: | 4 | UHID allows user-space to implement HID transport drivers. Please see |
| 5 | hid-transport.txt for an introduction into HID transport drivers. This document | ||
| 6 | relies heavily on the definitions declared there. | ||
| 5 | 7 | ||
| 6 | 1. The "HID I/O Driver" is the driver that performs raw data I/O to the | 8 | With UHID, a user-space transport driver can create kernel hid-devices for each |
| 7 | low-level device. Internally, they register an hid_ll_driver structure with | 9 | device connected to the user-space controlled bus. The UHID API defines the I/O |
| 8 | the HID core. They perform device setup, read raw data from the device and | 10 | events provided from the kernel to user-space and vice versa. |
| 9 | push it into the HID subsystem and they provide a callback so the HID | ||
| 10 | subsystem can send data to the device. | ||
| 11 | |||
| 12 | 2. The "HID Device Driver" is the driver that parses HID reports and reacts on | ||
| 13 | them. There are generic drivers like "generic-usb" and "generic-bluetooth" | ||
| 14 | which adhere to the HID specification and provide the standardizes features. | ||
| 15 | But there may be special drivers and quirks for each non-standard device out | ||
| 16 | there. Internally, they use the hid_driver structure. | ||
| 17 | |||
| 18 | Historically, the USB stack was the first subsystem to provide an HID I/O | ||
| 19 | Driver. However, other standards like Bluetooth have adopted the HID specs and | ||
| 20 | may provide HID I/O Drivers, too. The UHID driver allows to implement HID I/O | ||
| 21 | Drivers in user-space and feed the data into the kernel HID-subsystem. | ||
| 22 | |||
| 23 | This allows user-space to operate on the same level as USB-HID, Bluetooth-HID | ||
| 24 | and similar. It does not provide a way to write HID Device Drivers, though. Use | ||
| 25 | hidraw for this purpose. | ||
| 26 | 11 | ||
| 27 | There is an example user-space application in ./samples/uhid/uhid-example.c | 12 | There is an example user-space application in ./samples/uhid/uhid-example.c |
| 28 | 13 | ||
| @@ -42,8 +27,9 @@ by setting O_NONBLOCK. | |||
| 42 | struct uhid_event { | 27 | struct uhid_event { |
| 43 | __u32 type; | 28 | __u32 type; |
| 44 | union { | 29 | union { |
| 45 | struct uhid_create_req create; | 30 | struct uhid_create2_req create2; |
| 46 | struct uhid_data_req data; | 31 | struct uhid_output_req output; |
| 32 | struct uhid_input2_req input2; | ||
| 47 | ... | 33 | ... |
| 48 | } u; | 34 | } u; |
| 49 | }; | 35 | }; |
| @@ -54,8 +40,11 @@ multiple write()'s. A single event must always be sent as a whole. Furthermore, | |||
| 54 | only a single event can be sent per read() or write(). Pending data is ignored. | 40 | only a single event can be sent per read() or write(). Pending data is ignored. |
| 55 | If you want to handle multiple events in a single syscall, then use vectored | 41 | If you want to handle multiple events in a single syscall, then use vectored |
| 56 | I/O with readv()/writev(). | 42 | I/O with readv()/writev(). |
| 43 | The "type" field defines the payload. For each type, there is a | ||
| 44 | payload-structure available in the union "u" (except for empty payloads). This | ||
| 45 | payload contains management and/or device data. | ||
| 57 | 46 | ||
| 58 | The first thing you should do is sending an UHID_CREATE event. This will | 47 | The first thing you should do is sending an UHID_CREATE2 event. This will |
| 59 | register the device. UHID will respond with an UHID_START event. You can now | 48 | register the device. UHID will respond with an UHID_START event. You can now |
| 60 | start sending data to and reading data from UHID. However, unless UHID sends the | 49 | start sending data to and reading data from UHID. However, unless UHID sends the |
| 61 | UHID_OPEN event, the internally attached HID Device Driver has no user attached. | 50 | UHID_OPEN event, the internally attached HID Device Driver has no user attached. |
| @@ -69,12 +58,20 @@ ref-counting for you. | |||
| 69 | You may decide to ignore UHID_OPEN/UHID_CLOSE, though. I/O is allowed even | 58 | You may decide to ignore UHID_OPEN/UHID_CLOSE, though. I/O is allowed even |
| 70 | though the device may have no users. | 59 | though the device may have no users. |
| 71 | 60 | ||
| 72 | If you want to send data to the HID subsystem, you send an HID_INPUT event with | 61 | If you want to send data on the interrupt channel to the HID subsystem, you send |
| 73 | your raw data payload. If the kernel wants to send data to the device, you will | 62 | an HID_INPUT2 event with your raw data payload. If the kernel wants to send data |
| 74 | read an UHID_OUTPUT or UHID_OUTPUT_EV event. | 63 | on the interrupt channel to the device, you will read an UHID_OUTPUT event. |
| 64 | Data requests on the control channel are currently limited to GET_REPORT and | ||
| 65 | SET_REPORT (no other data reports on the control channel are defined so far). | ||
| 66 | Those requests are always synchronous. That means, the kernel sends | ||
| 67 | UHID_GET_REPORT and UHID_SET_REPORT events and requires you to forward them to | ||
| 68 | the device on the control channel. Once the device responds, you must forward | ||
| 69 | the response via UHID_GET_REPORT_REPLY and UHID_SET_REPORT_REPLY to the kernel. | ||
| 70 | The kernel blocks internal driver-execution during such round-trips (times out | ||
| 71 | after a hard-coded period). | ||
| 75 | 72 | ||
| 76 | If your device disconnects, you should send an UHID_DESTROY event. This will | 73 | If your device disconnects, you should send an UHID_DESTROY event. This will |
| 77 | unregister the device. You can now send UHID_CREATE again to register a new | 74 | unregister the device. You can now send UHID_CREATE2 again to register a new |
| 78 | device. | 75 | device. |
| 79 | If you close() the fd, the device is automatically unregistered and destroyed | 76 | If you close() the fd, the device is automatically unregistered and destroyed |
| 80 | internally. | 77 | internally. |
| @@ -82,73 +79,79 @@ internally. | |||
| 82 | write() | 79 | write() |
| 83 | ------- | 80 | ------- |
| 84 | write() allows you to modify the state of the device and feed input data into | 81 | write() allows you to modify the state of the device and feed input data into |
| 85 | the kernel. The following types are supported: UHID_CREATE, UHID_DESTROY and | 82 | the kernel. The kernel will parse the event immediately and if the event ID is |
| 86 | UHID_INPUT. The kernel will parse the event immediately and if the event ID is | ||
| 87 | not supported, it will return -EOPNOTSUPP. If the payload is invalid, then | 83 | not supported, it will return -EOPNOTSUPP. If the payload is invalid, then |
| 88 | -EINVAL is returned, otherwise, the amount of data that was read is returned and | 84 | -EINVAL is returned, otherwise, the amount of data that was read is returned and |
| 89 | the request was handled successfully. | 85 | the request was handled successfully. O_NONBLOCK does not affect write() as |
| 86 | writes are always handled immediately in a non-blocking fashion. Future requests | ||
| 87 | might make use of O_NONBLOCK, though. | ||
| 90 | 88 | ||
| 91 | UHID_CREATE: | 89 | UHID_CREATE2: |
| 92 | This creates the internal HID device. No I/O is possible until you send this | 90 | This creates the internal HID device. No I/O is possible until you send this |
| 93 | event to the kernel. The payload is of type struct uhid_create_req and | 91 | event to the kernel. The payload is of type struct uhid_create2_req and |
| 94 | contains information about your device. You can start I/O now. | 92 | contains information about your device. You can start I/O now. |
| 95 | 93 | ||
| 96 | UHID_CREATE2: | ||
| 97 | Same as UHID_CREATE, but the HID report descriptor data (rd_data) is an array | ||
| 98 | inside struct uhid_create2_req, instead of a pointer to a separate array. | ||
| 99 | Enables use from languages that don't support pointers, e.g. Python. | ||
| 100 | |||
| 101 | UHID_DESTROY: | 94 | UHID_DESTROY: |
| 102 | This destroys the internal HID device. No further I/O will be accepted. There | 95 | This destroys the internal HID device. No further I/O will be accepted. There |
| 103 | may still be pending messages that you can receive with read() but no further | 96 | may still be pending messages that you can receive with read() but no further |
| 104 | UHID_INPUT events can be sent to the kernel. | 97 | UHID_INPUT events can be sent to the kernel. |
| 105 | You can create a new device by sending UHID_CREATE again. There is no need to | 98 | You can create a new device by sending UHID_CREATE2 again. There is no need to |
| 106 | reopen the character device. | 99 | reopen the character device. |
| 107 | 100 | ||
| 108 | UHID_INPUT: | ||
| 109 | You must send UHID_CREATE before sending input to the kernel! This event | ||
| 110 | contains a data-payload. This is the raw data that you read from your device. | ||
| 111 | The kernel will parse the HID reports and react on it. | ||
| 112 | |||
| 113 | UHID_INPUT2: | 101 | UHID_INPUT2: |
| 114 | Same as UHID_INPUT, but the data array is the last field of uhid_input2_req. | 102 | You must send UHID_CREATE2 before sending input to the kernel! This event |
| 115 | Enables userspace to write only the required bytes to kernel (ev.type + | 103 | contains a data-payload. This is the raw data that you read from your device |
| 116 | ev.u.input2.size + the part of the data array that matters), instead of | 104 | on the interrupt channel. The kernel will parse the HID reports. |
| 117 | the entire struct uhid_input2_req. | 105 | |
| 118 | 106 | UHID_GET_REPORT_REPLY: | |
| 119 | UHID_FEATURE_ANSWER: | 107 | If you receive a UHID_GET_REPORT request you must answer with this request. |
| 120 | If you receive a UHID_FEATURE request you must answer with this request. You | 108 | You must copy the "id" field from the request into the answer. Set the "err" |
| 121 | must copy the "id" field from the request into the answer. Set the "err" field | 109 | field to 0 if no error occurred or to EIO if an I/O error occurred. |
| 122 | to 0 if no error occurred or to EIO if an I/O error occurred. | ||
| 123 | If "err" is 0 then you should fill the buffer of the answer with the results | 110 | If "err" is 0 then you should fill the buffer of the answer with the results |
| 124 | of the feature request and set "size" correspondingly. | 111 | of the GET_REPORT request and set "size" correspondingly. |
| 112 | |||
| 113 | UHID_SET_REPORT_REPLY: | ||
| 114 | This is the SET_REPORT equivalent of UHID_GET_REPORT_REPLY. Unlike GET_REPORT, | ||
| 115 | SET_REPORT never returns a data buffer, therefore, it's sufficient to set the | ||
| 116 | "id" and "err" fields correctly. | ||
| 125 | 117 | ||
| 126 | read() | 118 | read() |
| 127 | ------ | 119 | ------ |
| 128 | read() will return a queued output report. These output reports can be of type | 120 | read() will return a queued output report. No reaction is required to any of |
| 129 | UHID_START, UHID_STOP, UHID_OPEN, UHID_CLOSE, UHID_OUTPUT or UHID_OUTPUT_EV. No | 121 | them but you should handle them according to your needs. |
| 130 | reaction is required to any of them but you should handle them according to your | ||
| 131 | needs. Only UHID_OUTPUT and UHID_OUTPUT_EV have payloads. | ||
| 132 | 122 | ||
| 133 | UHID_START: | 123 | UHID_START: |
| 134 | This is sent when the HID device is started. Consider this as an answer to | 124 | This is sent when the HID device is started. Consider this as an answer to |
| 135 | UHID_CREATE. This is always the first event that is sent. | 125 | UHID_CREATE2. This is always the first event that is sent. Note that this |
| 126 | event might not be available immediately after write(UHID_CREATE2) returns. | ||
| 127 | Device drivers might require delayed setups. | ||
| 128 | This event contains a payload of type uhid_start_req. The "dev_flags" field | ||
| 129 | describes special behaviors of a device. The following flags are defined: | ||
| 130 | UHID_DEV_NUMBERED_FEATURE_REPORTS: | ||
| 131 | UHID_DEV_NUMBERED_OUTPUT_REPORTS: | ||
| 132 | UHID_DEV_NUMBERED_INPUT_REPORTS: | ||
| 133 | Each of these flags defines whether a given report-type uses numbered | ||
| 134 | reports. If numbered reports are used for a type, all messages from | ||
| 135 | the kernel already have the report-number as prefix. Otherwise, no | ||
| 136 | prefix is added by the kernel. | ||
| 137 | For messages sent by user-space to the kernel, you must adjust the | ||
| 138 | prefixes according to these flags. | ||
| 136 | 139 | ||
| 137 | UHID_STOP: | 140 | UHID_STOP: |
| 138 | This is sent when the HID device is stopped. Consider this as an answer to | 141 | This is sent when the HID device is stopped. Consider this as an answer to |
| 139 | UHID_DESTROY. | 142 | UHID_DESTROY. |
| 140 | If the kernel HID device driver closes the device manually (that is, you | 143 | If you didn't destroy your device via UHID_DESTROY, but the kernel sends an |
| 141 | didn't send UHID_DESTROY) then you should consider this device closed and send | 144 | UHID_STOP event, this should usually be ignored. It means that the kernel |
| 142 | an UHID_DESTROY event. You may want to reregister your device, though. This is | 145 | reloaded/changed the device driver loaded on your HID device (or some other |
| 143 | always the last message that is sent to you unless you reopen the device with | 146 | maintenance actions happened). |
| 144 | UHID_CREATE. | 147 | You can usually ignore any UHID_STOP events safely. |
| 145 | 148 | ||
| 146 | UHID_OPEN: | 149 | UHID_OPEN: |
| 147 | This is sent when the HID device is opened. That is, the data that the HID | 150 | This is sent when the HID device is opened. That is, the data that the HID |
| 148 | device provides is read by some other process. You may ignore this event but | 151 | device provides is read by some other process. You may ignore this event but |
| 149 | it is useful for power-management. As long as you haven't received this event | 152 | it is useful for power-management. As long as you haven't received this event |
| 150 | there is actually no other process that reads your data so there is no need to | 153 | there is actually no other process that reads your data so there is no need to |
| 151 | send UHID_INPUT events to the kernel. | 154 | send UHID_INPUT2 events to the kernel. |
| 152 | 155 | ||
| 153 | UHID_CLOSE: | 156 | UHID_CLOSE: |
| 154 | This is sent when there are no more processes which read the HID data. It is | 157 | This is sent when there are no more processes which read the HID data. It is |
| @@ -156,27 +159,29 @@ needs. Only UHID_OUTPUT and UHID_OUTPUT_EV have payloads. | |||
| 156 | 159 | ||
| 157 | UHID_OUTPUT: | 160 | UHID_OUTPUT: |
| 158 | This is sent if the HID device driver wants to send raw data to the I/O | 161 | This is sent if the HID device driver wants to send raw data to the I/O |
| 159 | device. You should read the payload and forward it to the device. The payload | 162 | device on the interrupt channel. You should read the payload and forward it to |
| 160 | is of type "struct uhid_data_req". | 163 | the device. The payload is of type "struct uhid_data_req". |
| 161 | This may be received even though you haven't received UHID_OPEN, yet. | 164 | This may be received even though you haven't received UHID_OPEN, yet. |
| 162 | 165 | ||
| 163 | UHID_OUTPUT_EV (obsolete): | 166 | UHID_GET_REPORT: |
| 164 | Same as UHID_OUTPUT but this contains a "struct input_event" as payload. This | 167 | This event is sent if the kernel driver wants to perform a GET_REPORT request |
| 165 | is called for force-feedback, LED or similar events which are received through | 168 | on the control channel as described in the HID specs. The report-type and |
| 166 | an input device by the HID subsystem. You should convert this into raw reports | 169 | report-number are available in the payload. |
| 167 | and send them to your device similar to events of type UHID_OUTPUT. | 170 | The kernel serializes GET_REPORT requests so there will never be two in |
| 168 | This is no longer sent by newer kernels. Instead, HID core converts it into a | 171 | parallel. However, if you fail to respond with a UHID_GET_REPORT_REPLY, the |
| 169 | raw output report and sends it via UHID_OUTPUT. | 172 | request might silently time out. |
| 170 | 173 | Once you read a GET_REPORT request, you shall forward it to the hid device and | |
| 171 | UHID_FEATURE: | 174 | remember the "id" field in the payload. Once your hid device responds to the |
| 172 | This event is sent if the kernel driver wants to perform a feature request as | 175 | GET_REPORT (or if it fails), you must send a UHID_GET_REPORT_REPLY to the |
| 173 | described in the HID specs. The report-type and report-number are available in | 176 | kernel with the exact same "id" as in the request. If the request already |
| 174 | the payload. | 177 | timed out, the kernel will ignore the response silently. The "id" field is |
| 175 | The kernel serializes feature requests so there will never be two in parallel. | 178 | never re-used, so conflicts cannot happen. |
| 176 | However, if you fail to respond with a UHID_FEATURE_ANSWER in a time-span of 5 | 179 | |
| 177 | seconds, then the requests will be dropped and a new one might be sent. | 180 | UHID_SET_REPORT: |
| 178 | Therefore, the payload also contains an "id" field that identifies every | 181 | This is the SET_REPORT equivalent of UHID_GET_REPORT. On receipt, you shall |
| 179 | request. | 182 | send a SET_REPORT request to your hid device. Once it replies, you must tell |
| 180 | 183 | the kernel about it via UHID_SET_REPORT_REPLY. | |
| 181 | Document by: | 184 | The same restrictions as for UHID_GET_REPORT apply. |
| 182 | David Herrmann <dh.herrmann@googlemail.com> | 185 | |
| 186 | ---------------------------------------------------- | ||
| 187 | Written 2012, David Herrmann <dh.herrmann@gmail.com> | ||
diff --git a/Documentation/this_cpu_ops.txt b/Documentation/this_cpu_ops.txt index 1a4ce7e3e05f..0ec995712176 100644 --- a/Documentation/this_cpu_ops.txt +++ b/Documentation/this_cpu_ops.txt | |||
| @@ -2,26 +2,26 @@ this_cpu operations | |||
| 2 | ------------------- | 2 | ------------------- |
| 3 | 3 | ||
| 4 | this_cpu operations are a way of optimizing access to per cpu | 4 | this_cpu operations are a way of optimizing access to per cpu |
| 5 | variables associated with the *currently* executing processor through | 5 | variables associated with the *currently* executing processor. This is |
| 6 | the use of segment registers (or a dedicated register where the cpu | 6 | done through the use of segment registers (or a dedicated register where |
| 7 | permanently stored the beginning of the per cpu area for a specific | 7 | the cpu permanently stores the beginning of the per cpu area for a |
| 8 | processor). | 8 | specific processor). |
| 9 | 9 | ||
| 10 | The this_cpu operations add a per cpu variable offset to the processor | 10 | this_cpu operations add a per cpu variable offset to the processor |
| 11 | specific percpu base and encode that operation in the instruction | 11 | specific per cpu base and encode that operation in the instruction |
| 12 | operating on the per cpu variable. | 12 | operating on the per cpu variable. |
| 13 | 13 | ||
| 14 | This means there are no atomicity issues between the calculation of | 14 | This means that there are no atomicity issues between the calculation of |
| 15 | the offset and the operation on the data. Therefore it is not | 15 | the offset and the operation on the data. Therefore it is not |
| 16 | necessary to disable preempt or interrupts to ensure that the | 16 | necessary to disable preemption or interrupts to ensure that the |
| 17 | processor is not changed between the calculation of the address and | 17 | processor is not changed between the calculation of the address and |
| 18 | the operation on the data. | 18 | the operation on the data. |
| 19 | 19 | ||
| 20 | Read-modify-write operations are of particular interest. Frequently | 20 | Read-modify-write operations are of particular interest. Frequently |
| 21 | processors have special lower latency instructions that can operate | 21 | processors have special lower latency instructions that can operate |
| 22 | without the typical synchronization overhead but still provide some | 22 | without the typical synchronization overhead, but still provide some |
| 23 | sort of relaxed atomicity guarantee. The x86 for example can execute | 23 | sort of relaxed atomicity guarantees. The x86, for example, can execute |
| 24 | RMV (Read Modify Write) instructions like inc/dec/cmpxchg without the | 24 | RMW (Read Modify Write) instructions like inc/dec/cmpxchg without the |
| 25 | lock prefix and the associated latency penalty. | 25 | lock prefix and the associated latency penalty. |
| 26 | 26 | ||
| 27 | Access to the variable without the lock prefix is not synchronized but | 27 | Access to the variable without the lock prefix is not synchronized but |
| @@ -30,6 +30,38 @@ data specific to the currently executing processor. Only the current | |||
| 30 | processor should be accessing that variable and therefore there are no | 30 | processor should be accessing that variable and therefore there are no |
| 31 | concurrency issues with other processors in the system. | 31 | concurrency issues with other processors in the system. |
| 32 | 32 | ||
| 33 | Please note that accesses by remote processors to a per cpu area are | ||
| 34 | exceptional situations and may impact performance and/or correctness | ||
| 35 | (remote write operations) of local RMW operations via this_cpu_*. | ||
| 36 | |||
| 37 | The main use of the this_cpu operations has been to optimize counter | ||
| 38 | operations. | ||
| 39 | |||
| 40 | The following this_cpu() operations with implied preemption protection | ||
| 41 | are defined. These operations can be used without worrying about | ||
| 42 | preemption and interrupts. | ||
| 43 | |||
| 44 | this_cpu_add() | ||
| 45 | this_cpu_read(pcp) | ||
| 46 | this_cpu_write(pcp, val) | ||
| 47 | this_cpu_add(pcp, val) | ||
| 48 | this_cpu_and(pcp, val) | ||
| 49 | this_cpu_or(pcp, val) | ||
| 50 | this_cpu_add_return(pcp, val) | ||
| 51 | this_cpu_xchg(pcp, nval) | ||
| 52 | this_cpu_cmpxchg(pcp, oval, nval) | ||
| 53 | this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) | ||
| 54 | this_cpu_sub(pcp, val) | ||
| 55 | this_cpu_inc(pcp) | ||
| 56 | this_cpu_dec(pcp) | ||
| 57 | this_cpu_sub_return(pcp, val) | ||
| 58 | this_cpu_inc_return(pcp) | ||
| 59 | this_cpu_dec_return(pcp) | ||
| 60 | |||
| 61 | |||
| 62 | Inner working of this_cpu operations | ||
| 63 | ------------------------------------ | ||
| 64 | |||
| 33 | On x86 the fs: or the gs: segment registers contain the base of the | 65 | On x86 the fs: or the gs: segment registers contain the base of the |
| 34 | per cpu area. It is then possible to simply use the segment override | 66 | per cpu area. It is then possible to simply use the segment override |
| 35 | to relocate a per cpu relative address to the proper per cpu area for | 67 | to relocate a per cpu relative address to the proper per cpu area for |
| @@ -48,22 +80,21 @@ results in a single instruction | |||
| 48 | mov ax, gs:[x] | 80 | mov ax, gs:[x] |
| 49 | 81 | ||
| 50 | instead of a sequence of calculation of the address and then a fetch | 82 | instead of a sequence of calculation of the address and then a fetch |
| 51 | from that address which occurs with the percpu operations. Before | 83 | from that address which occurs with the per cpu operations. Before |
| 52 | this_cpu_ops such sequence also required preempt disable/enable to | 84 | this_cpu_ops such sequence also required preempt disable/enable to |
| 53 | prevent the kernel from moving the thread to a different processor | 85 | prevent the kernel from moving the thread to a different processor |
| 54 | while the calculation is performed. | 86 | while the calculation is performed. |
| 55 | 87 | ||
| 56 | The main use of the this_cpu operations has been to optimize counter | 88 | Consider the following this_cpu operation: |
| 57 | operations. | ||
| 58 | 89 | ||
| 59 | this_cpu_inc(x) | 90 | this_cpu_inc(x) |
| 60 | 91 | ||
| 61 | results in the following single instruction (no lock prefix!) | 92 | The above results in the following single instruction (no lock prefix!) |
| 62 | 93 | ||
| 63 | inc gs:[x] | 94 | inc gs:[x] |
| 64 | 95 | ||
| 65 | instead of the following operations required if there is no segment | 96 | instead of the following operations required if there is no segment |
| 66 | register. | 97 | register: |
| 67 | 98 | ||
| 68 | int *y; | 99 | int *y; |
| 69 | int cpu; | 100 | int cpu; |
| @@ -73,10 +104,10 @@ register. | |||
| 73 | (*y)++; | 104 | (*y)++; |
| 74 | put_cpu(); | 105 | put_cpu(); |
| 75 | 106 | ||
| 76 | Note that these operations can only be used on percpu data that is | 107 | Note that these operations can only be used on per cpu data that is |
| 77 | reserved for a specific processor. Without disabling preemption in the | 108 | reserved for a specific processor. Without disabling preemption in the |
| 78 | surrounding code this_cpu_inc() will only guarantee that one of the | 109 | surrounding code this_cpu_inc() will only guarantee that one of the |
| 79 | percpu counters is correctly incremented. However, there is no | 110 | per cpu counters is correctly incremented. However, there is no |
| 80 | guarantee that the OS will not move the process directly before or | 111 | guarantee that the OS will not move the process directly before or |
| 81 | after the this_cpu instruction is executed. In general this means that | 112 | after the this_cpu instruction is executed. In general this means that |
| 82 | the values of the individual counters for each processor are | 113 | the values of the individual counters for each processor are |
| @@ -86,9 +117,9 @@ that is of interest. | |||
| 86 | Per cpu variables are used for performance reasons. Bouncing cache | 117 | Per cpu variables are used for performance reasons. Bouncing cache |
| 87 | lines can be avoided if multiple processors concurrently go through | 118 | lines can be avoided if multiple processors concurrently go through |
| 88 | the same code paths. Since each processor has its own per cpu | 119 | the same code paths. Since each processor has its own per cpu |
| 89 | variables no concurrent cacheline updates take place. The price that | 120 | variables no concurrent cache line updates take place. The price that |
| 90 | has to be paid for this optimization is the need to add up the per cpu | 121 | has to be paid for this optimization is the need to add up the per cpu |
| 91 | counters when the value of the counter is needed. | 122 | counters when the value of a counter is needed. |
| 92 | 123 | ||
| 93 | 124 | ||
| 94 | Special operations: | 125 | Special operations: |
| @@ -100,33 +131,39 @@ Takes the offset of a per cpu variable (&x !) and returns the address | |||
| 100 | of the per cpu variable that belongs to the currently executing | 131 | of the per cpu variable that belongs to the currently executing |
| 101 | processor. this_cpu_ptr avoids multiple steps that the common | 132 | processor. this_cpu_ptr avoids multiple steps that the common |
| 102 | get_cpu/put_cpu sequence requires. No processor number is | 133 | get_cpu/put_cpu sequence requires. No processor number is |
| 103 | available. Instead the offset of the local per cpu area is simply | 134 | available. Instead, the offset of the local per cpu area is simply |
| 104 | added to the percpu offset. | 135 | added to the per cpu offset. |
| 105 | 136 | ||
| 137 | Note that this operation is usually used in a code segment when | ||
| 138 | preemption has been disabled. The pointer is then used to | ||
| 139 | access local per cpu data in a critical section. When preemption | ||
| 140 | is re-enabled this pointer is usually no longer useful since it may | ||
| 141 | no longer point to per cpu data of the current processor. | ||
| 106 | 142 | ||
| 107 | 143 | ||
| 108 | Per cpu variables and offsets | 144 | Per cpu variables and offsets |
| 109 | ----------------------------- | 145 | ----------------------------- |
| 110 | 146 | ||
| 111 | Per cpu variables have *offsets* to the beginning of the percpu | 147 | Per cpu variables have *offsets* to the beginning of the per cpu |
| 112 | area. They do not have addresses although they look like that in the | 148 | area. They do not have addresses although they look like that in the |
| 113 | code. Offsets cannot be directly dereferenced. The offset must be | 149 | code. Offsets cannot be directly dereferenced. The offset must be |
| 114 | added to a base pointer of a percpu area of a processor in order to | 150 | added to a base pointer of a per cpu area of a processor in order to |
| 115 | form a valid address. | 151 | form a valid address. |
| 116 | 152 | ||
| 117 | Therefore the use of x or &x outside of the context of per cpu | 153 | Therefore the use of x or &x outside of the context of per cpu |
| 118 | operations is invalid and will generally be treated like a NULL | 154 | operations is invalid and will generally be treated like a NULL |
| 119 | pointer dereference. | 155 | pointer dereference. |
| 120 | 156 | ||
| 121 | In the context of per cpu operations | 157 | DEFINE_PER_CPU(int, x); |
| 122 | 158 | ||
| 123 | x is a per cpu variable. Most this_cpu operations take a cpu | 159 | In the context of per cpu operations the above implies that x is a per |
| 124 | variable. | 160 | cpu variable. Most this_cpu operations take a cpu variable. |
| 125 | 161 | ||
| 126 | &x is the *offset* a per cpu variable. this_cpu_ptr() takes | 162 | int __percpu *p = &x; |
| 127 | the offset of a per cpu variable which makes this look a bit | ||
| 128 | strange. | ||
| 129 | 163 | ||
| 164 | &x and hence p is the *offset* of a per cpu variable. this_cpu_ptr() | ||
| 165 | takes the offset of a per cpu variable which makes this look a bit | ||
| 166 | strange. | ||
| 130 | 167 | ||
| 131 | 168 | ||
| 132 | Operations on a field of a per cpu structure | 169 | Operations on a field of a per cpu structure |
| @@ -152,7 +189,7 @@ If we have an offset to struct s: | |||
| 152 | 189 | ||
| 153 | struct s __percpu *ps = &p; | 190 | struct s __percpu *ps = &p; |
| 154 | 191 | ||
| 155 | z = this_cpu_dec(ps->m); | 192 | this_cpu_dec(ps->m); |
| 156 | 193 | ||
| 157 | z = this_cpu_inc_return(ps->n); | 194 | z = this_cpu_inc_return(ps->n); |
| 158 | 195 | ||
| @@ -172,29 +209,52 @@ if we do not make use of this_cpu ops later to manipulate fields: | |||
| 172 | Variants of this_cpu ops | 209 | Variants of this_cpu ops |
| 173 | ------------------------- | 210 | ------------------------- |
| 174 | 211 | ||
| 175 | this_cpu ops are interrupt safe. Some architecture do not support | 212 | this_cpu ops are interrupt safe. Some architectures do not support |
| 176 | these per cpu local operations. In that case the operation must be | 213 | these per cpu local operations. In that case the operation must be |
| 177 | replaced by code that disables interrupts, then does the operations | 214 | replaced by code that disables interrupts, then does the operations |
| 178 | that are guaranteed to be atomic and then reenable interrupts. Doing | 215 | that are guaranteed to be atomic and then re-enable interrupts. Doing |
| 179 | so is expensive. If there are other reasons why the scheduler cannot | 216 | so is expensive. If there are other reasons why the scheduler cannot |
| 180 | change the processor we are executing on then there is no reason to | 217 | change the processor we are executing on then there is no reason to |
| 181 | disable interrupts. For that purpose the __this_cpu operations are | 218 | disable interrupts. For that purpose the following __this_cpu operations |
| 182 | provided. For example. | 219 | are provided. |
| 183 | 220 | ||
| 184 | __this_cpu_inc(x); | 221 | These operations have no guarantee against concurrent interrupts or |
| 185 | 222 | preemption. If a per cpu variable is not used in an interrupt context | |
| 186 | Will increment x and will not fallback to code that disables | 223 | and the scheduler cannot preempt, then they are safe. If any interrupts |
| 224 | still occur while an operation is in progress and if the interrupt too | ||
| 225 | modifies the variable, then RMW actions can not be guaranteed to be | ||
| 226 | safe. | ||
| 227 | |||
| 228 | __this_cpu_add() | ||
| 229 | __this_cpu_read(pcp) | ||
| 230 | __this_cpu_write(pcp, val) | ||
| 231 | __this_cpu_add(pcp, val) | ||
| 232 | __this_cpu_and(pcp, val) | ||
| 233 | __this_cpu_or(pcp, val) | ||
| 234 | __this_cpu_add_return(pcp, val) | ||
| 235 | __this_cpu_xchg(pcp, nval) | ||
| 236 | __this_cpu_cmpxchg(pcp, oval, nval) | ||
| 237 | __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) | ||
| 238 | __this_cpu_sub(pcp, val) | ||
| 239 | __this_cpu_inc(pcp) | ||
| 240 | __this_cpu_dec(pcp) | ||
| 241 | __this_cpu_sub_return(pcp, val) | ||
| 242 | __this_cpu_inc_return(pcp) | ||
| 243 | __this_cpu_dec_return(pcp) | ||
| 244 | |||
| 245 | |||
| 246 | Will increment x and will not fall-back to code that disables | ||
| 187 | interrupts on platforms that cannot accomplish atomicity through | 247 | interrupts on platforms that cannot accomplish atomicity through |
| 188 | address relocation and a Read-Modify-Write operation in the same | 248 | address relocation and a Read-Modify-Write operation in the same |
| 189 | instruction. | 249 | instruction. |
| 190 | 250 | ||
| 191 | 251 | ||
| 192 | |||
| 193 | &this_cpu_ptr(pp)->n vs this_cpu_ptr(&pp->n) | 252 | &this_cpu_ptr(pp)->n vs this_cpu_ptr(&pp->n) |
| 194 | -------------------------------------------- | 253 | -------------------------------------------- |
| 195 | 254 | ||
| 196 | The first operation takes the offset and forms an address and then | 255 | The first operation takes the offset and forms an address and then |
| 197 | adds the offset of the n field. | 256 | adds the offset of the n field. This may result in two add |
| 257 | instructions emitted by the compiler. | ||
| 198 | 258 | ||
| 199 | The second one first adds the two offsets and then does the | 259 | The second one first adds the two offsets and then does the |
| 200 | relocation. IMHO the second form looks cleaner and has an easier time | 260 | relocation. IMHO the second form looks cleaner and has an easier time |
| @@ -202,4 +262,73 @@ with (). The second form also is consistent with the way | |||
| 202 | this_cpu_read() and friends are used. | 262 | this_cpu_read() and friends are used. |
| 203 | 263 | ||
| 204 | 264 | ||
| 205 | Christoph Lameter, April 3rd, 2013 | 265 | Remote access to per cpu data |
| 266 | ------------------------------ | ||
| 267 | |||
| 268 | Per cpu data structures are designed to be used by one cpu exclusively. | ||
| 269 | If you use the variables as intended, this_cpu_ops() are guaranteed to | ||
| 270 | be "atomic" as no other CPU has access to these data structures. | ||
| 271 | |||
| 272 | There are special cases where you might need to access per cpu data | ||
| 273 | structures remotely. It is usually safe to do a remote read access | ||
| 274 | and that is frequently done to summarize counters. Remote write access | ||
| 275 | something which could be problematic because this_cpu ops do not | ||
| 276 | have lock semantics. A remote write may interfere with a this_cpu | ||
| 277 | RMW operation. | ||
| 278 | |||
| 279 | Remote write accesses to percpu data structures are highly discouraged | ||
| 280 | unless absolutely necessary. Please consider using an IPI to wake up | ||
| 281 | the remote CPU and perform the update to its per cpu area. | ||
| 282 | |||
| 283 | To access per-cpu data structure remotely, typically the per_cpu_ptr() | ||
| 284 | function is used: | ||
| 285 | |||
| 286 | |||
| 287 | DEFINE_PER_CPU(struct data, datap); | ||
| 288 | |||
| 289 | struct data *p = per_cpu_ptr(&datap, cpu); | ||
| 290 | |||
| 291 | This makes it explicit that we are getting ready to access a percpu | ||
| 292 | area remotely. | ||
| 293 | |||
| 294 | You can also do the following to convert the datap offset to an address | ||
| 295 | |||
| 296 | struct data *p = this_cpu_ptr(&datap); | ||
| 297 | |||
| 298 | but, passing of pointers calculated via this_cpu_ptr to other cpus is | ||
| 299 | unusual and should be avoided. | ||
| 300 | |||
| 301 | Remote access are typically only for reading the status of another cpus | ||
| 302 | per cpu data. Write accesses can cause unique problems due to the | ||
| 303 | relaxed synchronization requirements for this_cpu operations. | ||
| 304 | |||
| 305 | One example that illustrates some concerns with write operations is | ||
| 306 | the following scenario that occurs because two per cpu variables | ||
| 307 | share a cache-line but the relaxed synchronization is applied to | ||
| 308 | only one process updating the cache-line. | ||
| 309 | |||
| 310 | Consider the following example | ||
| 311 | |||
| 312 | |||
| 313 | struct test { | ||
| 314 | atomic_t a; | ||
| 315 | int b; | ||
| 316 | }; | ||
| 317 | |||
| 318 | DEFINE_PER_CPU(struct test, onecacheline); | ||
| 319 | |||
| 320 | There is some concern about what would happen if the field 'a' is updated | ||
| 321 | remotely from one processor and the local processor would use this_cpu ops | ||
| 322 | to update field b. Care should be taken that such simultaneous accesses to | ||
| 323 | data within the same cache line are avoided. Also costly synchronization | ||
| 324 | may be necessary. IPIs are generally recommended in such scenarios instead | ||
| 325 | of a remote write to the per cpu area of another processor. | ||
| 326 | |||
| 327 | Even in cases where the remote writes are rare, please bear in | ||
| 328 | mind that a remote write will evict the cache line from the processor | ||
| 329 | that most likely will access it. If the processor wakes up and finds a | ||
| 330 | missing local cache line of a per cpu area, its performance and hence | ||
| 331 | the wake up times will be affected. | ||
| 332 | |||
| 333 | Christoph Lameter, August 4th, 2014 | ||
| 334 | Pranith Kumar, Aug 2nd, 2014 | ||
diff --git a/Documentation/x86/tlb.txt b/Documentation/x86/tlb.txt index 2b3a82e69151..39d172326703 100644 --- a/Documentation/x86/tlb.txt +++ b/Documentation/x86/tlb.txt | |||
| @@ -35,7 +35,7 @@ invlpg instruction (or instructions _near_ it) show up high in | |||
| 35 | profiles. If you believe that individual invalidations being | 35 | profiles. If you believe that individual invalidations being |
| 36 | called too often, you can lower the tunable: | 36 | called too often, you can lower the tunable: |
| 37 | 37 | ||
| 38 | /sys/debug/kernel/x86/tlb_single_page_flush_ceiling | 38 | /sys/kernel/debug/x86/tlb_single_page_flush_ceiling |
| 39 | 39 | ||
| 40 | This will cause us to do the global flush for more cases. | 40 | This will cause us to do the global flush for more cases. |
| 41 | Lowering it to 0 will disable the use of the individual flushes. | 41 | Lowering it to 0 will disable the use of the individual flushes. |
diff --git a/MAINTAINERS b/MAINTAINERS index a3e0d10e386e..1ff06dee651d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -1277,6 +1277,7 @@ F: drivers/scsi/arm/ | |||
| 1277 | ARM/Rockchip SoC support | 1277 | ARM/Rockchip SoC support |
| 1278 | M: Heiko Stuebner <heiko@sntech.de> | 1278 | M: Heiko Stuebner <heiko@sntech.de> |
| 1279 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1279 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
| 1280 | L: linux-rockchip@lists.infradead.org | ||
| 1280 | S: Maintained | 1281 | S: Maintained |
| 1281 | F: arch/arm/mach-rockchip/ | 1282 | F: arch/arm/mach-rockchip/ |
| 1282 | F: drivers/*/*rockchip* | 1283 | F: drivers/*/*rockchip* |
| @@ -1843,6 +1844,12 @@ S: Orphan | |||
| 1843 | F: Documentation/filesystems/befs.txt | 1844 | F: Documentation/filesystems/befs.txt |
| 1844 | F: fs/befs/ | 1845 | F: fs/befs/ |
| 1845 | 1846 | ||
| 1847 | BECKHOFF CX5020 ETHERCAT MASTER DRIVER | ||
| 1848 | M: Dariusz Marcinkiewicz <reksio@newterm.pl> | ||
| 1849 | L: netdev@vger.kernel.org | ||
| 1850 | S: Maintained | ||
| 1851 | F: drivers/net/ethernet/ec_bhf.c | ||
| 1852 | |||
| 1846 | BFS FILE SYSTEM | 1853 | BFS FILE SYSTEM |
| 1847 | M: "Tigran A. Aivazian" <tigran@aivazian.fsnet.co.uk> | 1854 | M: "Tigran A. Aivazian" <tigran@aivazian.fsnet.co.uk> |
| 1848 | S: Maintained | 1855 | S: Maintained |
| @@ -2059,7 +2066,7 @@ S: Supported | |||
| 2059 | F: drivers/scsi/bnx2i/ | 2066 | F: drivers/scsi/bnx2i/ |
| 2060 | 2067 | ||
| 2061 | BROADCOM KONA GPIO DRIVER | 2068 | BROADCOM KONA GPIO DRIVER |
| 2062 | M: Markus Mayer <markus.mayer@linaro.org> | 2069 | M: Ray Jui <rjui@broadcom.com> |
| 2063 | L: bcm-kernel-feedback-list@broadcom.com | 2070 | L: bcm-kernel-feedback-list@broadcom.com |
| 2064 | S: Supported | 2071 | S: Supported |
| 2065 | F: drivers/gpio/gpio-bcm-kona.c | 2072 | F: drivers/gpio/gpio-bcm-kona.c |
| @@ -3115,6 +3122,17 @@ F: include/linux/host1x.h | |||
| 3115 | F: include/uapi/drm/tegra_drm.h | 3122 | F: include/uapi/drm/tegra_drm.h |
| 3116 | F: Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt | 3123 | F: Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt |
| 3117 | 3124 | ||
| 3125 | DRM DRIVERS FOR RENESAS | ||
| 3126 | M: Laurent Pinchart <laurent.pinchart@ideasonboard.com> | ||
| 3127 | L: dri-devel@lists.freedesktop.org | ||
| 3128 | L: linux-sh@vger.kernel.org | ||
| 3129 | T: git git://people.freedesktop.org/~airlied/linux | ||
| 3130 | S: Supported | ||
| 3131 | F: drivers/gpu/drm/rcar-du/ | ||
| 3132 | F: drivers/gpu/drm/shmobile/ | ||
| 3133 | F: include/linux/platform_data/rcar-du.h | ||
| 3134 | F: include/linux/platform_data/shmob_drm.h | ||
| 3135 | |||
| 3118 | DSBR100 USB FM RADIO DRIVER | 3136 | DSBR100 USB FM RADIO DRIVER |
| 3119 | M: Alexey Klimov <klimov.linux@gmail.com> | 3137 | M: Alexey Klimov <klimov.linux@gmail.com> |
| 3120 | L: linux-media@vger.kernel.org | 3138 | L: linux-media@vger.kernel.org |
| @@ -5982,6 +6000,12 @@ T: git git://linuxtv.org/media_tree.git | |||
| 5982 | S: Maintained | 6000 | S: Maintained |
| 5983 | F: drivers/media/radio/radio-mr800.c | 6001 | F: drivers/media/radio/radio-mr800.c |
| 5984 | 6002 | ||
| 6003 | MRF24J40 IEEE 802.15.4 RADIO DRIVER | ||
| 6004 | M: Alan Ott <alan@signal11.us> | ||
| 6005 | L: linux-wpan@vger.kernel.org | ||
| 6006 | S: Maintained | ||
| 6007 | F: drivers/net/ieee802154/mrf24j40.c | ||
| 6008 | |||
| 5985 | MSI LAPTOP SUPPORT | 6009 | MSI LAPTOP SUPPORT |
| 5986 | M: "Lee, Chun-Yi" <jlee@suse.com> | 6010 | M: "Lee, Chun-Yi" <jlee@suse.com> |
| 5987 | L: platform-driver-x86@vger.kernel.org | 6011 | L: platform-driver-x86@vger.kernel.org |
| @@ -1,7 +1,7 @@ | |||
| 1 | VERSION = 3 | 1 | VERSION = 3 |
| 2 | PATCHLEVEL = 17 | 2 | PATCHLEVEL = 17 |
| 3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
| 4 | EXTRAVERSION = -rc1 | 4 | EXTRAVERSION = -rc2 |
| 5 | NAME = Shuffling Zombie Juror | 5 | NAME = Shuffling Zombie Juror |
| 6 | 6 | ||
| 7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi index 6d6d23c83d30..adadaf97ac01 100644 --- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi +++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi | |||
| @@ -134,6 +134,8 @@ | |||
| 134 | i2c@13860000 { | 134 | i2c@13860000 { |
| 135 | pinctrl-0 = <&i2c0_bus>; | 135 | pinctrl-0 = <&i2c0_bus>; |
| 136 | pinctrl-names = "default"; | 136 | pinctrl-names = "default"; |
| 137 | samsung,i2c-sda-delay = <100>; | ||
| 138 | samsung,i2c-max-bus-freq = <400000>; | ||
| 137 | status = "okay"; | 139 | status = "okay"; |
| 138 | 140 | ||
| 139 | usb3503: usb3503@08 { | 141 | usb3503: usb3503@08 { |
| @@ -148,6 +150,10 @@ | |||
| 148 | 150 | ||
| 149 | max77686: pmic@09 { | 151 | max77686: pmic@09 { |
| 150 | compatible = "maxim,max77686"; | 152 | compatible = "maxim,max77686"; |
| 153 | interrupt-parent = <&gpx3>; | ||
| 154 | interrupts = <2 0>; | ||
| 155 | pinctrl-names = "default"; | ||
| 156 | pinctrl-0 = <&max77686_irq>; | ||
| 151 | reg = <0x09>; | 157 | reg = <0x09>; |
| 152 | #clock-cells = <1>; | 158 | #clock-cells = <1>; |
| 153 | 159 | ||
| @@ -368,4 +374,11 @@ | |||
| 368 | samsung,pins = "gpx1-3"; | 374 | samsung,pins = "gpx1-3"; |
| 369 | samsung,pin-pud = <0>; | 375 | samsung,pin-pud = <0>; |
| 370 | }; | 376 | }; |
| 377 | |||
| 378 | max77686_irq: max77686-irq { | ||
| 379 | samsung,pins = "gpx3-2"; | ||
| 380 | samsung,pin-function = <0>; | ||
| 381 | samsung,pin-pud = <0>; | ||
| 382 | samsung,pin-drv = <0>; | ||
| 383 | }; | ||
| 371 | }; | 384 | }; |
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index 64fa27b36be0..c6c58c1c00e3 100644 --- a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi | |||
| @@ -731,7 +731,7 @@ | |||
| 731 | compatible = "fsl,imx53-vpu"; | 731 | compatible = "fsl,imx53-vpu"; |
| 732 | reg = <0x63ff4000 0x1000>; | 732 | reg = <0x63ff4000 0x1000>; |
| 733 | interrupts = <9>; | 733 | interrupts = <9>; |
| 734 | clocks = <&clks IMX5_CLK_VPU_GATE>, | 734 | clocks = <&clks IMX5_CLK_VPU_REFERENCE_GATE>, |
| 735 | <&clks IMX5_CLK_VPU_GATE>; | 735 | <&clks IMX5_CLK_VPU_GATE>; |
| 736 | clock-names = "per", "ahb"; | 736 | clock-names = "per", "ahb"; |
| 737 | resets = <&src 1>; | 737 | resets = <&src 1>; |
diff --git a/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts b/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts index 8c1cb53464a0..4fa254347798 100644 --- a/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts +++ b/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts | |||
| @@ -119,7 +119,7 @@ | |||
| 119 | pinctrl-names = "default"; | 119 | pinctrl-names = "default"; |
| 120 | pinctrl-0 = <&pinctrl_enet>; | 120 | pinctrl-0 = <&pinctrl_enet>; |
| 121 | phy-mode = "rgmii"; | 121 | phy-mode = "rgmii"; |
| 122 | phy-reset-gpios = <&gpio3 23 0>; | 122 | phy-reset-gpios = <&gpio1 25 0>; |
| 123 | phy-supply = <&vgen2_1v2_eth>; | 123 | phy-supply = <&vgen2_1v2_eth>; |
| 124 | status = "okay"; | 124 | status = "okay"; |
| 125 | }; | 125 | }; |
| @@ -339,6 +339,7 @@ | |||
| 339 | MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0 | 339 | MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0 |
| 340 | MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0 | 340 | MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0 |
| 341 | MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0 | 341 | MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0 |
| 342 | MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25 0x1b0b0 | ||
| 342 | MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8 | 343 | MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8 |
| 343 | >; | 344 | >; |
| 344 | }; | 345 | }; |
diff --git a/arch/arm/boot/dts/imx6sx-pinfunc.h b/arch/arm/boot/dts/imx6sx-pinfunc.h index 3e0b816dac08..bb9c6b78cb97 100644 --- a/arch/arm/boot/dts/imx6sx-pinfunc.h +++ b/arch/arm/boot/dts/imx6sx-pinfunc.h | |||
| @@ -78,7 +78,7 @@ | |||
| 78 | #define MX6SX_PAD_GPIO1_IO07__USDHC2_WP 0x0030 0x0378 0x0870 0x1 0x1 | 78 | #define MX6SX_PAD_GPIO1_IO07__USDHC2_WP 0x0030 0x0378 0x0870 0x1 0x1 |
| 79 | #define MX6SX_PAD_GPIO1_IO07__ENET2_MDIO 0x0030 0x0378 0x0770 0x2 0x0 | 79 | #define MX6SX_PAD_GPIO1_IO07__ENET2_MDIO 0x0030 0x0378 0x0770 0x2 0x0 |
| 80 | #define MX6SX_PAD_GPIO1_IO07__AUDMUX_MCLK 0x0030 0x0378 0x0000 0x3 0x0 | 80 | #define MX6SX_PAD_GPIO1_IO07__AUDMUX_MCLK 0x0030 0x0378 0x0000 0x3 0x0 |
| 81 | #define MX6SX_PAD_GPIO1_IO07__UART1_CTS_B 0x0030 0x0378 0x082C 0x4 0x1 | 81 | #define MX6SX_PAD_GPIO1_IO07__UART1_CTS_B 0x0030 0x0378 0x0000 0x4 0x0 |
| 82 | #define MX6SX_PAD_GPIO1_IO07__GPIO1_IO_7 0x0030 0x0378 0x0000 0x5 0x0 | 82 | #define MX6SX_PAD_GPIO1_IO07__GPIO1_IO_7 0x0030 0x0378 0x0000 0x5 0x0 |
| 83 | #define MX6SX_PAD_GPIO1_IO07__SRC_EARLY_RESET 0x0030 0x0378 0x0000 0x6 0x0 | 83 | #define MX6SX_PAD_GPIO1_IO07__SRC_EARLY_RESET 0x0030 0x0378 0x0000 0x6 0x0 |
| 84 | #define MX6SX_PAD_GPIO1_IO07__DCIC2_OUT 0x0030 0x0378 0x0000 0x7 0x0 | 84 | #define MX6SX_PAD_GPIO1_IO07__DCIC2_OUT 0x0030 0x0378 0x0000 0x7 0x0 |
| @@ -96,7 +96,7 @@ | |||
| 96 | #define MX6SX_PAD_GPIO1_IO09__WDOG2_WDOG_B 0x0038 0x0380 0x0000 0x1 0x0 | 96 | #define MX6SX_PAD_GPIO1_IO09__WDOG2_WDOG_B 0x0038 0x0380 0x0000 0x1 0x0 |
| 97 | #define MX6SX_PAD_GPIO1_IO09__SDMA_EXT_EVENT_1 0x0038 0x0380 0x0820 0x2 0x0 | 97 | #define MX6SX_PAD_GPIO1_IO09__SDMA_EXT_EVENT_1 0x0038 0x0380 0x0820 0x2 0x0 |
| 98 | #define MX6SX_PAD_GPIO1_IO09__CCM_OUT0 0x0038 0x0380 0x0000 0x3 0x0 | 98 | #define MX6SX_PAD_GPIO1_IO09__CCM_OUT0 0x0038 0x0380 0x0000 0x3 0x0 |
| 99 | #define MX6SX_PAD_GPIO1_IO09__UART2_CTS_B 0x0038 0x0380 0x0834 0x4 0x1 | 99 | #define MX6SX_PAD_GPIO1_IO09__UART2_CTS_B 0x0038 0x0380 0x0000 0x4 0x0 |
| 100 | #define MX6SX_PAD_GPIO1_IO09__GPIO1_IO_9 0x0038 0x0380 0x0000 0x5 0x0 | 100 | #define MX6SX_PAD_GPIO1_IO09__GPIO1_IO_9 0x0038 0x0380 0x0000 0x5 0x0 |
| 101 | #define MX6SX_PAD_GPIO1_IO09__SRC_INT_BOOT 0x0038 0x0380 0x0000 0x6 0x0 | 101 | #define MX6SX_PAD_GPIO1_IO09__SRC_INT_BOOT 0x0038 0x0380 0x0000 0x6 0x0 |
| 102 | #define MX6SX_PAD_GPIO1_IO09__OBSERVE_MUX_OUT_4 0x0038 0x0380 0x0000 0x7 0x0 | 102 | #define MX6SX_PAD_GPIO1_IO09__OBSERVE_MUX_OUT_4 0x0038 0x0380 0x0000 0x7 0x0 |
| @@ -213,7 +213,7 @@ | |||
| 213 | #define MX6SX_PAD_CSI_DATA07__ESAI_TX3_RX2 0x0068 0x03B0 0x079C 0x1 0x1 | 213 | #define MX6SX_PAD_CSI_DATA07__ESAI_TX3_RX2 0x0068 0x03B0 0x079C 0x1 0x1 |
| 214 | #define MX6SX_PAD_CSI_DATA07__I2C4_SDA 0x0068 0x03B0 0x07C4 0x2 0x2 | 214 | #define MX6SX_PAD_CSI_DATA07__I2C4_SDA 0x0068 0x03B0 0x07C4 0x2 0x2 |
| 215 | #define MX6SX_PAD_CSI_DATA07__KPP_ROW_7 0x0068 0x03B0 0x07DC 0x3 0x0 | 215 | #define MX6SX_PAD_CSI_DATA07__KPP_ROW_7 0x0068 0x03B0 0x07DC 0x3 0x0 |
| 216 | #define MX6SX_PAD_CSI_DATA07__UART6_CTS_B 0x0068 0x03B0 0x0854 0x4 0x1 | 216 | #define MX6SX_PAD_CSI_DATA07__UART6_CTS_B 0x0068 0x03B0 0x0000 0x4 0x0 |
| 217 | #define MX6SX_PAD_CSI_DATA07__GPIO1_IO_21 0x0068 0x03B0 0x0000 0x5 0x0 | 217 | #define MX6SX_PAD_CSI_DATA07__GPIO1_IO_21 0x0068 0x03B0 0x0000 0x5 0x0 |
| 218 | #define MX6SX_PAD_CSI_DATA07__WEIM_DATA_16 0x0068 0x03B0 0x0000 0x6 0x0 | 218 | #define MX6SX_PAD_CSI_DATA07__WEIM_DATA_16 0x0068 0x03B0 0x0000 0x6 0x0 |
| 219 | #define MX6SX_PAD_CSI_DATA07__DCIC1_OUT 0x0068 0x03B0 0x0000 0x7 0x0 | 219 | #define MX6SX_PAD_CSI_DATA07__DCIC1_OUT 0x0068 0x03B0 0x0000 0x7 0x0 |
| @@ -254,7 +254,7 @@ | |||
| 254 | #define MX6SX_PAD_CSI_VSYNC__CSI1_VSYNC 0x0078 0x03C0 0x0708 0x0 0x0 | 254 | #define MX6SX_PAD_CSI_VSYNC__CSI1_VSYNC 0x0078 0x03C0 0x0708 0x0 0x0 |
| 255 | #define MX6SX_PAD_CSI_VSYNC__ESAI_TX5_RX0 0x0078 0x03C0 0x07A4 0x1 0x1 | 255 | #define MX6SX_PAD_CSI_VSYNC__ESAI_TX5_RX0 0x0078 0x03C0 0x07A4 0x1 0x1 |
| 256 | #define MX6SX_PAD_CSI_VSYNC__AUDMUX_AUD6_RXD 0x0078 0x03C0 0x0674 0x2 0x1 | 256 | #define MX6SX_PAD_CSI_VSYNC__AUDMUX_AUD6_RXD 0x0078 0x03C0 0x0674 0x2 0x1 |
| 257 | #define MX6SX_PAD_CSI_VSYNC__UART4_CTS_B 0x0078 0x03C0 0x0844 0x3 0x3 | 257 | #define MX6SX_PAD_CSI_VSYNC__UART4_CTS_B 0x0078 0x03C0 0x0000 0x3 0x0 |
| 258 | #define MX6SX_PAD_CSI_VSYNC__MQS_RIGHT 0x0078 0x03C0 0x0000 0x4 0x0 | 258 | #define MX6SX_PAD_CSI_VSYNC__MQS_RIGHT 0x0078 0x03C0 0x0000 0x4 0x0 |
| 259 | #define MX6SX_PAD_CSI_VSYNC__GPIO1_IO_25 0x0078 0x03C0 0x0000 0x5 0x0 | 259 | #define MX6SX_PAD_CSI_VSYNC__GPIO1_IO_25 0x0078 0x03C0 0x0000 0x5 0x0 |
| 260 | #define MX6SX_PAD_CSI_VSYNC__WEIM_DATA_24 0x0078 0x03C0 0x0000 0x6 0x0 | 260 | #define MX6SX_PAD_CSI_VSYNC__WEIM_DATA_24 0x0078 0x03C0 0x0000 0x6 0x0 |
| @@ -352,7 +352,7 @@ | |||
| 352 | #define MX6SX_PAD_ENET2_TX_CLK__ENET2_TX_CLK 0x00A0 0x03E8 0x0000 0x0 0x0 | 352 | #define MX6SX_PAD_ENET2_TX_CLK__ENET2_TX_CLK 0x00A0 0x03E8 0x0000 0x0 0x0 |
| 353 | #define MX6SX_PAD_ENET2_TX_CLK__ENET2_REF_CLK2 0x00A0 0x03E8 0x076C 0x1 0x1 | 353 | #define MX6SX_PAD_ENET2_TX_CLK__ENET2_REF_CLK2 0x00A0 0x03E8 0x076C 0x1 0x1 |
| 354 | #define MX6SX_PAD_ENET2_TX_CLK__I2C3_SDA 0x00A0 0x03E8 0x07BC 0x2 0x1 | 354 | #define MX6SX_PAD_ENET2_TX_CLK__I2C3_SDA 0x00A0 0x03E8 0x07BC 0x2 0x1 |
| 355 | #define MX6SX_PAD_ENET2_TX_CLK__UART1_CTS_B 0x00A0 0x03E8 0x082C 0x3 0x3 | 355 | #define MX6SX_PAD_ENET2_TX_CLK__UART1_CTS_B 0x00A0 0x03E8 0x0000 0x3 0x0 |
| 356 | #define MX6SX_PAD_ENET2_TX_CLK__MLB_CLK 0x00A0 0x03E8 0x07E8 0x4 0x1 | 356 | #define MX6SX_PAD_ENET2_TX_CLK__MLB_CLK 0x00A0 0x03E8 0x07E8 0x4 0x1 |
| 357 | #define MX6SX_PAD_ENET2_TX_CLK__GPIO2_IO_9 0x00A0 0x03E8 0x0000 0x5 0x0 | 357 | #define MX6SX_PAD_ENET2_TX_CLK__GPIO2_IO_9 0x00A0 0x03E8 0x0000 0x5 0x0 |
| 358 | #define MX6SX_PAD_ENET2_TX_CLK__USB_OTG2_PWR 0x00A0 0x03E8 0x0000 0x6 0x0 | 358 | #define MX6SX_PAD_ENET2_TX_CLK__USB_OTG2_PWR 0x00A0 0x03E8 0x0000 0x6 0x0 |
| @@ -404,7 +404,7 @@ | |||
| 404 | #define MX6SX_PAD_KEY_COL4__SAI2_RX_BCLK 0x00B4 0x03FC 0x0808 0x7 0x0 | 404 | #define MX6SX_PAD_KEY_COL4__SAI2_RX_BCLK 0x00B4 0x03FC 0x0808 0x7 0x0 |
| 405 | #define MX6SX_PAD_KEY_ROW0__KPP_ROW_0 0x00B8 0x0400 0x0000 0x0 0x0 | 405 | #define MX6SX_PAD_KEY_ROW0__KPP_ROW_0 0x00B8 0x0400 0x0000 0x0 0x0 |
| 406 | #define MX6SX_PAD_KEY_ROW0__USDHC3_WP 0x00B8 0x0400 0x0000 0x1 0x0 | 406 | #define MX6SX_PAD_KEY_ROW0__USDHC3_WP 0x00B8 0x0400 0x0000 0x1 0x0 |
| 407 | #define MX6SX_PAD_KEY_ROW0__UART6_CTS_B 0x00B8 0x0400 0x0854 0x2 0x3 | 407 | #define MX6SX_PAD_KEY_ROW0__UART6_CTS_B 0x00B8 0x0400 0x0000 0x2 0x0 |
| 408 | #define MX6SX_PAD_KEY_ROW0__ECSPI1_MOSI 0x00B8 0x0400 0x0718 0x3 0x0 | 408 | #define MX6SX_PAD_KEY_ROW0__ECSPI1_MOSI 0x00B8 0x0400 0x0718 0x3 0x0 |
| 409 | #define MX6SX_PAD_KEY_ROW0__AUDMUX_AUD5_TXD 0x00B8 0x0400 0x0660 0x4 0x0 | 409 | #define MX6SX_PAD_KEY_ROW0__AUDMUX_AUD5_TXD 0x00B8 0x0400 0x0660 0x4 0x0 |
| 410 | #define MX6SX_PAD_KEY_ROW0__GPIO2_IO_15 0x00B8 0x0400 0x0000 0x5 0x0 | 410 | #define MX6SX_PAD_KEY_ROW0__GPIO2_IO_15 0x00B8 0x0400 0x0000 0x5 0x0 |
| @@ -423,7 +423,7 @@ | |||
| 423 | #define MX6SX_PAD_KEY_ROW1__M4_NMI 0x00BC 0x0404 0x0000 0x8 0x0 | 423 | #define MX6SX_PAD_KEY_ROW1__M4_NMI 0x00BC 0x0404 0x0000 0x8 0x0 |
| 424 | #define MX6SX_PAD_KEY_ROW2__KPP_ROW_2 0x00C0 0x0408 0x0000 0x0 0x0 | 424 | #define MX6SX_PAD_KEY_ROW2__KPP_ROW_2 0x00C0 0x0408 0x0000 0x0 0x0 |
| 425 | #define MX6SX_PAD_KEY_ROW2__USDHC4_WP 0x00C0 0x0408 0x0878 0x1 0x1 | 425 | #define MX6SX_PAD_KEY_ROW2__USDHC4_WP 0x00C0 0x0408 0x0878 0x1 0x1 |
| 426 | #define MX6SX_PAD_KEY_ROW2__UART5_CTS_B 0x00C0 0x0408 0x084C 0x2 0x3 | 426 | #define MX6SX_PAD_KEY_ROW2__UART5_CTS_B 0x00C0 0x0408 0x0000 0x2 0x0 |
| 427 | #define MX6SX_PAD_KEY_ROW2__CAN1_RX 0x00C0 0x0408 0x068C 0x3 0x1 | 427 | #define MX6SX_PAD_KEY_ROW2__CAN1_RX 0x00C0 0x0408 0x068C 0x3 0x1 |
| 428 | #define MX6SX_PAD_KEY_ROW2__CANFD_RX1 0x00C0 0x0408 0x0694 0x4 0x1 | 428 | #define MX6SX_PAD_KEY_ROW2__CANFD_RX1 0x00C0 0x0408 0x0694 0x4 0x1 |
| 429 | #define MX6SX_PAD_KEY_ROW2__GPIO2_IO_17 0x00C0 0x0408 0x0000 0x5 0x0 | 429 | #define MX6SX_PAD_KEY_ROW2__GPIO2_IO_17 0x00C0 0x0408 0x0000 0x5 0x0 |
| @@ -815,7 +815,7 @@ | |||
| 815 | #define MX6SX_PAD_NAND_DATA05__RAWNAND_DATA05 0x0164 0x04AC 0x0000 0x0 0x0 | 815 | #define MX6SX_PAD_NAND_DATA05__RAWNAND_DATA05 0x0164 0x04AC 0x0000 0x0 0x0 |
| 816 | #define MX6SX_PAD_NAND_DATA05__USDHC2_DATA5 0x0164 0x04AC 0x0000 0x1 0x0 | 816 | #define MX6SX_PAD_NAND_DATA05__USDHC2_DATA5 0x0164 0x04AC 0x0000 0x1 0x0 |
| 817 | #define MX6SX_PAD_NAND_DATA05__QSPI2_B_DQS 0x0164 0x04AC 0x0000 0x2 0x0 | 817 | #define MX6SX_PAD_NAND_DATA05__QSPI2_B_DQS 0x0164 0x04AC 0x0000 0x2 0x0 |
| 818 | #define MX6SX_PAD_NAND_DATA05__UART3_CTS_B 0x0164 0x04AC 0x083C 0x3 0x1 | 818 | #define MX6SX_PAD_NAND_DATA05__UART3_CTS_B 0x0164 0x04AC 0x0000 0x3 0x0 |
| 819 | #define MX6SX_PAD_NAND_DATA05__AUDMUX_AUD4_RXC 0x0164 0x04AC 0x064C 0x4 0x0 | 819 | #define MX6SX_PAD_NAND_DATA05__AUDMUX_AUD4_RXC 0x0164 0x04AC 0x064C 0x4 0x0 |
| 820 | #define MX6SX_PAD_NAND_DATA05__GPIO4_IO_9 0x0164 0x04AC 0x0000 0x5 0x0 | 820 | #define MX6SX_PAD_NAND_DATA05__GPIO4_IO_9 0x0164 0x04AC 0x0000 0x5 0x0 |
| 821 | #define MX6SX_PAD_NAND_DATA05__WEIM_AD_5 0x0164 0x04AC 0x0000 0x6 0x0 | 821 | #define MX6SX_PAD_NAND_DATA05__WEIM_AD_5 0x0164 0x04AC 0x0000 0x6 0x0 |
| @@ -957,7 +957,7 @@ | |||
| 957 | #define MX6SX_PAD_QSPI1A_SS1_B__SIM_M_HADDR_12 0x019C 0x04E4 0x0000 0x7 0x0 | 957 | #define MX6SX_PAD_QSPI1A_SS1_B__SIM_M_HADDR_12 0x019C 0x04E4 0x0000 0x7 0x0 |
| 958 | #define MX6SX_PAD_QSPI1A_SS1_B__SDMA_DEBUG_PC_3 0x019C 0x04E4 0x0000 0x9 0x0 | 958 | #define MX6SX_PAD_QSPI1A_SS1_B__SDMA_DEBUG_PC_3 0x019C 0x04E4 0x0000 0x9 0x0 |
| 959 | #define MX6SX_PAD_QSPI1B_DATA0__QSPI1_B_DATA_0 0x01A0 0x04E8 0x0000 0x0 0x0 | 959 | #define MX6SX_PAD_QSPI1B_DATA0__QSPI1_B_DATA_0 0x01A0 0x04E8 0x0000 0x0 0x0 |
| 960 | #define MX6SX_PAD_QSPI1B_DATA0__UART3_CTS_B 0x01A0 0x04E8 0x083C 0x1 0x4 | 960 | #define MX6SX_PAD_QSPI1B_DATA0__UART3_CTS_B 0x01A0 0x04E8 0x0000 0x1 0x0 |
| 961 | #define MX6SX_PAD_QSPI1B_DATA0__ECSPI3_MOSI 0x01A0 0x04E8 0x0738 0x2 0x1 | 961 | #define MX6SX_PAD_QSPI1B_DATA0__ECSPI3_MOSI 0x01A0 0x04E8 0x0738 0x2 0x1 |
| 962 | #define MX6SX_PAD_QSPI1B_DATA0__ESAI_RX_FS 0x01A0 0x04E8 0x0778 0x3 0x2 | 962 | #define MX6SX_PAD_QSPI1B_DATA0__ESAI_RX_FS 0x01A0 0x04E8 0x0778 0x3 0x2 |
| 963 | #define MX6SX_PAD_QSPI1B_DATA0__CSI1_DATA_22 0x01A0 0x04E8 0x06F4 0x4 0x1 | 963 | #define MX6SX_PAD_QSPI1B_DATA0__CSI1_DATA_22 0x01A0 0x04E8 0x06F4 0x4 0x1 |
| @@ -1236,7 +1236,7 @@ | |||
| 1236 | #define MX6SX_PAD_SD1_DATA2__AUDMUX_AUD5_TXFS 0x0230 0x0578 0x0670 0x1 0x1 | 1236 | #define MX6SX_PAD_SD1_DATA2__AUDMUX_AUD5_TXFS 0x0230 0x0578 0x0670 0x1 0x1 |
| 1237 | #define MX6SX_PAD_SD1_DATA2__PWM3_OUT 0x0230 0x0578 0x0000 0x2 0x0 | 1237 | #define MX6SX_PAD_SD1_DATA2__PWM3_OUT 0x0230 0x0578 0x0000 0x2 0x0 |
| 1238 | #define MX6SX_PAD_SD1_DATA2__GPT_COMPARE2 0x0230 0x0578 0x0000 0x3 0x0 | 1238 | #define MX6SX_PAD_SD1_DATA2__GPT_COMPARE2 0x0230 0x0578 0x0000 0x3 0x0 |
| 1239 | #define MX6SX_PAD_SD1_DATA2__UART2_CTS_B 0x0230 0x0578 0x0834 0x4 0x2 | 1239 | #define MX6SX_PAD_SD1_DATA2__UART2_CTS_B 0x0230 0x0578 0x0000 0x4 0x0 |
| 1240 | #define MX6SX_PAD_SD1_DATA2__GPIO6_IO_4 0x0230 0x0578 0x0000 0x5 0x0 | 1240 | #define MX6SX_PAD_SD1_DATA2__GPIO6_IO_4 0x0230 0x0578 0x0000 0x5 0x0 |
| 1241 | #define MX6SX_PAD_SD1_DATA2__ECSPI4_RDY 0x0230 0x0578 0x0000 0x6 0x0 | 1241 | #define MX6SX_PAD_SD1_DATA2__ECSPI4_RDY 0x0230 0x0578 0x0000 0x6 0x0 |
| 1242 | #define MX6SX_PAD_SD1_DATA2__CCM_OUT0 0x0230 0x0578 0x0000 0x7 0x0 | 1242 | #define MX6SX_PAD_SD1_DATA2__CCM_OUT0 0x0230 0x0578 0x0000 0x7 0x0 |
| @@ -1315,7 +1315,7 @@ | |||
| 1315 | #define MX6SX_PAD_SD2_DATA3__VADC_CLAMP_CURRENT_3 0x024C 0x0594 0x0000 0x8 0x0 | 1315 | #define MX6SX_PAD_SD2_DATA3__VADC_CLAMP_CURRENT_3 0x024C 0x0594 0x0000 0x8 0x0 |
| 1316 | #define MX6SX_PAD_SD2_DATA3__MMDC_DEBUG_31 0x024C 0x0594 0x0000 0x9 0x0 | 1316 | #define MX6SX_PAD_SD2_DATA3__MMDC_DEBUG_31 0x024C 0x0594 0x0000 0x9 0x0 |
| 1317 | #define MX6SX_PAD_SD3_CLK__USDHC3_CLK 0x0250 0x0598 0x0000 0x0 0x0 | 1317 | #define MX6SX_PAD_SD3_CLK__USDHC3_CLK 0x0250 0x0598 0x0000 0x0 0x0 |
| 1318 | #define MX6SX_PAD_SD3_CLK__UART4_CTS_B 0x0250 0x0598 0x0844 0x1 0x0 | 1318 | #define MX6SX_PAD_SD3_CLK__UART4_CTS_B 0x0250 0x0598 0x0000 0x1 0x0 |
| 1319 | #define MX6SX_PAD_SD3_CLK__ECSPI4_SCLK 0x0250 0x0598 0x0740 0x2 0x0 | 1319 | #define MX6SX_PAD_SD3_CLK__ECSPI4_SCLK 0x0250 0x0598 0x0740 0x2 0x0 |
| 1320 | #define MX6SX_PAD_SD3_CLK__AUDMUX_AUD6_RXFS 0x0250 0x0598 0x0680 0x3 0x0 | 1320 | #define MX6SX_PAD_SD3_CLK__AUDMUX_AUD6_RXFS 0x0250 0x0598 0x0680 0x3 0x0 |
| 1321 | #define MX6SX_PAD_SD3_CLK__LCDIF2_VSYNC 0x0250 0x0598 0x0000 0x4 0x0 | 1321 | #define MX6SX_PAD_SD3_CLK__LCDIF2_VSYNC 0x0250 0x0598 0x0000 0x4 0x0 |
| @@ -1409,7 +1409,7 @@ | |||
| 1409 | #define MX6SX_PAD_SD3_DATA7__USDHC3_DATA7 0x0274 0x05BC 0x0000 0x0 0x0 | 1409 | #define MX6SX_PAD_SD3_DATA7__USDHC3_DATA7 0x0274 0x05BC 0x0000 0x0 0x0 |
| 1410 | #define MX6SX_PAD_SD3_DATA7__CAN1_RX 0x0274 0x05BC 0x068C 0x1 0x0 | 1410 | #define MX6SX_PAD_SD3_DATA7__CAN1_RX 0x0274 0x05BC 0x068C 0x1 0x0 |
| 1411 | #define MX6SX_PAD_SD3_DATA7__CANFD_RX1 0x0274 0x05BC 0x0694 0x2 0x0 | 1411 | #define MX6SX_PAD_SD3_DATA7__CANFD_RX1 0x0274 0x05BC 0x0694 0x2 0x0 |
| 1412 | #define MX6SX_PAD_SD3_DATA7__UART3_CTS_B 0x0274 0x05BC 0x083C 0x3 0x3 | 1412 | #define MX6SX_PAD_SD3_DATA7__UART3_CTS_B 0x0274 0x05BC 0x0000 0x3 0x0 |
| 1413 | #define MX6SX_PAD_SD3_DATA7__LCDIF2_DATA_5 0x0274 0x05BC 0x0000 0x4 0x0 | 1413 | #define MX6SX_PAD_SD3_DATA7__LCDIF2_DATA_5 0x0274 0x05BC 0x0000 0x4 0x0 |
| 1414 | #define MX6SX_PAD_SD3_DATA7__GPIO7_IO_9 0x0274 0x05BC 0x0000 0x5 0x0 | 1414 | #define MX6SX_PAD_SD3_DATA7__GPIO7_IO_9 0x0274 0x05BC 0x0000 0x5 0x0 |
| 1415 | #define MX6SX_PAD_SD3_DATA7__ENET1_1588_EVENT0_IN 0x0274 0x05BC 0x0000 0x6 0x0 | 1415 | #define MX6SX_PAD_SD3_DATA7__ENET1_1588_EVENT0_IN 0x0274 0x05BC 0x0000 0x6 0x0 |
| @@ -1510,7 +1510,7 @@ | |||
| 1510 | #define MX6SX_PAD_SD4_DATA6__SDMA_DEBUG_EVENT_CHANNEL_1 0x0298 0x05E0 0x0000 0x9 0x0 | 1510 | #define MX6SX_PAD_SD4_DATA6__SDMA_DEBUG_EVENT_CHANNEL_1 0x0298 0x05E0 0x0000 0x9 0x0 |
| 1511 | #define MX6SX_PAD_SD4_DATA7__USDHC4_DATA7 0x029C 0x05E4 0x0000 0x0 0x0 | 1511 | #define MX6SX_PAD_SD4_DATA7__USDHC4_DATA7 0x029C 0x05E4 0x0000 0x0 0x0 |
| 1512 | #define MX6SX_PAD_SD4_DATA7__RAWNAND_DATA08 0x029C 0x05E4 0x0000 0x1 0x0 | 1512 | #define MX6SX_PAD_SD4_DATA7__RAWNAND_DATA08 0x029C 0x05E4 0x0000 0x1 0x0 |
| 1513 | #define MX6SX_PAD_SD4_DATA7__UART5_CTS_B 0x029C 0x05E4 0x084C 0x2 0x1 | 1513 | #define MX6SX_PAD_SD4_DATA7__UART5_CTS_B 0x029C 0x05E4 0x0000 0x2 0x0 |
| 1514 | #define MX6SX_PAD_SD4_DATA7__ECSPI3_SS0 0x029C 0x05E4 0x073C 0x3 0x0 | 1514 | #define MX6SX_PAD_SD4_DATA7__ECSPI3_SS0 0x029C 0x05E4 0x073C 0x3 0x0 |
| 1515 | #define MX6SX_PAD_SD4_DATA7__LCDIF2_DATA_15 0x029C 0x05E4 0x0000 0x4 0x0 | 1515 | #define MX6SX_PAD_SD4_DATA7__LCDIF2_DATA_15 0x029C 0x05E4 0x0000 0x4 0x0 |
| 1516 | #define MX6SX_PAD_SD4_DATA7__GPIO6_IO_21 0x029C 0x05E4 0x0000 0x5 0x0 | 1516 | #define MX6SX_PAD_SD4_DATA7__GPIO6_IO_21 0x029C 0x05E4 0x0000 0x5 0x0 |
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts index 23486c081a69..be59014474b2 100644 --- a/arch/arm/boot/dts/r8a7791-koelsch.dts +++ b/arch/arm/boot/dts/r8a7791-koelsch.dts | |||
| @@ -275,11 +275,6 @@ | |||
| 275 | renesas,function = "msiof0"; | 275 | renesas,function = "msiof0"; |
| 276 | }; | 276 | }; |
| 277 | 277 | ||
| 278 | i2c6_pins: i2c6 { | ||
| 279 | renesas,groups = "i2c6"; | ||
| 280 | renesas,function = "i2c6"; | ||
| 281 | }; | ||
| 282 | |||
| 283 | usb0_pins: usb0 { | 278 | usb0_pins: usb0 { |
| 284 | renesas,groups = "usb0"; | 279 | renesas,groups = "usb0"; |
| 285 | renesas,function = "usb0"; | 280 | renesas,function = "usb0"; |
| @@ -420,8 +415,6 @@ | |||
| 420 | }; | 415 | }; |
| 421 | 416 | ||
| 422 | &i2c6 { | 417 | &i2c6 { |
| 423 | pinctrl-names = "default"; | ||
| 424 | pinctrl-0 = <&i2c6_pins>; | ||
| 425 | status = "okay"; | 418 | status = "okay"; |
| 426 | clock-frequency = <100000>; | 419 | clock-frequency = <100000>; |
| 427 | 420 | ||
diff --git a/arch/arm/boot/dts/rk3066a-bqcurie2.dts b/arch/arm/boot/dts/rk3066a-bqcurie2.dts index 042f821d9e4d..c9d912da6141 100644 --- a/arch/arm/boot/dts/rk3066a-bqcurie2.dts +++ b/arch/arm/boot/dts/rk3066a-bqcurie2.dts | |||
| @@ -149,6 +149,8 @@ | |||
| 149 | &mmc0 { /* sdmmc */ | 149 | &mmc0 { /* sdmmc */ |
| 150 | num-slots = <1>; | 150 | num-slots = <1>; |
| 151 | status = "okay"; | 151 | status = "okay"; |
| 152 | pinctrl-names = "default"; | ||
| 153 | pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>; | ||
| 152 | vmmc-supply = <&vcc_sd0>; | 154 | vmmc-supply = <&vcc_sd0>; |
| 153 | 155 | ||
| 154 | slot@0 { | 156 | slot@0 { |
diff --git a/arch/arm/boot/dts/rk3188-radxarock.dts b/arch/arm/boot/dts/rk3188-radxarock.dts index 171b610db709..5e4e3c238b2d 100644 --- a/arch/arm/boot/dts/rk3188-radxarock.dts +++ b/arch/arm/boot/dts/rk3188-radxarock.dts | |||
| @@ -179,6 +179,8 @@ | |||
| 179 | &mmc0 { | 179 | &mmc0 { |
| 180 | num-slots = <1>; | 180 | num-slots = <1>; |
| 181 | status = "okay"; | 181 | status = "okay"; |
| 182 | pinctrl-names = "default"; | ||
| 183 | pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>; | ||
| 182 | vmmc-supply = <&vcc_sd0>; | 184 | vmmc-supply = <&vcc_sd0>; |
| 183 | 185 | ||
| 184 | slot@0 { | 186 | slot@0 { |
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index 44b07e512c24..e06fbfc55bb7 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi | |||
| @@ -660,6 +660,8 @@ | |||
| 660 | clock-frequency = <100000>; | 660 | clock-frequency = <100000>; |
| 661 | resets = <&apb2_rst 0>; | 661 | resets = <&apb2_rst 0>; |
| 662 | status = "disabled"; | 662 | status = "disabled"; |
| 663 | #address-cells = <1>; | ||
| 664 | #size-cells = <0>; | ||
| 663 | }; | 665 | }; |
| 664 | 666 | ||
| 665 | i2c1: i2c@01c2b000 { | 667 | i2c1: i2c@01c2b000 { |
| @@ -670,6 +672,8 @@ | |||
| 670 | clock-frequency = <100000>; | 672 | clock-frequency = <100000>; |
| 671 | resets = <&apb2_rst 1>; | 673 | resets = <&apb2_rst 1>; |
| 672 | status = "disabled"; | 674 | status = "disabled"; |
| 675 | #address-cells = <1>; | ||
| 676 | #size-cells = <0>; | ||
| 673 | }; | 677 | }; |
| 674 | 678 | ||
| 675 | i2c2: i2c@01c2b400 { | 679 | i2c2: i2c@01c2b400 { |
| @@ -680,6 +684,8 @@ | |||
| 680 | clock-frequency = <100000>; | 684 | clock-frequency = <100000>; |
| 681 | resets = <&apb2_rst 2>; | 685 | resets = <&apb2_rst 2>; |
| 682 | status = "disabled"; | 686 | status = "disabled"; |
| 687 | #address-cells = <1>; | ||
| 688 | #size-cells = <0>; | ||
| 683 | }; | 689 | }; |
| 684 | 690 | ||
| 685 | i2c3: i2c@01c2b800 { | 691 | i2c3: i2c@01c2b800 { |
| @@ -690,6 +696,8 @@ | |||
| 690 | clock-frequency = <100000>; | 696 | clock-frequency = <100000>; |
| 691 | resets = <&apb2_rst 3>; | 697 | resets = <&apb2_rst 3>; |
| 692 | status = "disabled"; | 698 | status = "disabled"; |
| 699 | #address-cells = <1>; | ||
| 700 | #size-cells = <0>; | ||
| 693 | }; | 701 | }; |
| 694 | 702 | ||
| 695 | gmac: ethernet@01c30000 { | 703 | gmac: ethernet@01c30000 { |
diff --git a/arch/arm/boot/dts/tegra30-apalis.dtsi b/arch/arm/boot/dts/tegra30-apalis.dtsi index 8adaa7871dd3..a5446cba9804 100644 --- a/arch/arm/boot/dts/tegra30-apalis.dtsi +++ b/arch/arm/boot/dts/tegra30-apalis.dtsi | |||
| @@ -423,7 +423,7 @@ | |||
| 423 | vcc4-supply = <&sys_3v3_reg>; | 423 | vcc4-supply = <&sys_3v3_reg>; |
| 424 | vcc5-supply = <&sys_3v3_reg>; | 424 | vcc5-supply = <&sys_3v3_reg>; |
| 425 | vcc6-supply = <&vio_reg>; | 425 | vcc6-supply = <&vio_reg>; |
| 426 | vcc7-supply = <&sys_5v0_reg>; | 426 | vcc7-supply = <&charge_pump_5v0_reg>; |
| 427 | vccio-supply = <&sys_3v3_reg>; | 427 | vccio-supply = <&sys_3v3_reg>; |
| 428 | 428 | ||
| 429 | regulators { | 429 | regulators { |
| @@ -674,5 +674,14 @@ | |||
| 674 | regulator-max-microvolt = <3300000>; | 674 | regulator-max-microvolt = <3300000>; |
| 675 | regulator-always-on; | 675 | regulator-always-on; |
| 676 | }; | 676 | }; |
| 677 | |||
| 678 | charge_pump_5v0_reg: regulator@101 { | ||
| 679 | compatible = "regulator-fixed"; | ||
| 680 | reg = <101>; | ||
| 681 | regulator-name = "5v0"; | ||
| 682 | regulator-min-microvolt = <5000000>; | ||
| 683 | regulator-max-microvolt = <5000000>; | ||
| 684 | regulator-always-on; | ||
| 685 | }; | ||
| 677 | }; | 686 | }; |
| 678 | }; | 687 | }; |
diff --git a/arch/arm/boot/dts/tegra30-colibri.dtsi b/arch/arm/boot/dts/tegra30-colibri.dtsi index bf16f8e65627..c4ed1bec4d92 100644 --- a/arch/arm/boot/dts/tegra30-colibri.dtsi +++ b/arch/arm/boot/dts/tegra30-colibri.dtsi | |||
| @@ -201,7 +201,7 @@ | |||
| 201 | vcc4-supply = <&sys_3v3_reg>; | 201 | vcc4-supply = <&sys_3v3_reg>; |
| 202 | vcc5-supply = <&sys_3v3_reg>; | 202 | vcc5-supply = <&sys_3v3_reg>; |
| 203 | vcc6-supply = <&vio_reg>; | 203 | vcc6-supply = <&vio_reg>; |
| 204 | vcc7-supply = <&sys_5v0_reg>; | 204 | vcc7-supply = <&charge_pump_5v0_reg>; |
| 205 | vccio-supply = <&sys_3v3_reg>; | 205 | vccio-supply = <&sys_3v3_reg>; |
| 206 | 206 | ||
| 207 | regulators { | 207 | regulators { |
| @@ -373,5 +373,14 @@ | |||
| 373 | regulator-max-microvolt = <3300000>; | 373 | regulator-max-microvolt = <3300000>; |
| 374 | regulator-always-on; | 374 | regulator-always-on; |
| 375 | }; | 375 | }; |
| 376 | |||
| 377 | charge_pump_5v0_reg: regulator@101 { | ||
| 378 | compatible = "regulator-fixed"; | ||
| 379 | reg = <101>; | ||
| 380 | regulator-name = "5v0"; | ||
| 381 | regulator-min-microvolt = <5000000>; | ||
| 382 | regulator-max-microvolt = <5000000>; | ||
| 383 | regulator-always-on; | ||
| 384 | }; | ||
| 376 | }; | 385 | }; |
| 377 | }; | 386 | }; |
diff --git a/arch/arm/boot/dts/vf610-twr.dts b/arch/arm/boot/dts/vf610-twr.dts index 11d733406c7e..b8a5e8c68f06 100644 --- a/arch/arm/boot/dts/vf610-twr.dts +++ b/arch/arm/boot/dts/vf610-twr.dts | |||
| @@ -168,7 +168,7 @@ | |||
| 168 | }; | 168 | }; |
| 169 | 169 | ||
| 170 | pinctrl_esdhc1: esdhc1grp { | 170 | pinctrl_esdhc1: esdhc1grp { |
| 171 | fsl,fsl,pins = < | 171 | fsl,pins = < |
| 172 | VF610_PAD_PTA24__ESDHC1_CLK 0x31ef | 172 | VF610_PAD_PTA24__ESDHC1_CLK 0x31ef |
| 173 | VF610_PAD_PTA25__ESDHC1_CMD 0x31ef | 173 | VF610_PAD_PTA25__ESDHC1_CMD 0x31ef |
| 174 | VF610_PAD_PTA26__ESDHC1_DAT0 0x31ef | 174 | VF610_PAD_PTA26__ESDHC1_DAT0 0x31ef |
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig index 9de84a215abd..be9a51afe05a 100644 --- a/arch/arm/mach-imx/Kconfig +++ b/arch/arm/mach-imx/Kconfig | |||
| @@ -85,7 +85,6 @@ config SOC_IMX25 | |||
| 85 | 85 | ||
| 86 | config SOC_IMX27 | 86 | config SOC_IMX27 |
| 87 | bool | 87 | bool |
| 88 | select ARCH_HAS_OPP | ||
| 89 | select CPU_ARM926T | 88 | select CPU_ARM926T |
| 90 | select IMX_HAVE_IOMUX_V1 | 89 | select IMX_HAVE_IOMUX_V1 |
| 91 | select MXC_AVIC | 90 | select MXC_AVIC |
| @@ -659,7 +658,6 @@ comment "Device tree only" | |||
| 659 | 658 | ||
| 660 | config SOC_IMX5 | 659 | config SOC_IMX5 |
| 661 | bool | 660 | bool |
| 662 | select ARCH_HAS_OPP | ||
| 663 | select HAVE_IMX_SRC | 661 | select HAVE_IMX_SRC |
| 664 | select MXC_TZIC | 662 | select MXC_TZIC |
| 665 | 663 | ||
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile index ac88599ca080..23c02932bf84 100644 --- a/arch/arm/mach-imx/Makefile +++ b/arch/arm/mach-imx/Makefile | |||
| @@ -93,9 +93,11 @@ obj-$(CONFIG_HAVE_IMX_ANATOP) += anatop.o | |||
| 93 | obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o | 93 | obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o |
| 94 | obj-$(CONFIG_HAVE_IMX_MMDC) += mmdc.o | 94 | obj-$(CONFIG_HAVE_IMX_MMDC) += mmdc.o |
| 95 | obj-$(CONFIG_HAVE_IMX_SRC) += src.o | 95 | obj-$(CONFIG_HAVE_IMX_SRC) += src.o |
| 96 | ifdef CONFIG_SOC_IMX6 | ||
| 96 | AFLAGS_headsmp.o :=-Wa,-march=armv7-a | 97 | AFLAGS_headsmp.o :=-Wa,-march=armv7-a |
| 97 | obj-$(CONFIG_SMP) += headsmp.o platsmp.o | 98 | obj-$(CONFIG_SMP) += headsmp.o platsmp.o |
| 98 | obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o | 99 | obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o |
| 100 | endif | ||
| 99 | obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o | 101 | obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o |
| 100 | obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o mach-imx6sl.o | 102 | obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o mach-imx6sl.o |
| 101 | obj-$(CONFIG_SOC_IMX6SX) += clk-imx6sx.o mach-imx6sx.o | 103 | obj-$(CONFIG_SOC_IMX6SX) += clk-imx6sx.o mach-imx6sx.o |
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c index 6cceb7765c14..29d412975aff 100644 --- a/arch/arm/mach-imx/clk-imx6q.c +++ b/arch/arm/mach-imx/clk-imx6q.c | |||
| @@ -194,6 +194,10 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) | |||
| 194 | clk[IMX6QDL_CLK_PLL3_80M] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6); | 194 | clk[IMX6QDL_CLK_PLL3_80M] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6); |
| 195 | clk[IMX6QDL_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8); | 195 | clk[IMX6QDL_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8); |
| 196 | clk[IMX6QDL_CLK_TWD] = imx_clk_fixed_factor("twd", "arm", 1, 2); | 196 | clk[IMX6QDL_CLK_TWD] = imx_clk_fixed_factor("twd", "arm", 1, 2); |
| 197 | if (cpu_is_imx6dl()) { | ||
| 198 | clk[IMX6QDL_CLK_GPU2D_AXI] = imx_clk_fixed_factor("gpu2d_axi", "mmdc_ch0_axi_podf", 1, 1); | ||
| 199 | clk[IMX6QDL_CLK_GPU3D_AXI] = imx_clk_fixed_factor("gpu3d_axi", "mmdc_ch0_axi_podf", 1, 1); | ||
| 200 | } | ||
| 197 | 201 | ||
| 198 | clk[IMX6QDL_CLK_PLL4_POST_DIV] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio", CLK_SET_RATE_PARENT, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock); | 202 | clk[IMX6QDL_CLK_PLL4_POST_DIV] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio", CLK_SET_RATE_PARENT, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock); |
| 199 | clk[IMX6QDL_CLK_PLL4_AUDIO_DIV] = clk_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock); | 203 | clk[IMX6QDL_CLK_PLL4_AUDIO_DIV] = clk_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock); |
| @@ -217,8 +221,10 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) | |||
| 217 | clk[IMX6QDL_CLK_ESAI_SEL] = imx_clk_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels)); | 221 | clk[IMX6QDL_CLK_ESAI_SEL] = imx_clk_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels)); |
| 218 | clk[IMX6QDL_CLK_ASRC_SEL] = imx_clk_mux("asrc_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels)); | 222 | clk[IMX6QDL_CLK_ASRC_SEL] = imx_clk_mux("asrc_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels)); |
| 219 | clk[IMX6QDL_CLK_SPDIF_SEL] = imx_clk_mux("spdif_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels)); | 223 | clk[IMX6QDL_CLK_SPDIF_SEL] = imx_clk_mux("spdif_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels)); |
| 220 | clk[IMX6QDL_CLK_GPU2D_AXI] = imx_clk_mux("gpu2d_axi", base + 0x18, 0, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels)); | 224 | if (cpu_is_imx6q()) { |
| 221 | clk[IMX6QDL_CLK_GPU3D_AXI] = imx_clk_mux("gpu3d_axi", base + 0x18, 1, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels)); | 225 | clk[IMX6QDL_CLK_GPU2D_AXI] = imx_clk_mux("gpu2d_axi", base + 0x18, 0, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels)); |
| 226 | clk[IMX6QDL_CLK_GPU3D_AXI] = imx_clk_mux("gpu3d_axi", base + 0x18, 1, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels)); | ||
| 227 | } | ||
| 222 | clk[IMX6QDL_CLK_GPU2D_CORE_SEL] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 16, 2, gpu2d_core_sels, ARRAY_SIZE(gpu2d_core_sels)); | 228 | clk[IMX6QDL_CLK_GPU2D_CORE_SEL] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 16, 2, gpu2d_core_sels, ARRAY_SIZE(gpu2d_core_sels)); |
| 223 | clk[IMX6QDL_CLK_GPU3D_CORE_SEL] = imx_clk_mux("gpu3d_core_sel", base + 0x18, 4, 2, gpu3d_core_sels, ARRAY_SIZE(gpu3d_core_sels)); | 229 | clk[IMX6QDL_CLK_GPU3D_CORE_SEL] = imx_clk_mux("gpu3d_core_sel", base + 0x18, 4, 2, gpu3d_core_sels, ARRAY_SIZE(gpu3d_core_sels)); |
| 224 | clk[IMX6QDL_CLK_GPU3D_SHADER_SEL] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8, 2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels)); | 230 | clk[IMX6QDL_CLK_GPU3D_SHADER_SEL] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8, 2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels)); |
diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S index 74b50f1982db..ca4ea2daf25b 100644 --- a/arch/arm/mach-imx/suspend-imx6.S +++ b/arch/arm/mach-imx/suspend-imx6.S | |||
| @@ -173,6 +173,8 @@ ENTRY(imx6_suspend) | |||
| 173 | ldr r6, [r11, #0x0] | 173 | ldr r6, [r11, #0x0] |
| 174 | ldr r11, [r0, #PM_INFO_MX6Q_GPC_V_OFFSET] | 174 | ldr r11, [r0, #PM_INFO_MX6Q_GPC_V_OFFSET] |
| 175 | ldr r6, [r11, #0x0] | 175 | ldr r6, [r11, #0x0] |
| 176 | ldr r11, [r0, #PM_INFO_MX6Q_IOMUXC_V_OFFSET] | ||
| 177 | ldr r6, [r11, #0x0] | ||
| 176 | 178 | ||
| 177 | /* use r11 to store the IO address */ | 179 | /* use r11 to store the IO address */ |
| 178 | ldr r11, [r0, #PM_INFO_MX6Q_SRC_V_OFFSET] | 180 | ldr r11, [r0, #PM_INFO_MX6Q_SRC_V_OFFSET] |
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig index e15dff790dbb..1e6c51c7c2d5 100644 --- a/arch/arm/mach-shmobile/Kconfig +++ b/arch/arm/mach-shmobile/Kconfig | |||
| @@ -75,6 +75,7 @@ config ARCH_SH7372 | |||
| 75 | select ARM_CPU_SUSPEND if PM || CPU_IDLE | 75 | select ARM_CPU_SUSPEND if PM || CPU_IDLE |
| 76 | select CPU_V7 | 76 | select CPU_V7 |
| 77 | select SH_CLK_CPG | 77 | select SH_CLK_CPG |
| 78 | select SH_INTC | ||
| 78 | select SYS_SUPPORTS_SH_CMT | 79 | select SYS_SUPPORTS_SH_CMT |
| 79 | select SYS_SUPPORTS_SH_TMU | 80 | select SYS_SUPPORTS_SH_TMU |
| 80 | 81 | ||
| @@ -85,6 +86,7 @@ config ARCH_SH73A0 | |||
| 85 | select CPU_V7 | 86 | select CPU_V7 |
| 86 | select I2C | 87 | select I2C |
| 87 | select SH_CLK_CPG | 88 | select SH_CLK_CPG |
| 89 | select SH_INTC | ||
| 88 | select RENESAS_INTC_IRQPIN | 90 | select RENESAS_INTC_IRQPIN |
| 89 | select SYS_SUPPORTS_SH_CMT | 91 | select SYS_SUPPORTS_SH_CMT |
| 90 | select SYS_SUPPORTS_SH_TMU | 92 | select SYS_SUPPORTS_SH_TMU |
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 57833546bf00..2df5e5daeebe 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile | |||
| @@ -39,7 +39,7 @@ head-y := arch/arm64/kernel/head.o | |||
| 39 | 39 | ||
| 40 | # The byte offset of the kernel image in RAM from the start of RAM. | 40 | # The byte offset of the kernel image in RAM from the start of RAM. |
| 41 | ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y) | 41 | ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y) |
| 42 | TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%04x0\n", int(65535 * rand())}') | 42 | TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}') |
| 43 | else | 43 | else |
| 44 | TEXT_OFFSET := 0x00080000 | 44 | TEXT_OFFSET := 0x00080000 |
| 45 | endif | 45 | endif |
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 1e52b741d806..d92ef3c54161 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig | |||
| @@ -64,6 +64,8 @@ CONFIG_VIRTIO_BLK=y | |||
| 64 | CONFIG_BLK_DEV_SD=y | 64 | CONFIG_BLK_DEV_SD=y |
| 65 | # CONFIG_SCSI_LOWLEVEL is not set | 65 | # CONFIG_SCSI_LOWLEVEL is not set |
| 66 | CONFIG_ATA=y | 66 | CONFIG_ATA=y |
| 67 | CONFIG_AHCI_XGENE=y | ||
| 68 | CONFIG_PHY_XGENE=y | ||
| 67 | CONFIG_PATA_PLATFORM=y | 69 | CONFIG_PATA_PLATFORM=y |
| 68 | CONFIG_PATA_OF_PLATFORM=y | 70 | CONFIG_PATA_OF_PLATFORM=y |
| 69 | CONFIG_NETDEVICES=y | 71 | CONFIG_NETDEVICES=y |
| @@ -71,6 +73,7 @@ CONFIG_TUN=y | |||
| 71 | CONFIG_VIRTIO_NET=y | 73 | CONFIG_VIRTIO_NET=y |
| 72 | CONFIG_SMC91X=y | 74 | CONFIG_SMC91X=y |
| 73 | CONFIG_SMSC911X=y | 75 | CONFIG_SMSC911X=y |
| 76 | CONFIG_NET_XGENE=y | ||
| 74 | # CONFIG_WLAN is not set | 77 | # CONFIG_WLAN is not set |
| 75 | CONFIG_INPUT_EVDEV=y | 78 | CONFIG_INPUT_EVDEV=y |
| 76 | # CONFIG_SERIO_SERPORT is not set | 79 | # CONFIG_SERIO_SERPORT is not set |
diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h index 1be62bcb9d47..74a9d301819f 100644 --- a/arch/arm64/include/asm/sparsemem.h +++ b/arch/arm64/include/asm/sparsemem.h | |||
| @@ -17,7 +17,7 @@ | |||
| 17 | #define __ASM_SPARSEMEM_H | 17 | #define __ASM_SPARSEMEM_H |
| 18 | 18 | ||
| 19 | #ifdef CONFIG_SPARSEMEM | 19 | #ifdef CONFIG_SPARSEMEM |
| 20 | #define MAX_PHYSMEM_BITS 40 | 20 | #define MAX_PHYSMEM_BITS 48 |
| 21 | #define SECTION_SIZE_BITS 30 | 21 | #define SECTION_SIZE_BITS 30 |
| 22 | #endif | 22 | #endif |
| 23 | 23 | ||
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 4bc95d27e063..6d2bf419431d 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h | |||
| @@ -41,7 +41,7 @@ | |||
| 41 | #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) | 41 | #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) |
| 42 | #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) | 42 | #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) |
| 43 | 43 | ||
| 44 | #define __NR_compat_syscalls 383 | 44 | #define __NR_compat_syscalls 386 |
| 45 | #endif | 45 | #endif |
| 46 | 46 | ||
| 47 | #define __ARCH_WANT_SYS_CLONE | 47 | #define __ARCH_WANT_SYS_CLONE |
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index e242600c4046..da1f06b535e3 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h | |||
| @@ -787,3 +787,8 @@ __SYSCALL(__NR_sched_setattr, sys_sched_setattr) | |||
| 787 | __SYSCALL(__NR_sched_getattr, sys_sched_getattr) | 787 | __SYSCALL(__NR_sched_getattr, sys_sched_getattr) |
| 788 | #define __NR_renameat2 382 | 788 | #define __NR_renameat2 382 |
| 789 | __SYSCALL(__NR_renameat2, sys_renameat2) | 789 | __SYSCALL(__NR_renameat2, sys_renameat2) |
| 790 | /* 383 for seccomp */ | ||
| 791 | #define __NR_getrandom 384 | ||
| 792 | __SYSCALL(__NR_getrandom, sys_getrandom) | ||
| 793 | #define __NR_memfd_create 385 | ||
| 794 | __SYSCALL(__NR_memfd_create, sys_memfd_create) | ||
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index f798f66634af..177169623026 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c | |||
| @@ -49,7 +49,7 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info) | |||
| 49 | 49 | ||
| 50 | if (l1ip != ICACHE_POLICY_PIPT) | 50 | if (l1ip != ICACHE_POLICY_PIPT) |
| 51 | set_bit(ICACHEF_ALIASING, &__icache_flags); | 51 | set_bit(ICACHEF_ALIASING, &__icache_flags); |
| 52 | if (l1ip == ICACHE_POLICY_AIVIVT); | 52 | if (l1ip == ICACHE_POLICY_AIVIVT) |
| 53 | set_bit(ICACHEF_AIVIVT, &__icache_flags); | 53 | set_bit(ICACHEF_AIVIVT, &__icache_flags); |
| 54 | 54 | ||
| 55 | pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu); | 55 | pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu); |
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index e72f3100958f..03aaa99e1ea0 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c | |||
| @@ -188,6 +188,8 @@ static __init void reserve_regions(void) | |||
| 188 | if (uefi_debug) | 188 | if (uefi_debug) |
| 189 | pr_cont("\n"); | 189 | pr_cont("\n"); |
| 190 | } | 190 | } |
| 191 | |||
| 192 | set_bit(EFI_MEMMAP, &efi.flags); | ||
| 191 | } | 193 | } |
| 192 | 194 | ||
| 193 | 195 | ||
| @@ -463,6 +465,8 @@ static int __init arm64_enter_virtual_mode(void) | |||
| 463 | efi_native_runtime_setup(); | 465 | efi_native_runtime_setup(); |
| 464 | set_bit(EFI_RUNTIME_SERVICES, &efi.flags); | 466 | set_bit(EFI_RUNTIME_SERVICES, &efi.flags); |
| 465 | 467 | ||
| 468 | efi.runtime_version = efi.systab->hdr.revision; | ||
| 469 | |||
| 466 | return 0; | 470 | return 0; |
| 467 | 471 | ||
| 468 | err_unmap: | 472 | err_unmap: |
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 144f10567f82..bed028364a93 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S | |||
| @@ -38,11 +38,11 @@ | |||
| 38 | 38 | ||
| 39 | #define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET) | 39 | #define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET) |
| 40 | 40 | ||
| 41 | #if (TEXT_OFFSET & 0xf) != 0 | 41 | #if (TEXT_OFFSET & 0xfff) != 0 |
| 42 | #error TEXT_OFFSET must be at least 16B aligned | 42 | #error TEXT_OFFSET must be at least 4KB aligned |
| 43 | #elif (PAGE_OFFSET & 0xfffff) != 0 | 43 | #elif (PAGE_OFFSET & 0x1fffff) != 0 |
| 44 | #error PAGE_OFFSET must be at least 2MB aligned | 44 | #error PAGE_OFFSET must be at least 2MB aligned |
| 45 | #elif TEXT_OFFSET > 0xfffff | 45 | #elif TEXT_OFFSET > 0x1fffff |
| 46 | #error TEXT_OFFSET must be less than 2MB | 46 | #error TEXT_OFFSET must be less than 2MB |
| 47 | #endif | 47 | #endif |
| 48 | 48 | ||
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 0310811bd77d..70526cfda056 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c | |||
| @@ -1115,19 +1115,15 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs) | |||
| 1115 | if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) | 1115 | if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) |
| 1116 | trace_sys_enter(regs, regs->syscallno); | 1116 | trace_sys_enter(regs, regs->syscallno); |
| 1117 | 1117 | ||
| 1118 | #ifdef CONFIG_AUDITSYSCALL | ||
| 1119 | audit_syscall_entry(syscall_get_arch(), regs->syscallno, | 1118 | audit_syscall_entry(syscall_get_arch(), regs->syscallno, |
| 1120 | regs->orig_x0, regs->regs[1], regs->regs[2], regs->regs[3]); | 1119 | regs->orig_x0, regs->regs[1], regs->regs[2], regs->regs[3]); |
| 1121 | #endif | ||
| 1122 | 1120 | ||
| 1123 | return regs->syscallno; | 1121 | return regs->syscallno; |
| 1124 | } | 1122 | } |
| 1125 | 1123 | ||
| 1126 | asmlinkage void syscall_trace_exit(struct pt_regs *regs) | 1124 | asmlinkage void syscall_trace_exit(struct pt_regs *regs) |
| 1127 | { | 1125 | { |
| 1128 | #ifdef CONFIG_AUDITSYSCALL | ||
| 1129 | audit_syscall_exit(regs); | 1126 | audit_syscall_exit(regs); |
| 1130 | #endif | ||
| 1131 | 1127 | ||
| 1132 | if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) | 1128 | if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) |
| 1133 | trace_sys_exit(regs, regs_return_value(regs)); | 1129 | trace_sys_exit(regs, regs_return_value(regs)); |
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 5b4526ee3a01..5472c2401876 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include <linux/of_fdt.h> | 32 | #include <linux/of_fdt.h> |
| 33 | #include <linux/dma-mapping.h> | 33 | #include <linux/dma-mapping.h> |
| 34 | #include <linux/dma-contiguous.h> | 34 | #include <linux/dma-contiguous.h> |
| 35 | #include <linux/efi.h> | ||
| 35 | 36 | ||
| 36 | #include <asm/fixmap.h> | 37 | #include <asm/fixmap.h> |
| 37 | #include <asm/sections.h> | 38 | #include <asm/sections.h> |
| @@ -148,7 +149,8 @@ void __init arm64_memblock_init(void) | |||
| 148 | memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start); | 149 | memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start); |
| 149 | #endif | 150 | #endif |
| 150 | 151 | ||
| 151 | early_init_fdt_scan_reserved_mem(); | 152 | if (!efi_enabled(EFI_MEMMAP)) |
| 153 | early_init_fdt_scan_reserved_mem(); | ||
| 152 | 154 | ||
| 153 | /* 4GB maximum for 32-bit only capable devices */ | 155 | /* 4GB maximum for 32-bit only capable devices */ |
| 154 | if (IS_ENABLED(CONFIG_ZONE_DMA)) | 156 | if (IS_ENABLED(CONFIG_ZONE_DMA)) |
diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c index 776188908dfc..8c13675a12e7 100644 --- a/arch/mips/alchemy/devboards/db1200.c +++ b/arch/mips/alchemy/devboards/db1200.c | |||
| @@ -847,6 +847,7 @@ int __init db1200_dev_setup(void) | |||
| 847 | pr_warn("DB1200: cant get I2C close to 50MHz\n"); | 847 | pr_warn("DB1200: cant get I2C close to 50MHz\n"); |
| 848 | else | 848 | else |
| 849 | clk_set_rate(c, pfc); | 849 | clk_set_rate(c, pfc); |
| 850 | clk_prepare_enable(c); | ||
| 850 | clk_put(c); | 851 | clk_put(c); |
| 851 | } | 852 | } |
| 852 | 853 | ||
| @@ -922,11 +923,6 @@ int __init db1200_dev_setup(void) | |||
| 922 | } | 923 | } |
| 923 | 924 | ||
| 924 | /* Audio PSC clock is supplied externally. (FIXME: platdata!!) */ | 925 | /* Audio PSC clock is supplied externally. (FIXME: platdata!!) */ |
| 925 | c = clk_get(NULL, "psc1_intclk"); | ||
| 926 | if (!IS_ERR(c)) { | ||
| 927 | clk_prepare_enable(c); | ||
| 928 | clk_put(c); | ||
| 929 | } | ||
| 930 | __raw_writel(PSC_SEL_CLK_SERCLK, | 926 | __raw_writel(PSC_SEL_CLK_SERCLK, |
| 931 | (void __iomem *)KSEG1ADDR(AU1550_PSC1_PHYS_ADDR) + PSC_SEL_OFFSET); | 927 | (void __iomem *)KSEG1ADDR(AU1550_PSC1_PHYS_ADDR) + PSC_SEL_OFFSET); |
| 932 | wmb(); | 928 | wmb(); |
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c index 2b63e7e7d3d3..ad439c273003 100644 --- a/arch/mips/bcm47xx/setup.c +++ b/arch/mips/bcm47xx/setup.c | |||
| @@ -59,12 +59,21 @@ static void bcm47xx_machine_restart(char *command) | |||
| 59 | switch (bcm47xx_bus_type) { | 59 | switch (bcm47xx_bus_type) { |
| 60 | #ifdef CONFIG_BCM47XX_SSB | 60 | #ifdef CONFIG_BCM47XX_SSB |
| 61 | case BCM47XX_BUS_TYPE_SSB: | 61 | case BCM47XX_BUS_TYPE_SSB: |
| 62 | ssb_watchdog_timer_set(&bcm47xx_bus.ssb, 3); | 62 | if (bcm47xx_bus.ssb.chip_id == 0x4785) |
| 63 | write_c0_diag4(1 << 22); | ||
| 64 | ssb_watchdog_timer_set(&bcm47xx_bus.ssb, 1); | ||
| 65 | if (bcm47xx_bus.ssb.chip_id == 0x4785) { | ||
| 66 | __asm__ __volatile__( | ||
| 67 | ".set\tmips3\n\t" | ||
| 68 | "sync\n\t" | ||
| 69 | "wait\n\t" | ||
| 70 | ".set\tmips0"); | ||
| 71 | } | ||
| 63 | break; | 72 | break; |
| 64 | #endif | 73 | #endif |
| 65 | #ifdef CONFIG_BCM47XX_BCMA | 74 | #ifdef CONFIG_BCM47XX_BCMA |
| 66 | case BCM47XX_BUS_TYPE_BCMA: | 75 | case BCM47XX_BUS_TYPE_BCMA: |
| 67 | bcma_chipco_watchdog_timer_set(&bcm47xx_bus.bcma.bus.drv_cc, 3); | 76 | bcma_chipco_watchdog_timer_set(&bcm47xx_bus.bcma.bus.drv_cc, 1); |
| 68 | break; | 77 | break; |
| 69 | #endif | 78 | #endif |
| 70 | } | 79 | } |
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 008e9c8b8eac..38f4c32e2816 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c | |||
| @@ -263,7 +263,6 @@ static uint64_t crashk_size, crashk_base; | |||
| 263 | static int octeon_uart; | 263 | static int octeon_uart; |
| 264 | 264 | ||
| 265 | extern asmlinkage void handle_int(void); | 265 | extern asmlinkage void handle_int(void); |
| 266 | extern asmlinkage void plat_irq_dispatch(void); | ||
| 267 | 266 | ||
| 268 | /** | 267 | /** |
| 269 | * Return non zero if we are currently running in the Octeon simulator | 268 | * Return non zero if we are currently running in the Octeon simulator |
| @@ -458,6 +457,18 @@ static void octeon_halt(void) | |||
| 458 | octeon_kill_core(NULL); | 457 | octeon_kill_core(NULL); |
| 459 | } | 458 | } |
| 460 | 459 | ||
| 460 | static char __read_mostly octeon_system_type[80]; | ||
| 461 | |||
| 462 | static int __init init_octeon_system_type(void) | ||
| 463 | { | ||
| 464 | snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)", | ||
| 465 | cvmx_board_type_to_string(octeon_bootinfo->board_type), | ||
| 466 | octeon_model_get_string(read_c0_prid())); | ||
| 467 | |||
| 468 | return 0; | ||
| 469 | } | ||
| 470 | early_initcall(init_octeon_system_type); | ||
| 471 | |||
| 461 | /** | 472 | /** |
| 462 | * Return a string representing the system type | 473 | * Return a string representing the system type |
| 463 | * | 474 | * |
| @@ -465,11 +476,7 @@ static void octeon_halt(void) | |||
| 465 | */ | 476 | */ |
| 466 | const char *octeon_board_type_string(void) | 477 | const char *octeon_board_type_string(void) |
| 467 | { | 478 | { |
| 468 | static char name[80]; | 479 | return octeon_system_type; |
| 469 | sprintf(name, "%s (%s)", | ||
| 470 | cvmx_board_type_to_string(octeon_bootinfo->board_type), | ||
| 471 | octeon_model_get_string(read_c0_prid())); | ||
| 472 | return name; | ||
| 473 | } | 480 | } |
| 474 | 481 | ||
| 475 | const char *get_system_type(void) | 482 | const char *get_system_type(void) |
diff --git a/arch/mips/include/asm/eva.h b/arch/mips/include/asm/eva.h new file mode 100644 index 000000000000..a3d1807f227c --- /dev/null +++ b/arch/mips/include/asm/eva.h | |||
| @@ -0,0 +1,43 @@ | |||
| 1 | /* | ||
| 2 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 3 | * License. See the file "COPYING" in the main directory of this archive | ||
| 4 | * for more details. | ||
| 5 | * | ||
| 6 | * Copyright (C) 2014, Imagination Technologies Ltd. | ||
| 7 | * | ||
| 8 | * EVA functions for generic code | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef _ASM_EVA_H | ||
| 12 | #define _ASM_EVA_H | ||
| 13 | |||
| 14 | #include <kernel-entry-init.h> | ||
| 15 | |||
| 16 | #ifdef __ASSEMBLY__ | ||
| 17 | |||
| 18 | #ifdef CONFIG_EVA | ||
| 19 | |||
| 20 | /* | ||
| 21 | * EVA early init code | ||
| 22 | * | ||
| 23 | * Platforms must define their own 'platform_eva_init' macro in | ||
| 24 | * their kernel-entry-init.h header. This macro usually does the | ||
| 25 | * platform specific configuration of the segmentation registers, | ||
| 26 | * and it is normally called from assembly code. | ||
| 27 | * | ||
| 28 | */ | ||
| 29 | |||
| 30 | .macro eva_init | ||
| 31 | platform_eva_init | ||
| 32 | .endm | ||
| 33 | |||
| 34 | #else | ||
| 35 | |||
| 36 | .macro eva_init | ||
| 37 | .endm | ||
| 38 | |||
| 39 | #endif /* CONFIG_EVA */ | ||
| 40 | |||
| 41 | #endif /* __ASSEMBLY__ */ | ||
| 42 | |||
| 43 | #endif | ||
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h index 3f20b2111d56..d7699cf7e135 100644 --- a/arch/mips/include/asm/gic.h +++ b/arch/mips/include/asm/gic.h | |||
| @@ -49,7 +49,7 @@ | |||
| 49 | #endif | 49 | #endif |
| 50 | #define GICBIS(reg, mask, bits) \ | 50 | #define GICBIS(reg, mask, bits) \ |
| 51 | do { u32 data; \ | 51 | do { u32 data; \ |
| 52 | GICREAD((reg), data); \ | 52 | GICREAD(reg, data); \ |
| 53 | data &= ~(mask); \ | 53 | data &= ~(mask); \ |
| 54 | data |= ((bits) & (mask)); \ | 54 | data |= ((bits) & (mask)); \ |
| 55 | GICWRITE((reg), data); \ | 55 | GICWRITE((reg), data); \ |
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h index ae1f7b24dd1a..39f07aec640c 100644 --- a/arch/mips/include/asm/irq.h +++ b/arch/mips/include/asm/irq.h | |||
| @@ -26,6 +26,8 @@ static inline int irq_canonicalize(int irq) | |||
| 26 | #define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */ | 26 | #define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */ |
| 27 | #endif | 27 | #endif |
| 28 | 28 | ||
| 29 | asmlinkage void plat_irq_dispatch(void); | ||
| 30 | |||
| 29 | extern void do_IRQ(unsigned int irq); | 31 | extern void do_IRQ(unsigned int irq); |
| 30 | 32 | ||
| 31 | extern void arch_init_irq(void); | 33 | extern void arch_init_irq(void); |
diff --git a/arch/mips/include/asm/mach-malta/kernel-entry-init.h b/arch/mips/include/asm/mach-malta/kernel-entry-init.h index 77eeda77e73c..0cf8622db27f 100644 --- a/arch/mips/include/asm/mach-malta/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-malta/kernel-entry-init.h | |||
| @@ -10,14 +10,15 @@ | |||
| 10 | #ifndef __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H | 10 | #ifndef __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H |
| 11 | #define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H | 11 | #define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H |
| 12 | 12 | ||
| 13 | #include <asm/regdef.h> | ||
| 14 | #include <asm/mipsregs.h> | ||
| 15 | |||
| 13 | /* | 16 | /* |
| 14 | * Prepare segments for EVA boot: | 17 | * Prepare segments for EVA boot: |
| 15 | * | 18 | * |
| 16 | * This is in case the processor boots in legacy configuration | 19 | * This is in case the processor boots in legacy configuration |
| 17 | * (SI_EVAReset is de-asserted and CONFIG5.K == 0) | 20 | * (SI_EVAReset is de-asserted and CONFIG5.K == 0) |
| 18 | * | 21 | * |
| 19 | * On entry, t1 is loaded with CP0_CONFIG | ||
| 20 | * | ||
| 21 | * ========================= Mappings ============================= | 22 | * ========================= Mappings ============================= |
| 22 | * Virtual memory Physical memory Mapping | 23 | * Virtual memory Physical memory Mapping |
| 23 | * 0x00000000 - 0x7fffffff 0x80000000 - 0xfffffffff MUSUK (kuseg) | 24 | * 0x00000000 - 0x7fffffff 0x80000000 - 0xfffffffff MUSUK (kuseg) |
| @@ -30,12 +31,20 @@ | |||
| 30 | * | 31 | * |
| 31 | * | 32 | * |
| 32 | * Lowmem is expanded to 2GB | 33 | * Lowmem is expanded to 2GB |
| 34 | * | ||
| 35 | * The following code uses the t0, t1, t2 and ra registers without | ||
| 36 | * previously preserving them. | ||
| 37 | * | ||
| 33 | */ | 38 | */ |
| 34 | .macro eva_entry | 39 | .macro platform_eva_init |
| 40 | |||
| 41 | .set push | ||
| 42 | .set reorder | ||
| 35 | /* | 43 | /* |
| 36 | * Get Config.K0 value and use it to program | 44 | * Get Config.K0 value and use it to program |
| 37 | * the segmentation registers | 45 | * the segmentation registers |
| 38 | */ | 46 | */ |
| 47 | mfc0 t1, CP0_CONFIG | ||
| 39 | andi t1, 0x7 /* CCA */ | 48 | andi t1, 0x7 /* CCA */ |
| 40 | move t2, t1 | 49 | move t2, t1 |
| 41 | ins t2, t1, 16, 3 | 50 | ins t2, t1, 16, 3 |
| @@ -77,6 +86,8 @@ | |||
| 77 | mtc0 t0, $16, 5 | 86 | mtc0 t0, $16, 5 |
| 78 | sync | 87 | sync |
| 79 | jal mips_ihb | 88 | jal mips_ihb |
| 89 | |||
| 90 | .set pop | ||
| 80 | .endm | 91 | .endm |
| 81 | 92 | ||
| 82 | .macro kernel_entry_setup | 93 | .macro kernel_entry_setup |
| @@ -95,7 +106,7 @@ | |||
| 95 | sll t0, t0, 6 /* SC bit */ | 106 | sll t0, t0, 6 /* SC bit */ |
| 96 | bgez t0, 9f | 107 | bgez t0, 9f |
| 97 | 108 | ||
| 98 | eva_entry | 109 | platform_eva_init |
| 99 | b 0f | 110 | b 0f |
| 100 | 9: | 111 | 9: |
| 101 | /* Assume we came from YAMON... */ | 112 | /* Assume we came from YAMON... */ |
| @@ -127,8 +138,7 @@ nonsc_processor: | |||
| 127 | #ifdef CONFIG_EVA | 138 | #ifdef CONFIG_EVA |
| 128 | sync | 139 | sync |
| 129 | ehb | 140 | ehb |
| 130 | mfc0 t1, CP0_CONFIG | 141 | platform_eva_init |
| 131 | eva_entry | ||
| 132 | #endif | 142 | #endif |
| 133 | .endm | 143 | .endm |
| 134 | 144 | ||
diff --git a/arch/mips/include/asm/mach-netlogic/topology.h b/arch/mips/include/asm/mach-netlogic/topology.h index ceeb1f5e7129..0eb43c832b25 100644 --- a/arch/mips/include/asm/mach-netlogic/topology.h +++ b/arch/mips/include/asm/mach-netlogic/topology.h | |||
| @@ -10,13 +10,6 @@ | |||
| 10 | 10 | ||
| 11 | #include <asm/mach-netlogic/multi-node.h> | 11 | #include <asm/mach-netlogic/multi-node.h> |
| 12 | 12 | ||
| 13 | #ifdef CONFIG_SMP | ||
| 14 | #define topology_physical_package_id(cpu) cpu_to_node(cpu) | ||
| 15 | #define topology_core_id(cpu) (cpu_logical_map(cpu) / NLM_THREADS_PER_CORE) | ||
| 16 | #define topology_thread_cpumask(cpu) (&cpu_sibling_map[cpu]) | ||
| 17 | #define topology_core_cpumask(cpu) cpumask_of_node(cpu_to_node(cpu)) | ||
| 18 | #endif | ||
| 19 | |||
| 20 | #include <asm-generic/topology.h> | 13 | #include <asm-generic/topology.h> |
| 21 | 14 | ||
| 22 | #endif /* _ASM_MACH_NETLOGIC_TOPOLOGY_H */ | 15 | #endif /* _ASM_MACH_NETLOGIC_TOPOLOGY_H */ |
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 027c74db13f9..df49a308085c 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h | |||
| @@ -122,6 +122,9 @@ do { \ | |||
| 122 | } \ | 122 | } \ |
| 123 | } while(0) | 123 | } while(0) |
| 124 | 124 | ||
| 125 | extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, | ||
| 126 | pte_t pteval); | ||
| 127 | |||
| 125 | #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) | 128 | #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) |
| 126 | 129 | ||
| 127 | #define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL)) | 130 | #define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL)) |
| @@ -145,7 +148,6 @@ static inline void set_pte(pte_t *ptep, pte_t pte) | |||
| 145 | } | 148 | } |
| 146 | } | 149 | } |
| 147 | } | 150 | } |
| 148 | #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) | ||
| 149 | 151 | ||
| 150 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 152 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
| 151 | { | 153 | { |
| @@ -183,7 +185,6 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) | |||
| 183 | } | 185 | } |
| 184 | #endif | 186 | #endif |
| 185 | } | 187 | } |
| 186 | #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) | ||
| 187 | 188 | ||
| 188 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 189 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
| 189 | { | 190 | { |
| @@ -390,15 +391,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | |||
| 390 | 391 | ||
| 391 | extern void __update_tlb(struct vm_area_struct *vma, unsigned long address, | 392 | extern void __update_tlb(struct vm_area_struct *vma, unsigned long address, |
| 392 | pte_t pte); | 393 | pte_t pte); |
| 393 | extern void __update_cache(struct vm_area_struct *vma, unsigned long address, | ||
| 394 | pte_t pte); | ||
| 395 | 394 | ||
| 396 | static inline void update_mmu_cache(struct vm_area_struct *vma, | 395 | static inline void update_mmu_cache(struct vm_area_struct *vma, |
| 397 | unsigned long address, pte_t *ptep) | 396 | unsigned long address, pte_t *ptep) |
| 398 | { | 397 | { |
| 399 | pte_t pte = *ptep; | 398 | pte_t pte = *ptep; |
| 400 | __update_tlb(vma, address, pte); | 399 | __update_tlb(vma, address, pte); |
| 401 | __update_cache(vma, address, pte); | ||
| 402 | } | 400 | } |
| 403 | 401 | ||
| 404 | static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, | 402 | static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, |
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h index 17960fe7a8ce..cdf68b33bd65 100644 --- a/arch/mips/include/asm/syscall.h +++ b/arch/mips/include/asm/syscall.h | |||
| @@ -131,10 +131,12 @@ static inline int syscall_get_arch(void) | |||
| 131 | { | 131 | { |
| 132 | int arch = EM_MIPS; | 132 | int arch = EM_MIPS; |
| 133 | #ifdef CONFIG_64BIT | 133 | #ifdef CONFIG_64BIT |
| 134 | if (!test_thread_flag(TIF_32BIT_REGS)) | 134 | if (!test_thread_flag(TIF_32BIT_REGS)) { |
| 135 | arch |= __AUDIT_ARCH_64BIT; | 135 | arch |= __AUDIT_ARCH_64BIT; |
| 136 | if (test_thread_flag(TIF_32BIT_ADDR)) | 136 | /* N32 sets only TIF_32BIT_ADDR */ |
| 137 | arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32; | 137 | if (test_thread_flag(TIF_32BIT_ADDR)) |
| 138 | arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32; | ||
| 139 | } | ||
| 138 | #endif | 140 | #endif |
| 139 | #if defined(__LITTLE_ENDIAN) | 141 | #if defined(__LITTLE_ENDIAN) |
| 140 | arch |= __AUDIT_ARCH_LE; | 142 | arch |= __AUDIT_ARCH_LE; |
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 6f4f739dad96..e6e97d2a5c9e 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <asm/asm-offsets.h> | 13 | #include <asm/asm-offsets.h> |
| 14 | #include <asm/asmmacro.h> | 14 | #include <asm/asmmacro.h> |
| 15 | #include <asm/cacheops.h> | 15 | #include <asm/cacheops.h> |
| 16 | #include <asm/eva.h> | ||
| 16 | #include <asm/mipsregs.h> | 17 | #include <asm/mipsregs.h> |
| 17 | #include <asm/mipsmtregs.h> | 18 | #include <asm/mipsmtregs.h> |
| 18 | #include <asm/pm.h> | 19 | #include <asm/pm.h> |
| @@ -166,6 +167,9 @@ dcache_done: | |||
| 166 | 1: jal mips_cps_core_init | 167 | 1: jal mips_cps_core_init |
| 167 | nop | 168 | nop |
| 168 | 169 | ||
| 170 | /* Do any EVA initialization if necessary */ | ||
| 171 | eva_init | ||
| 172 | |||
| 169 | /* | 173 | /* |
| 170 | * Boot any other VPEs within this core that should be online, and | 174 | * Boot any other VPEs within this core that should be online, and |
| 171 | * deactivate this VPE if it should be offline. | 175 | * deactivate this VPE if it should be offline. |
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 14bf74b0f51c..b63f2482f288 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c | |||
| @@ -558,7 +558,7 @@ static int mipspmu_get_irq(void) | |||
| 558 | if (mipspmu.irq >= 0) { | 558 | if (mipspmu.irq >= 0) { |
| 559 | /* Request my own irq handler. */ | 559 | /* Request my own irq handler. */ |
| 560 | err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq, | 560 | err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq, |
| 561 | IRQF_PERCPU | IRQF_NOBALANCING, | 561 | IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD, |
| 562 | "mips_perf_pmu", NULL); | 562 | "mips_perf_pmu", NULL); |
| 563 | if (err) { | 563 | if (err) { |
| 564 | pr_warning("Unable to request IRQ%d for MIPS " | 564 | pr_warning("Unable to request IRQ%d for MIPS " |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 13b964fddc4a..25bb8400156d 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
| @@ -113,15 +113,19 @@ trace_a_syscall: | |||
| 113 | move s0, t2 # Save syscall pointer | 113 | move s0, t2 # Save syscall pointer |
| 114 | move a0, sp | 114 | move a0, sp |
| 115 | /* | 115 | /* |
| 116 | * syscall number is in v0 unless we called syscall(__NR_###) | 116 | * absolute syscall number is in v0 unless we called syscall(__NR_###) |
| 117 | * where the real syscall number is in a0 | 117 | * where the real syscall number is in a0 |
| 118 | * note: NR_syscall is the first O32 syscall but the macro is | 118 | * note: NR_syscall is the first O32 syscall but the macro is |
| 119 | * only defined when compiling with -mabi=32 (CONFIG_32BIT) | 119 | * only defined when compiling with -mabi=32 (CONFIG_32BIT) |
| 120 | * therefore __NR_O32_Linux is used (4000) | 120 | * therefore __NR_O32_Linux is used (4000) |
| 121 | */ | 121 | */ |
| 122 | addiu a1, v0, __NR_O32_Linux | 122 | .set push |
| 123 | bnez v0, 1f /* __NR_syscall at offset 0 */ | 123 | .set reorder |
| 124 | lw a1, PT_R4(sp) | 124 | subu t1, v0, __NR_O32_Linux |
| 125 | move a1, v0 | ||
| 126 | bnez t1, 1f /* __NR_syscall at offset 0 */ | ||
| 127 | lw a1, PT_R4(sp) /* Arg1 for __NR_syscall case */ | ||
| 128 | .set pop | ||
| 125 | 129 | ||
| 126 | 1: jal syscall_trace_enter | 130 | 1: jal syscall_trace_enter |
| 127 | 131 | ||
diff --git a/arch/mips/loongson/loongson-3/cop2-ex.c b/arch/mips/loongson/loongson-3/cop2-ex.c index 9182e8d2967c..b03e37d2071a 100644 --- a/arch/mips/loongson/loongson-3/cop2-ex.c +++ b/arch/mips/loongson/loongson-3/cop2-ex.c | |||
| @@ -22,13 +22,13 @@ | |||
| 22 | static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action, | 22 | static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action, |
| 23 | void *data) | 23 | void *data) |
| 24 | { | 24 | { |
| 25 | int fpu_enabled; | 25 | int fpu_owned; |
| 26 | int fr = !test_thread_flag(TIF_32BIT_FPREGS); | 26 | int fr = !test_thread_flag(TIF_32BIT_FPREGS); |
| 27 | 27 | ||
| 28 | switch (action) { | 28 | switch (action) { |
| 29 | case CU2_EXCEPTION: | 29 | case CU2_EXCEPTION: |
| 30 | preempt_disable(); | 30 | preempt_disable(); |
| 31 | fpu_enabled = read_c0_status() & ST0_CU1; | 31 | fpu_owned = __is_fpu_owner(); |
| 32 | if (!fr) | 32 | if (!fr) |
| 33 | set_c0_status(ST0_CU1 | ST0_CU2); | 33 | set_c0_status(ST0_CU1 | ST0_CU2); |
| 34 | else | 34 | else |
| @@ -39,8 +39,8 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action, | |||
| 39 | KSTK_STATUS(current) |= ST0_FR; | 39 | KSTK_STATUS(current) |= ST0_FR; |
| 40 | else | 40 | else |
| 41 | KSTK_STATUS(current) &= ~ST0_FR; | 41 | KSTK_STATUS(current) &= ~ST0_FR; |
| 42 | /* If FPU is enabled, we needn't init or restore fp */ | 42 | /* If FPU is owned, we needn't init or restore fp */ |
| 43 | if(!fpu_enabled) { | 43 | if (!fpu_owned) { |
| 44 | set_thread_flag(TIF_USEDFPU); | 44 | set_thread_flag(TIF_USEDFPU); |
| 45 | if (!used_math()) { | 45 | if (!used_math()) { |
| 46 | _init_fpu(); | 46 | _init_fpu(); |
diff --git a/arch/mips/loongson/loongson-3/numa.c b/arch/mips/loongson/loongson-3/numa.c index ca025a6ba559..37ed184398c6 100644 --- a/arch/mips/loongson/loongson-3/numa.c +++ b/arch/mips/loongson/loongson-3/numa.c | |||
| @@ -24,8 +24,6 @@ | |||
| 24 | #include <asm/page.h> | 24 | #include <asm/page.h> |
| 25 | #include <asm/pgalloc.h> | 25 | #include <asm/pgalloc.h> |
| 26 | #include <asm/sections.h> | 26 | #include <asm/sections.h> |
| 27 | #include <linux/bootmem.h> | ||
| 28 | #include <linux/init.h> | ||
| 29 | #include <linux/irq.h> | 27 | #include <linux/irq.h> |
| 30 | #include <asm/bootinfo.h> | 28 | #include <asm/bootinfo.h> |
| 31 | #include <asm/mc146818-time.h> | 29 | #include <asm/mc146818-time.h> |
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index f7b91d3a371d..7e3ea7766822 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c | |||
| @@ -119,25 +119,36 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr) | |||
| 119 | 119 | ||
| 120 | EXPORT_SYMBOL(__flush_anon_page); | 120 | EXPORT_SYMBOL(__flush_anon_page); |
| 121 | 121 | ||
| 122 | void __update_cache(struct vm_area_struct *vma, unsigned long address, | 122 | static void mips_flush_dcache_from_pte(pte_t pteval, unsigned long address) |
| 123 | pte_t pte) | ||
| 124 | { | 123 | { |
| 125 | struct page *page; | 124 | struct page *page; |
| 126 | unsigned long pfn, addr; | 125 | unsigned long pfn = pte_pfn(pteval); |
| 127 | int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc; | ||
| 128 | 126 | ||
| 129 | pfn = pte_pfn(pte); | ||
| 130 | if (unlikely(!pfn_valid(pfn))) | 127 | if (unlikely(!pfn_valid(pfn))) |
| 131 | return; | 128 | return; |
| 129 | |||
| 132 | page = pfn_to_page(pfn); | 130 | page = pfn_to_page(pfn); |
| 133 | if (page_mapping(page) && Page_dcache_dirty(page)) { | 131 | if (page_mapping(page) && Page_dcache_dirty(page)) { |
| 134 | addr = (unsigned long) page_address(page); | 132 | unsigned long page_addr = (unsigned long) page_address(page); |
| 135 | if (exec || pages_do_alias(addr, address & PAGE_MASK)) | 133 | |
| 136 | flush_data_cache_page(addr); | 134 | if (!cpu_has_ic_fills_f_dc || |
| 135 | pages_do_alias(page_addr, address & PAGE_MASK)) | ||
| 136 | flush_data_cache_page(page_addr); | ||
| 137 | ClearPageDcacheDirty(page); | 137 | ClearPageDcacheDirty(page); |
| 138 | } | 138 | } |
| 139 | } | 139 | } |
| 140 | 140 | ||
| 141 | void set_pte_at(struct mm_struct *mm, unsigned long addr, | ||
| 142 | pte_t *ptep, pte_t pteval) | ||
| 143 | { | ||
| 144 | if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) { | ||
| 145 | if (pte_present(pteval)) | ||
| 146 | mips_flush_dcache_from_pte(pteval, addr); | ||
| 147 | } | ||
| 148 | |||
| 149 | set_pte(ptep, pteval); | ||
| 150 | } | ||
| 151 | |||
| 141 | unsigned long _page_cachable_default; | 152 | unsigned long _page_cachable_default; |
| 142 | EXPORT_SYMBOL(_page_cachable_default); | 153 | EXPORT_SYMBOL(_page_cachable_default); |
| 143 | 154 | ||
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c index 0c35dee0a215..8fddd2cdbff7 100644 --- a/arch/mips/mti-malta/malta-memory.c +++ b/arch/mips/mti-malta/malta-memory.c | |||
| @@ -35,13 +35,19 @@ fw_memblock_t * __init fw_getmdesc(int eva) | |||
| 35 | /* otherwise look in the environment */ | 35 | /* otherwise look in the environment */ |
| 36 | 36 | ||
| 37 | memsize_str = fw_getenv("memsize"); | 37 | memsize_str = fw_getenv("memsize"); |
| 38 | if (memsize_str) | 38 | if (memsize_str) { |
| 39 | tmp = kstrtol(memsize_str, 0, &memsize); | 39 | tmp = kstrtoul(memsize_str, 0, &memsize); |
| 40 | if (tmp) | ||
| 41 | pr_warn("Failed to read the 'memsize' env variable.\n"); | ||
| 42 | } | ||
| 40 | if (eva) { | 43 | if (eva) { |
| 41 | /* Look for ememsize for EVA */ | 44 | /* Look for ememsize for EVA */ |
| 42 | ememsize_str = fw_getenv("ememsize"); | 45 | ememsize_str = fw_getenv("ememsize"); |
| 43 | if (ememsize_str) | 46 | if (ememsize_str) { |
| 44 | tmp = kstrtol(ememsize_str, 0, &ememsize); | 47 | tmp = kstrtoul(ememsize_str, 0, &ememsize); |
| 48 | if (tmp) | ||
| 49 | pr_warn("Failed to read the 'ememsize' env variable.\n"); | ||
| 50 | } | ||
| 45 | } | 51 | } |
| 46 | if (!memsize && !ememsize) { | 52 | if (!memsize && !ememsize) { |
| 47 | pr_warn("memsize not set in YAMON, set to default (32Mb)\n"); | 53 | pr_warn("memsize not set in YAMON, set to default (32Mb)\n"); |
diff --git a/arch/mips/pmcs-msp71xx/msp_irq.c b/arch/mips/pmcs-msp71xx/msp_irq.c index 941744aabb51..f914c753de21 100644 --- a/arch/mips/pmcs-msp71xx/msp_irq.c +++ b/arch/mips/pmcs-msp71xx/msp_irq.c | |||
| @@ -51,7 +51,7 @@ static inline void sec_int_dispatch(void) { do_IRQ(MSP_INT_SEC); } | |||
| 51 | * the range 40-71. | 51 | * the range 40-71. |
| 52 | */ | 52 | */ |
| 53 | 53 | ||
| 54 | asmlinkage void plat_irq_dispatch(struct pt_regs *regs) | 54 | asmlinkage void plat_irq_dispatch(void) |
| 55 | { | 55 | { |
| 56 | u32 pending; | 56 | u32 pending; |
| 57 | 57 | ||
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h index 3802d2d3a18d..940ac49198db 100644 --- a/arch/s390/include/uapi/asm/unistd.h +++ b/arch/s390/include/uapi/asm/unistd.h | |||
| @@ -283,7 +283,10 @@ | |||
| 283 | #define __NR_sched_setattr 345 | 283 | #define __NR_sched_setattr 345 |
| 284 | #define __NR_sched_getattr 346 | 284 | #define __NR_sched_getattr 346 |
| 285 | #define __NR_renameat2 347 | 285 | #define __NR_renameat2 347 |
| 286 | #define NR_syscalls 348 | 286 | #define __NR_seccomp 348 |
| 287 | #define __NR_getrandom 349 | ||
| 288 | #define __NR_memfd_create 350 | ||
| 289 | #define NR_syscalls 351 | ||
| 287 | 290 | ||
| 288 | /* | 291 | /* |
| 289 | * There are some system calls that are not present on 64 bit, some | 292 | * There are some system calls that are not present on 64 bit, some |
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c index 45cdb37aa6f8..faf6caa510dc 100644 --- a/arch/s390/kernel/compat_wrapper.c +++ b/arch/s390/kernel/compat_wrapper.c | |||
| @@ -214,3 +214,6 @@ COMPAT_SYSCALL_WRAP3(finit_module, int, fd, const char __user *, uargs, int, fla | |||
| 214 | COMPAT_SYSCALL_WRAP3(sched_setattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, flags); | 214 | COMPAT_SYSCALL_WRAP3(sched_setattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, flags); |
| 215 | COMPAT_SYSCALL_WRAP4(sched_getattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, size, unsigned int, flags); | 215 | COMPAT_SYSCALL_WRAP4(sched_getattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, size, unsigned int, flags); |
| 216 | COMPAT_SYSCALL_WRAP5(renameat2, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, unsigned int, flags); | 216 | COMPAT_SYSCALL_WRAP5(renameat2, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, unsigned int, flags); |
| 217 | COMPAT_SYSCALL_WRAP3(seccomp, unsigned int, op, unsigned int, flags, const char __user *, uargs) | ||
| 218 | COMPAT_SYSCALL_WRAP3(getrandom, char __user *, buf, size_t, count, unsigned int, flags) | ||
| 219 | COMPAT_SYSCALL_WRAP2(memfd_create, const char __user *, uname, unsigned int, flags) | ||
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 633ca7504536..22aac5885ba2 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
| @@ -2060,6 +2060,13 @@ void s390_reset_system(void (*func)(void *), void *data) | |||
| 2060 | S390_lowcore.program_new_psw.addr = | 2060 | S390_lowcore.program_new_psw.addr = |
| 2061 | PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; | 2061 | PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; |
| 2062 | 2062 | ||
| 2063 | /* | ||
| 2064 | * Clear subchannel ID and number to signal new kernel that no CCW or | ||
| 2065 | * SCSI IPL has been done (for kexec and kdump) | ||
| 2066 | */ | ||
| 2067 | S390_lowcore.subchannel_id = 0; | ||
| 2068 | S390_lowcore.subchannel_nr = 0; | ||
| 2069 | |||
| 2063 | /* Store status at absolute zero */ | 2070 | /* Store status at absolute zero */ |
| 2064 | store_status(); | 2071 | store_status(); |
| 2065 | 2072 | ||
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index ae1d5be7dd88..82bc113e8c1d 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include <linux/stddef.h> | 24 | #include <linux/stddef.h> |
| 25 | #include <linux/unistd.h> | 25 | #include <linux/unistd.h> |
| 26 | #include <linux/ptrace.h> | 26 | #include <linux/ptrace.h> |
| 27 | #include <linux/random.h> | ||
| 27 | #include <linux/user.h> | 28 | #include <linux/user.h> |
| 28 | #include <linux/tty.h> | 29 | #include <linux/tty.h> |
| 29 | #include <linux/ioport.h> | 30 | #include <linux/ioport.h> |
| @@ -61,6 +62,7 @@ | |||
| 61 | #include <asm/diag.h> | 62 | #include <asm/diag.h> |
| 62 | #include <asm/os_info.h> | 63 | #include <asm/os_info.h> |
| 63 | #include <asm/sclp.h> | 64 | #include <asm/sclp.h> |
| 65 | #include <asm/sysinfo.h> | ||
| 64 | #include "entry.h" | 66 | #include "entry.h" |
| 65 | 67 | ||
| 66 | /* | 68 | /* |
| @@ -766,6 +768,7 @@ static void __init setup_hwcaps(void) | |||
| 766 | #endif | 768 | #endif |
| 767 | 769 | ||
| 768 | get_cpu_id(&cpu_id); | 770 | get_cpu_id(&cpu_id); |
| 771 | add_device_randomness(&cpu_id, sizeof(cpu_id)); | ||
| 769 | switch (cpu_id.machine) { | 772 | switch (cpu_id.machine) { |
| 770 | case 0x9672: | 773 | case 0x9672: |
| 771 | #if !defined(CONFIG_64BIT) | 774 | #if !defined(CONFIG_64BIT) |
| @@ -804,6 +807,19 @@ static void __init setup_hwcaps(void) | |||
| 804 | } | 807 | } |
| 805 | 808 | ||
| 806 | /* | 809 | /* |
| 810 | * Add system information as device randomness | ||
| 811 | */ | ||
| 812 | static void __init setup_randomness(void) | ||
| 813 | { | ||
| 814 | struct sysinfo_3_2_2 *vmms; | ||
| 815 | |||
| 816 | vmms = (struct sysinfo_3_2_2 *) alloc_page(GFP_KERNEL); | ||
| 817 | if (vmms && stsi(vmms, 3, 2, 2) == 0 && vmms->count) | ||
| 818 | add_device_randomness(&vmms, vmms->count); | ||
| 819 | free_page((unsigned long) vmms); | ||
| 820 | } | ||
| 821 | |||
| 822 | /* | ||
| 807 | * Setup function called from init/main.c just after the banner | 823 | * Setup function called from init/main.c just after the banner |
| 808 | * was printed. | 824 | * was printed. |
| 809 | */ | 825 | */ |
| @@ -901,6 +917,9 @@ void __init setup_arch(char **cmdline_p) | |||
| 901 | 917 | ||
| 902 | /* Setup zfcpdump support */ | 918 | /* Setup zfcpdump support */ |
| 903 | setup_zfcpdump(); | 919 | setup_zfcpdump(); |
| 920 | |||
| 921 | /* Add system specific data to the random pool */ | ||
| 922 | setup_randomness(); | ||
| 904 | } | 923 | } |
| 905 | 924 | ||
| 906 | #ifdef CONFIG_32BIT | 925 | #ifdef CONFIG_32BIT |
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index fe5cdf29a001..6fe886ac2db5 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S | |||
| @@ -356,3 +356,6 @@ SYSCALL(sys_finit_module,sys_finit_module,compat_sys_finit_module) | |||
| 356 | SYSCALL(sys_sched_setattr,sys_sched_setattr,compat_sys_sched_setattr) /* 345 */ | 356 | SYSCALL(sys_sched_setattr,sys_sched_setattr,compat_sys_sched_setattr) /* 345 */ |
| 357 | SYSCALL(sys_sched_getattr,sys_sched_getattr,compat_sys_sched_getattr) | 357 | SYSCALL(sys_sched_getattr,sys_sched_getattr,compat_sys_sched_getattr) |
| 358 | SYSCALL(sys_renameat2,sys_renameat2,compat_sys_renameat2) | 358 | SYSCALL(sys_renameat2,sys_renameat2,compat_sys_renameat2) |
| 359 | SYSCALL(sys_seccomp,sys_seccomp,compat_sys_seccomp) | ||
| 360 | SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom) | ||
| 361 | SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */ | ||
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 453fa5c09550..b319846ad97f 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
| @@ -172,6 +172,7 @@ menu "System type" | |||
| 172 | # | 172 | # |
| 173 | config CPU_SH2 | 173 | config CPU_SH2 |
| 174 | bool | 174 | bool |
| 175 | select SH_INTC | ||
| 175 | 176 | ||
| 176 | config CPU_SH2A | 177 | config CPU_SH2A |
| 177 | bool | 178 | bool |
| @@ -182,6 +183,7 @@ config CPU_SH3 | |||
| 182 | bool | 183 | bool |
| 183 | select CPU_HAS_INTEVT | 184 | select CPU_HAS_INTEVT |
| 184 | select CPU_HAS_SR_RB | 185 | select CPU_HAS_SR_RB |
| 186 | select SH_INTC | ||
| 185 | select SYS_SUPPORTS_SH_TMU | 187 | select SYS_SUPPORTS_SH_TMU |
| 186 | 188 | ||
| 187 | config CPU_SH4 | 189 | config CPU_SH4 |
| @@ -189,6 +191,7 @@ config CPU_SH4 | |||
| 189 | select CPU_HAS_INTEVT | 191 | select CPU_HAS_INTEVT |
| 190 | select CPU_HAS_SR_RB | 192 | select CPU_HAS_SR_RB |
| 191 | select CPU_HAS_FPU if !CPU_SH4AL_DSP | 193 | select CPU_HAS_FPU if !CPU_SH4AL_DSP |
| 194 | select SH_INTC | ||
| 192 | select SYS_SUPPORTS_SH_TMU | 195 | select SYS_SUPPORTS_SH_TMU |
| 193 | select SYS_SUPPORTS_HUGETLBFS if MMU | 196 | select SYS_SUPPORTS_HUGETLBFS if MMU |
| 194 | 197 | ||
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 47c410d99f5d..4b0e1dfa2226 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
| @@ -683,7 +683,7 @@ END(syscall_badsys) | |||
| 683 | sysenter_badsys: | 683 | sysenter_badsys: |
| 684 | movl $-ENOSYS,%eax | 684 | movl $-ENOSYS,%eax |
| 685 | jmp sysenter_after_call | 685 | jmp sysenter_after_call |
| 686 | END(syscall_badsys) | 686 | END(sysenter_badsys) |
| 687 | CFI_ENDPROC | 687 | CFI_ENDPROC |
| 688 | 688 | ||
| 689 | .macro FIXUP_ESPFIX_STACK | 689 | .macro FIXUP_ESPFIX_STACK |
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 1fe33987de02..ee61c36d64f8 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c | |||
| @@ -49,7 +49,13 @@ void leave_mm(int cpu) | |||
| 49 | if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) { | 49 | if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) { |
| 50 | cpumask_clear_cpu(cpu, mm_cpumask(active_mm)); | 50 | cpumask_clear_cpu(cpu, mm_cpumask(active_mm)); |
| 51 | load_cr3(swapper_pg_dir); | 51 | load_cr3(swapper_pg_dir); |
| 52 | trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); | 52 | /* |
| 53 | * This gets called in the idle path where RCU | ||
| 54 | * functions differently. Tracing normally | ||
| 55 | * uses RCU, so we have to call the tracepoint | ||
| 56 | * specially here. | ||
| 57 | */ | ||
| 58 | trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); | ||
| 53 | } | 59 | } |
| 54 | } | 60 | } |
| 55 | EXPORT_SYMBOL_GPL(leave_mm); | 61 | EXPORT_SYMBOL_GPL(leave_mm); |
| @@ -174,7 +180,7 @@ void flush_tlb_current_task(void) | |||
| 174 | * | 180 | * |
| 175 | * This is in units of pages. | 181 | * This is in units of pages. |
| 176 | */ | 182 | */ |
| 177 | unsigned long tlb_single_page_flush_ceiling = 33; | 183 | static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33; |
| 178 | 184 | ||
| 179 | void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, | 185 | void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, |
| 180 | unsigned long end, unsigned long vmflag) | 186 | unsigned long end, unsigned long vmflag) |
diff --git a/drivers/ata/ahci_tegra.c b/drivers/ata/ahci_tegra.c index fc3df47fca35..f1fef74e503c 100644 --- a/drivers/ata/ahci_tegra.c +++ b/drivers/ata/ahci_tegra.c | |||
| @@ -24,8 +24,8 @@ | |||
| 24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
| 25 | #include <linux/of_device.h> | 25 | #include <linux/of_device.h> |
| 26 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
| 27 | #include <linux/tegra-powergate.h> | ||
| 28 | #include <linux/regulator/consumer.h> | 27 | #include <linux/regulator/consumer.h> |
| 28 | #include <soc/tegra/pmc.h> | ||
| 29 | #include "ahci.h" | 29 | #include "ahci.h" |
| 30 | 30 | ||
| 31 | #define SATA_CONFIGURATION_0 0x180 | 31 | #define SATA_CONFIGURATION_0 0x180 |
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c index bc281115490b..c6962300b93c 100644 --- a/drivers/ata/ahci_xgene.c +++ b/drivers/ata/ahci_xgene.c | |||
| @@ -344,7 +344,7 @@ static struct ata_port_operations xgene_ahci_ops = { | |||
| 344 | }; | 344 | }; |
| 345 | 345 | ||
| 346 | static const struct ata_port_info xgene_ahci_port_info = { | 346 | static const struct ata_port_info xgene_ahci_port_info = { |
| 347 | .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ, | 347 | .flags = AHCI_FLAG_COMMON, |
| 348 | .pio_mask = ATA_PIO4, | 348 | .pio_mask = ATA_PIO4, |
| 349 | .udma_mask = ATA_UDMA6, | 349 | .udma_mask = ATA_UDMA6, |
| 350 | .port_ops = &xgene_ahci_ops, | 350 | .port_ops = &xgene_ahci_ops, |
| @@ -480,7 +480,7 @@ static int xgene_ahci_probe(struct platform_device *pdev) | |||
| 480 | /* Configure the host controller */ | 480 | /* Configure the host controller */ |
| 481 | xgene_ahci_hw_init(hpriv); | 481 | xgene_ahci_hw_init(hpriv); |
| 482 | 482 | ||
| 483 | hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ; | 483 | hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_NCQ; |
| 484 | 484 | ||
| 485 | rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info); | 485 | rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info); |
| 486 | if (rc) | 486 | if (rc) |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index dbdc5d32343f..f3e7b9f894cd 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
| @@ -4228,7 +4228,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
| 4228 | { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, | 4228 | { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, |
| 4229 | { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, | 4229 | { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, |
| 4230 | { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, | 4230 | { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, |
| 4231 | { "Crucial_CT???M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, | 4231 | { "Crucial_CT*M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, |
| 4232 | 4232 | ||
| 4233 | /* | 4233 | /* |
| 4234 | * Some WD SATA-I drives spin up and down erratically when the link | 4234 | * Some WD SATA-I drives spin up and down erratically when the link |
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c index 2578fc16960a..1a24a5dc3940 100644 --- a/drivers/ata/pata_samsung_cf.c +++ b/drivers/ata/pata_samsung_cf.c | |||
| @@ -360,7 +360,7 @@ static int pata_s3c_wait_after_reset(struct ata_link *link, | |||
| 360 | /* | 360 | /* |
| 361 | * pata_s3c_bus_softreset - PATA device software reset | 361 | * pata_s3c_bus_softreset - PATA device software reset |
| 362 | */ | 362 | */ |
| 363 | static unsigned int pata_s3c_bus_softreset(struct ata_port *ap, | 363 | static int pata_s3c_bus_softreset(struct ata_port *ap, |
| 364 | unsigned long deadline) | 364 | unsigned long deadline) |
| 365 | { | 365 | { |
| 366 | struct ata_ioports *ioaddr = &ap->ioaddr; | 366 | struct ata_ioports *ioaddr = &ap->ioaddr; |
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c index 4e006d74bef8..7f4cb76ed9fa 100644 --- a/drivers/ata/pata_scc.c +++ b/drivers/ata/pata_scc.c | |||
| @@ -585,7 +585,7 @@ static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask, | |||
| 585 | * Note: Original code is ata_bus_softreset(). | 585 | * Note: Original code is ata_bus_softreset(). |
| 586 | */ | 586 | */ |
| 587 | 587 | ||
| 588 | static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask, | 588 | static int scc_bus_softreset(struct ata_port *ap, unsigned int devmask, |
| 589 | unsigned long deadline) | 589 | unsigned long deadline) |
| 590 | { | 590 | { |
| 591 | struct ata_ioports *ioaddr = &ap->ioaddr; | 591 | struct ata_ioports *ioaddr = &ap->ioaddr; |
| @@ -599,9 +599,7 @@ static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask, | |||
| 599 | udelay(20); | 599 | udelay(20); |
| 600 | out_be32(ioaddr->ctl_addr, ap->ctl); | 600 | out_be32(ioaddr->ctl_addr, ap->ctl); |
| 601 | 601 | ||
| 602 | scc_wait_after_reset(&ap->link, devmask, deadline); | 602 | return scc_wait_after_reset(&ap->link, devmask, deadline); |
| 603 | |||
| 604 | return 0; | ||
| 605 | } | 603 | } |
| 606 | 604 | ||
| 607 | /** | 605 | /** |
| @@ -618,7 +616,8 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes, | |||
| 618 | { | 616 | { |
| 619 | struct ata_port *ap = link->ap; | 617 | struct ata_port *ap = link->ap; |
| 620 | unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; | 618 | unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; |
| 621 | unsigned int devmask = 0, err_mask; | 619 | unsigned int devmask = 0; |
| 620 | int rc; | ||
| 622 | u8 err; | 621 | u8 err; |
| 623 | 622 | ||
| 624 | DPRINTK("ENTER\n"); | 623 | DPRINTK("ENTER\n"); |
| @@ -634,9 +633,9 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes, | |||
| 634 | 633 | ||
| 635 | /* issue bus reset */ | 634 | /* issue bus reset */ |
| 636 | DPRINTK("about to softreset, devmask=%x\n", devmask); | 635 | DPRINTK("about to softreset, devmask=%x\n", devmask); |
| 637 | err_mask = scc_bus_softreset(ap, devmask, deadline); | 636 | rc = scc_bus_softreset(ap, devmask, deadline); |
| 638 | if (err_mask) { | 637 | if (rc) { |
| 639 | ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", err_mask); | 638 | ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", rc); |
| 640 | return -EIO; | 639 | return -EIO; |
| 641 | } | 640 | } |
| 642 | 641 | ||
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c index 3266f8ff9311..6f550d9e7a2d 100644 --- a/drivers/bus/arm-ccn.c +++ b/drivers/bus/arm-ccn.c | |||
| @@ -662,7 +662,7 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) | |||
| 662 | } | 662 | } |
| 663 | if (e->num_vcs && vc >= e->num_vcs) { | 663 | if (e->num_vcs && vc >= e->num_vcs) { |
| 664 | dev_warn(ccn->dev, "Invalid vc %d for node/XP %d!\n", | 664 | dev_warn(ccn->dev, "Invalid vc %d for node/XP %d!\n", |
| 665 | port, node_xp); | 665 | vc, node_xp); |
| 666 | return -EINVAL; | 666 | return -EINVAL; |
| 667 | } | 667 | } |
| 668 | valid = 1; | 668 | valid = 1; |
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index f0a43646a2f3..5abe943e3404 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c | |||
| @@ -481,7 +481,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_remove); | |||
| 481 | */ | 481 | */ |
| 482 | static void efivar_entry_list_del_unlock(struct efivar_entry *entry) | 482 | static void efivar_entry_list_del_unlock(struct efivar_entry *entry) |
| 483 | { | 483 | { |
| 484 | WARN_ON(!spin_is_locked(&__efivars->lock)); | 484 | lockdep_assert_held(&__efivars->lock); |
| 485 | 485 | ||
| 486 | list_del(&entry->list); | 486 | list_del(&entry->list); |
| 487 | spin_unlock_irq(&__efivars->lock); | 487 | spin_unlock_irq(&__efivars->lock); |
| @@ -507,7 +507,7 @@ int __efivar_entry_delete(struct efivar_entry *entry) | |||
| 507 | const struct efivar_operations *ops = __efivars->ops; | 507 | const struct efivar_operations *ops = __efivars->ops; |
| 508 | efi_status_t status; | 508 | efi_status_t status; |
| 509 | 509 | ||
| 510 | WARN_ON(!spin_is_locked(&__efivars->lock)); | 510 | lockdep_assert_held(&__efivars->lock); |
| 511 | 511 | ||
| 512 | status = ops->set_variable(entry->var.VariableName, | 512 | status = ops->set_variable(entry->var.VariableName, |
| 513 | &entry->var.VendorGuid, | 513 | &entry->var.VendorGuid, |
| @@ -667,7 +667,7 @@ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid, | |||
| 667 | int strsize1, strsize2; | 667 | int strsize1, strsize2; |
| 668 | bool found = false; | 668 | bool found = false; |
| 669 | 669 | ||
| 670 | WARN_ON(!spin_is_locked(&__efivars->lock)); | 670 | lockdep_assert_held(&__efivars->lock); |
| 671 | 671 | ||
| 672 | list_for_each_entry_safe(entry, n, head, list) { | 672 | list_for_each_entry_safe(entry, n, head, list) { |
| 673 | strsize1 = ucs2_strsize(name, 1024); | 673 | strsize1 = ucs2_strsize(name, 1024); |
| @@ -739,7 +739,7 @@ int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes, | |||
| 739 | const struct efivar_operations *ops = __efivars->ops; | 739 | const struct efivar_operations *ops = __efivars->ops; |
| 740 | efi_status_t status; | 740 | efi_status_t status; |
| 741 | 741 | ||
| 742 | WARN_ON(!spin_is_locked(&__efivars->lock)); | 742 | lockdep_assert_held(&__efivars->lock); |
| 743 | 743 | ||
| 744 | status = ops->get_variable(entry->var.VariableName, | 744 | status = ops->get_variable(entry->var.VariableName, |
| 745 | &entry->var.VendorGuid, | 745 | &entry->var.VendorGuid, |
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c index 41b2f40578d5..954b9f6b0ef8 100644 --- a/drivers/gpio/devres.c +++ b/drivers/gpio/devres.c | |||
| @@ -90,7 +90,7 @@ struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev, | |||
| 90 | struct gpio_desc **dr; | 90 | struct gpio_desc **dr; |
| 91 | struct gpio_desc *desc; | 91 | struct gpio_desc *desc; |
| 92 | 92 | ||
| 93 | dr = devres_alloc(devm_gpiod_release, sizeof(struct gpiod_desc *), | 93 | dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *), |
| 94 | GFP_KERNEL); | 94 | GFP_KERNEL); |
| 95 | if (!dr) | 95 | if (!dr) |
| 96 | return ERR_PTR(-ENOMEM); | 96 | return ERR_PTR(-ENOMEM); |
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c index ff9eb911b5e4..fa945ec9ccff 100644 --- a/drivers/gpio/gpio-lynxpoint.c +++ b/drivers/gpio/gpio-lynxpoint.c | |||
| @@ -407,9 +407,27 @@ static int lp_gpio_runtime_resume(struct device *dev) | |||
| 407 | return 0; | 407 | return 0; |
| 408 | } | 408 | } |
| 409 | 409 | ||
| 410 | static int lp_gpio_resume(struct device *dev) | ||
| 411 | { | ||
| 412 | struct platform_device *pdev = to_platform_device(dev); | ||
| 413 | struct lp_gpio *lg = platform_get_drvdata(pdev); | ||
| 414 | unsigned long reg; | ||
| 415 | int i; | ||
| 416 | |||
| 417 | /* on some hardware suspend clears input sensing, re-enable it here */ | ||
| 418 | for (i = 0; i < lg->chip.ngpio; i++) { | ||
| 419 | if (gpiochip_is_requested(&lg->chip, i) != NULL) { | ||
| 420 | reg = lp_gpio_reg(&lg->chip, i, LP_CONFIG2); | ||
| 421 | outl(inl(reg) & ~GPINDIS_BIT, reg); | ||
| 422 | } | ||
| 423 | } | ||
| 424 | return 0; | ||
| 425 | } | ||
| 426 | |||
| 410 | static const struct dev_pm_ops lp_gpio_pm_ops = { | 427 | static const struct dev_pm_ops lp_gpio_pm_ops = { |
| 411 | .runtime_suspend = lp_gpio_runtime_suspend, | 428 | .runtime_suspend = lp_gpio_runtime_suspend, |
| 412 | .runtime_resume = lp_gpio_runtime_resume, | 429 | .runtime_resume = lp_gpio_runtime_resume, |
| 430 | .resume = lp_gpio_resume, | ||
| 413 | }; | 431 | }; |
| 414 | 432 | ||
| 415 | static const struct acpi_device_id lynxpoint_gpio_acpi_match[] = { | 433 | static const struct acpi_device_id lynxpoint_gpio_acpi_match[] = { |
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index c3145f91fda3..31ad5df5dbc9 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c | |||
| @@ -95,6 +95,9 @@ struct zynq_gpio { | |||
| 95 | struct clk *clk; | 95 | struct clk *clk; |
| 96 | }; | 96 | }; |
| 97 | 97 | ||
| 98 | static struct irq_chip zynq_gpio_level_irqchip; | ||
| 99 | static struct irq_chip zynq_gpio_edge_irqchip; | ||
| 100 | |||
| 98 | /** | 101 | /** |
| 99 | * zynq_gpio_get_bank_pin - Get the bank number and pin number within that bank | 102 | * zynq_gpio_get_bank_pin - Get the bank number and pin number within that bank |
| 100 | * for a given pin in the GPIO device | 103 | * for a given pin in the GPIO device |
| @@ -410,6 +413,15 @@ static int zynq_gpio_set_irq_type(struct irq_data *irq_data, unsigned int type) | |||
| 410 | gpio->base_addr + ZYNQ_GPIO_INTPOL_OFFSET(bank_num)); | 413 | gpio->base_addr + ZYNQ_GPIO_INTPOL_OFFSET(bank_num)); |
| 411 | writel_relaxed(int_any, | 414 | writel_relaxed(int_any, |
| 412 | gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num)); | 415 | gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num)); |
| 416 | |||
| 417 | if (type & IRQ_TYPE_LEVEL_MASK) { | ||
| 418 | __irq_set_chip_handler_name_locked(irq_data->irq, | ||
| 419 | &zynq_gpio_level_irqchip, handle_fasteoi_irq, NULL); | ||
| 420 | } else { | ||
| 421 | __irq_set_chip_handler_name_locked(irq_data->irq, | ||
| 422 | &zynq_gpio_edge_irqchip, handle_level_irq, NULL); | ||
| 423 | } | ||
| 424 | |||
| 413 | return 0; | 425 | return 0; |
| 414 | } | 426 | } |
| 415 | 427 | ||
| @@ -424,9 +436,21 @@ static int zynq_gpio_set_wake(struct irq_data *data, unsigned int on) | |||
| 424 | } | 436 | } |
| 425 | 437 | ||
| 426 | /* irq chip descriptor */ | 438 | /* irq chip descriptor */ |
| 427 | static struct irq_chip zynq_gpio_irqchip = { | 439 | static struct irq_chip zynq_gpio_level_irqchip = { |
| 428 | .name = DRIVER_NAME, | 440 | .name = DRIVER_NAME, |
| 429 | .irq_enable = zynq_gpio_irq_enable, | 441 | .irq_enable = zynq_gpio_irq_enable, |
| 442 | .irq_eoi = zynq_gpio_irq_ack, | ||
| 443 | .irq_mask = zynq_gpio_irq_mask, | ||
| 444 | .irq_unmask = zynq_gpio_irq_unmask, | ||
| 445 | .irq_set_type = zynq_gpio_set_irq_type, | ||
| 446 | .irq_set_wake = zynq_gpio_set_wake, | ||
| 447 | .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED, | ||
| 448 | }; | ||
| 449 | |||
| 450 | static struct irq_chip zynq_gpio_edge_irqchip = { | ||
| 451 | .name = DRIVER_NAME, | ||
| 452 | .irq_enable = zynq_gpio_irq_enable, | ||
| 453 | .irq_ack = zynq_gpio_irq_ack, | ||
| 430 | .irq_mask = zynq_gpio_irq_mask, | 454 | .irq_mask = zynq_gpio_irq_mask, |
| 431 | .irq_unmask = zynq_gpio_irq_unmask, | 455 | .irq_unmask = zynq_gpio_irq_unmask, |
| 432 | .irq_set_type = zynq_gpio_set_irq_type, | 456 | .irq_set_type = zynq_gpio_set_irq_type, |
| @@ -469,10 +493,6 @@ static void zynq_gpio_irqhandler(unsigned int irq, struct irq_desc *desc) | |||
| 469 | offset); | 493 | offset); |
| 470 | generic_handle_irq(gpio_irq); | 494 | generic_handle_irq(gpio_irq); |
| 471 | } | 495 | } |
| 472 | |||
| 473 | /* clear IRQ in HW */ | ||
| 474 | writel_relaxed(int_sts, gpio->base_addr + | ||
| 475 | ZYNQ_GPIO_INTSTS_OFFSET(bank_num)); | ||
| 476 | } | 496 | } |
| 477 | } | 497 | } |
| 478 | 498 | ||
| @@ -610,14 +630,14 @@ static int zynq_gpio_probe(struct platform_device *pdev) | |||
| 610 | writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr + | 630 | writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr + |
| 611 | ZYNQ_GPIO_INTDIS_OFFSET(bank_num)); | 631 | ZYNQ_GPIO_INTDIS_OFFSET(bank_num)); |
| 612 | 632 | ||
| 613 | ret = gpiochip_irqchip_add(chip, &zynq_gpio_irqchip, 0, | 633 | ret = gpiochip_irqchip_add(chip, &zynq_gpio_edge_irqchip, 0, |
| 614 | handle_simple_irq, IRQ_TYPE_NONE); | 634 | handle_level_irq, IRQ_TYPE_NONE); |
| 615 | if (ret) { | 635 | if (ret) { |
| 616 | dev_err(&pdev->dev, "Failed to add irq chip\n"); | 636 | dev_err(&pdev->dev, "Failed to add irq chip\n"); |
| 617 | goto err_rm_gpiochip; | 637 | goto err_rm_gpiochip; |
| 618 | } | 638 | } |
| 619 | 639 | ||
| 620 | gpiochip_set_chained_irqchip(chip, &zynq_gpio_irqchip, irq, | 640 | gpiochip_set_chained_irqchip(chip, &zynq_gpio_edge_irqchip, irq, |
| 621 | zynq_gpio_irqhandler); | 641 | zynq_gpio_irqhandler); |
| 622 | 642 | ||
| 623 | pm_runtime_set_active(&pdev->dev); | 643 | pm_runtime_set_active(&pdev->dev); |
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 7cfdc2278905..604dbe60bdee 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c | |||
| @@ -307,7 +307,5 @@ void of_gpiochip_add(struct gpio_chip *chip) | |||
| 307 | void of_gpiochip_remove(struct gpio_chip *chip) | 307 | void of_gpiochip_remove(struct gpio_chip *chip) |
| 308 | { | 308 | { |
| 309 | gpiochip_remove_pin_ranges(chip); | 309 | gpiochip_remove_pin_ranges(chip); |
| 310 | 310 | of_node_put(chip->of_node); | |
| 311 | if (chip->of_node) | ||
| 312 | of_node_put(chip->of_node); | ||
| 313 | } | 311 | } |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ec96f9a9724c..e27cdbe9d524 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -494,6 +494,36 @@ bool i915_semaphore_is_enabled(struct drm_device *dev) | |||
| 494 | return true; | 494 | return true; |
| 495 | } | 495 | } |
| 496 | 496 | ||
| 497 | void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) | ||
| 498 | { | ||
| 499 | spin_lock_irq(&dev_priv->irq_lock); | ||
| 500 | |||
| 501 | dev_priv->long_hpd_port_mask = 0; | ||
| 502 | dev_priv->short_hpd_port_mask = 0; | ||
| 503 | dev_priv->hpd_event_bits = 0; | ||
| 504 | |||
| 505 | spin_unlock_irq(&dev_priv->irq_lock); | ||
| 506 | |||
| 507 | cancel_work_sync(&dev_priv->dig_port_work); | ||
| 508 | cancel_work_sync(&dev_priv->hotplug_work); | ||
| 509 | cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work); | ||
| 510 | } | ||
| 511 | |||
| 512 | static void intel_suspend_encoders(struct drm_i915_private *dev_priv) | ||
| 513 | { | ||
| 514 | struct drm_device *dev = dev_priv->dev; | ||
| 515 | struct drm_encoder *encoder; | ||
| 516 | |||
| 517 | drm_modeset_lock_all(dev); | ||
| 518 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
| 519 | struct intel_encoder *intel_encoder = to_intel_encoder(encoder); | ||
| 520 | |||
| 521 | if (intel_encoder->suspend) | ||
| 522 | intel_encoder->suspend(intel_encoder); | ||
| 523 | } | ||
| 524 | drm_modeset_unlock_all(dev); | ||
| 525 | } | ||
| 526 | |||
| 497 | static int i915_drm_freeze(struct drm_device *dev) | 527 | static int i915_drm_freeze(struct drm_device *dev) |
| 498 | { | 528 | { |
| 499 | struct drm_i915_private *dev_priv = dev->dev_private; | 529 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -538,6 +568,9 @@ static int i915_drm_freeze(struct drm_device *dev) | |||
| 538 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); | 568 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); |
| 539 | 569 | ||
| 540 | intel_runtime_pm_disable_interrupts(dev); | 570 | intel_runtime_pm_disable_interrupts(dev); |
| 571 | intel_hpd_cancel_work(dev_priv); | ||
| 572 | |||
| 573 | intel_suspend_encoders(dev_priv); | ||
| 541 | 574 | ||
| 542 | intel_suspend_gt_powersave(dev); | 575 | intel_suspend_gt_powersave(dev); |
| 543 | 576 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4412f6a4383b..7a830eac5ba3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -1458,7 +1458,7 @@ struct drm_i915_private { | |||
| 1458 | } hpd_mark; | 1458 | } hpd_mark; |
| 1459 | } hpd_stats[HPD_NUM_PINS]; | 1459 | } hpd_stats[HPD_NUM_PINS]; |
| 1460 | u32 hpd_event_bits; | 1460 | u32 hpd_event_bits; |
| 1461 | struct timer_list hotplug_reenable_timer; | 1461 | struct delayed_work hotplug_reenable_work; |
| 1462 | 1462 | ||
| 1463 | struct i915_fbc fbc; | 1463 | struct i915_fbc fbc; |
| 1464 | struct i915_drrs drrs; | 1464 | struct i915_drrs drrs; |
| @@ -2178,6 +2178,7 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); | |||
| 2178 | extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); | 2178 | extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); |
| 2179 | extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); | 2179 | extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); |
| 2180 | int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); | 2180 | int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); |
| 2181 | void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); | ||
| 2181 | 2182 | ||
| 2182 | extern void intel_console_resume(struct work_struct *work); | 2183 | extern void intel_console_resume(struct work_struct *work); |
| 2183 | 2184 | ||
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 390ccc2a3096..0050ee9470f1 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
| @@ -1189,8 +1189,8 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
| 1189 | * some connectors */ | 1189 | * some connectors */ |
| 1190 | if (hpd_disabled) { | 1190 | if (hpd_disabled) { |
| 1191 | drm_kms_helper_poll_enable(dev); | 1191 | drm_kms_helper_poll_enable(dev); |
| 1192 | mod_timer(&dev_priv->hotplug_reenable_timer, | 1192 | mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work, |
| 1193 | jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); | 1193 | msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); |
| 1194 | } | 1194 | } |
| 1195 | 1195 | ||
| 1196 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 1196 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| @@ -1213,11 +1213,6 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
| 1213 | drm_kms_helper_hotplug_event(dev); | 1213 | drm_kms_helper_hotplug_event(dev); |
| 1214 | } | 1214 | } |
| 1215 | 1215 | ||
| 1216 | static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv) | ||
| 1217 | { | ||
| 1218 | del_timer_sync(&dev_priv->hotplug_reenable_timer); | ||
| 1219 | } | ||
| 1220 | |||
| 1221 | static void ironlake_rps_change_irq_handler(struct drm_device *dev) | 1216 | static void ironlake_rps_change_irq_handler(struct drm_device *dev) |
| 1222 | { | 1217 | { |
| 1223 | struct drm_i915_private *dev_priv = dev->dev_private; | 1218 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -3892,8 +3887,6 @@ static void gen8_irq_uninstall(struct drm_device *dev) | |||
| 3892 | if (!dev_priv) | 3887 | if (!dev_priv) |
| 3893 | return; | 3888 | return; |
| 3894 | 3889 | ||
| 3895 | intel_hpd_irq_uninstall(dev_priv); | ||
| 3896 | |||
| 3897 | gen8_irq_reset(dev); | 3890 | gen8_irq_reset(dev); |
| 3898 | } | 3891 | } |
| 3899 | 3892 | ||
| @@ -3908,8 +3901,6 @@ static void valleyview_irq_uninstall(struct drm_device *dev) | |||
| 3908 | 3901 | ||
| 3909 | I915_WRITE(VLV_MASTER_IER, 0); | 3902 | I915_WRITE(VLV_MASTER_IER, 0); |
| 3910 | 3903 | ||
| 3911 | intel_hpd_irq_uninstall(dev_priv); | ||
| 3912 | |||
| 3913 | for_each_pipe(pipe) | 3904 | for_each_pipe(pipe) |
| 3914 | I915_WRITE(PIPESTAT(pipe), 0xffff); | 3905 | I915_WRITE(PIPESTAT(pipe), 0xffff); |
| 3915 | 3906 | ||
| @@ -3988,8 +3979,6 @@ static void ironlake_irq_uninstall(struct drm_device *dev) | |||
| 3988 | if (!dev_priv) | 3979 | if (!dev_priv) |
| 3989 | return; | 3980 | return; |
| 3990 | 3981 | ||
| 3991 | intel_hpd_irq_uninstall(dev_priv); | ||
| 3992 | |||
| 3993 | ironlake_irq_reset(dev); | 3982 | ironlake_irq_reset(dev); |
| 3994 | } | 3983 | } |
| 3995 | 3984 | ||
| @@ -4360,8 +4349,6 @@ static void i915_irq_uninstall(struct drm_device * dev) | |||
| 4360 | struct drm_i915_private *dev_priv = dev->dev_private; | 4349 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 4361 | int pipe; | 4350 | int pipe; |
| 4362 | 4351 | ||
| 4363 | intel_hpd_irq_uninstall(dev_priv); | ||
| 4364 | |||
| 4365 | if (I915_HAS_HOTPLUG(dev)) { | 4352 | if (I915_HAS_HOTPLUG(dev)) { |
| 4366 | I915_WRITE(PORT_HOTPLUG_EN, 0); | 4353 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
| 4367 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | 4354 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); |
| @@ -4598,8 +4585,6 @@ static void i965_irq_uninstall(struct drm_device * dev) | |||
| 4598 | if (!dev_priv) | 4585 | if (!dev_priv) |
| 4599 | return; | 4586 | return; |
| 4600 | 4587 | ||
| 4601 | intel_hpd_irq_uninstall(dev_priv); | ||
| 4602 | |||
| 4603 | I915_WRITE(PORT_HOTPLUG_EN, 0); | 4588 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
| 4604 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | 4589 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); |
| 4605 | 4590 | ||
| @@ -4615,14 +4600,18 @@ static void i965_irq_uninstall(struct drm_device * dev) | |||
| 4615 | I915_WRITE(IIR, I915_READ(IIR)); | 4600 | I915_WRITE(IIR, I915_READ(IIR)); |
| 4616 | } | 4601 | } |
| 4617 | 4602 | ||
| 4618 | static void intel_hpd_irq_reenable(unsigned long data) | 4603 | static void intel_hpd_irq_reenable(struct work_struct *work) |
| 4619 | { | 4604 | { |
| 4620 | struct drm_i915_private *dev_priv = (struct drm_i915_private *)data; | 4605 | struct drm_i915_private *dev_priv = |
| 4606 | container_of(work, typeof(*dev_priv), | ||
| 4607 | hotplug_reenable_work.work); | ||
| 4621 | struct drm_device *dev = dev_priv->dev; | 4608 | struct drm_device *dev = dev_priv->dev; |
| 4622 | struct drm_mode_config *mode_config = &dev->mode_config; | 4609 | struct drm_mode_config *mode_config = &dev->mode_config; |
| 4623 | unsigned long irqflags; | 4610 | unsigned long irqflags; |
| 4624 | int i; | 4611 | int i; |
| 4625 | 4612 | ||
| 4613 | intel_runtime_pm_get(dev_priv); | ||
| 4614 | |||
| 4626 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 4615 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
| 4627 | for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { | 4616 | for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { |
| 4628 | struct drm_connector *connector; | 4617 | struct drm_connector *connector; |
| @@ -4648,6 +4637,8 @@ static void intel_hpd_irq_reenable(unsigned long data) | |||
| 4648 | if (dev_priv->display.hpd_irq_setup) | 4637 | if (dev_priv->display.hpd_irq_setup) |
| 4649 | dev_priv->display.hpd_irq_setup(dev); | 4638 | dev_priv->display.hpd_irq_setup(dev); |
| 4650 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 4639 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 4640 | |||
| 4641 | intel_runtime_pm_put(dev_priv); | ||
| 4651 | } | 4642 | } |
| 4652 | 4643 | ||
| 4653 | void intel_irq_init(struct drm_device *dev) | 4644 | void intel_irq_init(struct drm_device *dev) |
| @@ -4670,8 +4661,8 @@ void intel_irq_init(struct drm_device *dev) | |||
| 4670 | setup_timer(&dev_priv->gpu_error.hangcheck_timer, | 4661 | setup_timer(&dev_priv->gpu_error.hangcheck_timer, |
| 4671 | i915_hangcheck_elapsed, | 4662 | i915_hangcheck_elapsed, |
| 4672 | (unsigned long) dev); | 4663 | (unsigned long) dev); |
| 4673 | setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable, | 4664 | INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work, |
| 4674 | (unsigned long) dev_priv); | 4665 | intel_hpd_irq_reenable); |
| 4675 | 4666 | ||
| 4676 | pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); | 4667 | pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); |
| 4677 | 4668 | ||
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 2efaf8e8d9c4..e8abfce40976 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
| @@ -699,16 +699,21 @@ intel_crt_detect(struct drm_connector *connector, bool force) | |||
| 699 | goto out; | 699 | goto out; |
| 700 | } | 700 | } |
| 701 | 701 | ||
| 702 | drm_modeset_acquire_init(&ctx, 0); | ||
| 703 | |||
| 702 | /* for pre-945g platforms use load detect */ | 704 | /* for pre-945g platforms use load detect */ |
| 703 | if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) { | 705 | if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) { |
| 704 | if (intel_crt_detect_ddc(connector)) | 706 | if (intel_crt_detect_ddc(connector)) |
| 705 | status = connector_status_connected; | 707 | status = connector_status_connected; |
| 706 | else | 708 | else |
| 707 | status = intel_crt_load_detect(crt); | 709 | status = intel_crt_load_detect(crt); |
| 708 | intel_release_load_detect_pipe(connector, &tmp, &ctx); | 710 | intel_release_load_detect_pipe(connector, &tmp); |
| 709 | } else | 711 | } else |
| 710 | status = connector_status_unknown; | 712 | status = connector_status_unknown; |
| 711 | 713 | ||
| 714 | drm_modeset_drop_locks(&ctx); | ||
| 715 | drm_modeset_acquire_fini(&ctx); | ||
| 716 | |||
| 712 | out: | 717 | out: |
| 713 | intel_display_power_put(dev_priv, power_domain); | 718 | intel_display_power_put(dev_priv, power_domain); |
| 714 | return status; | 719 | return status; |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 018fb7222f60..d074d704f458 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -8462,8 +8462,6 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, | |||
| 8462 | connector->base.id, connector->name, | 8462 | connector->base.id, connector->name, |
| 8463 | encoder->base.id, encoder->name); | 8463 | encoder->base.id, encoder->name); |
| 8464 | 8464 | ||
| 8465 | drm_modeset_acquire_init(ctx, 0); | ||
| 8466 | |||
| 8467 | retry: | 8465 | retry: |
| 8468 | ret = drm_modeset_lock(&config->connection_mutex, ctx); | 8466 | ret = drm_modeset_lock(&config->connection_mutex, ctx); |
| 8469 | if (ret) | 8467 | if (ret) |
| @@ -8502,10 +8500,14 @@ retry: | |||
| 8502 | i++; | 8500 | i++; |
| 8503 | if (!(encoder->possible_crtcs & (1 << i))) | 8501 | if (!(encoder->possible_crtcs & (1 << i))) |
| 8504 | continue; | 8502 | continue; |
| 8505 | if (!possible_crtc->enabled) { | 8503 | if (possible_crtc->enabled) |
| 8506 | crtc = possible_crtc; | 8504 | continue; |
| 8507 | break; | 8505 | /* This can occur when applying the pipe A quirk on resume. */ |
| 8508 | } | 8506 | if (to_intel_crtc(possible_crtc)->new_enabled) |
| 8507 | continue; | ||
| 8508 | |||
| 8509 | crtc = possible_crtc; | ||
| 8510 | break; | ||
| 8509 | } | 8511 | } |
| 8510 | 8512 | ||
| 8511 | /* | 8513 | /* |
| @@ -8574,15 +8576,11 @@ fail_unlock: | |||
| 8574 | goto retry; | 8576 | goto retry; |
| 8575 | } | 8577 | } |
| 8576 | 8578 | ||
| 8577 | drm_modeset_drop_locks(ctx); | ||
| 8578 | drm_modeset_acquire_fini(ctx); | ||
| 8579 | |||
| 8580 | return false; | 8579 | return false; |
| 8581 | } | 8580 | } |
| 8582 | 8581 | ||
| 8583 | void intel_release_load_detect_pipe(struct drm_connector *connector, | 8582 | void intel_release_load_detect_pipe(struct drm_connector *connector, |
| 8584 | struct intel_load_detect_pipe *old, | 8583 | struct intel_load_detect_pipe *old) |
| 8585 | struct drm_modeset_acquire_ctx *ctx) | ||
| 8586 | { | 8584 | { |
| 8587 | struct intel_encoder *intel_encoder = | 8585 | struct intel_encoder *intel_encoder = |
| 8588 | intel_attached_encoder(connector); | 8586 | intel_attached_encoder(connector); |
| @@ -8606,17 +8604,12 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, | |||
| 8606 | drm_framebuffer_unreference(old->release_fb); | 8604 | drm_framebuffer_unreference(old->release_fb); |
| 8607 | } | 8605 | } |
| 8608 | 8606 | ||
| 8609 | goto unlock; | ||
| 8610 | return; | 8607 | return; |
| 8611 | } | 8608 | } |
| 8612 | 8609 | ||
| 8613 | /* Switch crtc and encoder back off if necessary */ | 8610 | /* Switch crtc and encoder back off if necessary */ |
| 8614 | if (old->dpms_mode != DRM_MODE_DPMS_ON) | 8611 | if (old->dpms_mode != DRM_MODE_DPMS_ON) |
| 8615 | connector->funcs->dpms(connector, old->dpms_mode); | 8612 | connector->funcs->dpms(connector, old->dpms_mode); |
| 8616 | |||
| 8617 | unlock: | ||
| 8618 | drm_modeset_drop_locks(ctx); | ||
| 8619 | drm_modeset_acquire_fini(ctx); | ||
| 8620 | } | 8613 | } |
| 8621 | 8614 | ||
| 8622 | static int i9xx_pll_refclk(struct drm_device *dev, | 8615 | static int i9xx_pll_refclk(struct drm_device *dev, |
| @@ -11700,8 +11693,8 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 11700 | }; | 11693 | }; |
| 11701 | const struct drm_rect clip = { | 11694 | const struct drm_rect clip = { |
| 11702 | /* integer pixels */ | 11695 | /* integer pixels */ |
| 11703 | .x2 = intel_crtc->config.pipe_src_w, | 11696 | .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0, |
| 11704 | .y2 = intel_crtc->config.pipe_src_h, | 11697 | .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0, |
| 11705 | }; | 11698 | }; |
| 11706 | bool visible; | 11699 | bool visible; |
| 11707 | int ret; | 11700 | int ret; |
| @@ -12659,7 +12652,7 @@ static void intel_enable_pipe_a(struct drm_device *dev) | |||
| 12659 | struct intel_connector *connector; | 12652 | struct intel_connector *connector; |
| 12660 | struct drm_connector *crt = NULL; | 12653 | struct drm_connector *crt = NULL; |
| 12661 | struct intel_load_detect_pipe load_detect_temp; | 12654 | struct intel_load_detect_pipe load_detect_temp; |
| 12662 | struct drm_modeset_acquire_ctx ctx; | 12655 | struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx; |
| 12663 | 12656 | ||
| 12664 | /* We can't just switch on the pipe A, we need to set things up with a | 12657 | /* We can't just switch on the pipe A, we need to set things up with a |
| 12665 | * proper mode and output configuration. As a gross hack, enable pipe A | 12658 | * proper mode and output configuration. As a gross hack, enable pipe A |
| @@ -12676,10 +12669,8 @@ static void intel_enable_pipe_a(struct drm_device *dev) | |||
| 12676 | if (!crt) | 12669 | if (!crt) |
| 12677 | return; | 12670 | return; |
| 12678 | 12671 | ||
| 12679 | if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, &ctx)) | 12672 | if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx)) |
| 12680 | intel_release_load_detect_pipe(crt, &load_detect_temp, &ctx); | 12673 | intel_release_load_detect_pipe(crt, &load_detect_temp); |
| 12681 | |||
| 12682 | |||
| 12683 | } | 12674 | } |
| 12684 | 12675 | ||
| 12685 | static bool | 12676 | static bool |
| @@ -13112,7 +13103,7 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
| 13112 | * experience fancy races otherwise. | 13103 | * experience fancy races otherwise. |
| 13113 | */ | 13104 | */ |
| 13114 | drm_irq_uninstall(dev); | 13105 | drm_irq_uninstall(dev); |
| 13115 | cancel_work_sync(&dev_priv->hotplug_work); | 13106 | intel_hpd_cancel_work(dev_priv); |
| 13116 | dev_priv->pm._irqs_disabled = true; | 13107 | dev_priv->pm._irqs_disabled = true; |
| 13117 | 13108 | ||
| 13118 | /* | 13109 | /* |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index ee3942f0b068..67cfed6d911a 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -3553,6 +3553,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) | |||
| 3553 | if (WARN_ON(!intel_encoder->base.crtc)) | 3553 | if (WARN_ON(!intel_encoder->base.crtc)) |
| 3554 | return; | 3554 | return; |
| 3555 | 3555 | ||
| 3556 | if (!to_intel_crtc(intel_encoder->base.crtc)->active) | ||
| 3557 | return; | ||
| 3558 | |||
| 3556 | /* Try to read receiver status if the link appears to be up */ | 3559 | /* Try to read receiver status if the link appears to be up */ |
| 3557 | if (!intel_dp_get_link_status(intel_dp, link_status)) { | 3560 | if (!intel_dp_get_link_status(intel_dp, link_status)) { |
| 3558 | return; | 3561 | return; |
| @@ -4003,6 +4006,16 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) | |||
| 4003 | kfree(intel_dig_port); | 4006 | kfree(intel_dig_port); |
| 4004 | } | 4007 | } |
| 4005 | 4008 | ||
| 4009 | static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) | ||
| 4010 | { | ||
| 4011 | struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); | ||
| 4012 | |||
| 4013 | if (!is_edp(intel_dp)) | ||
| 4014 | return; | ||
| 4015 | |||
| 4016 | edp_panel_vdd_off_sync(intel_dp); | ||
| 4017 | } | ||
| 4018 | |||
| 4006 | static void intel_dp_encoder_reset(struct drm_encoder *encoder) | 4019 | static void intel_dp_encoder_reset(struct drm_encoder *encoder) |
| 4007 | { | 4020 | { |
| 4008 | intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder)); | 4021 | intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder)); |
| @@ -4037,15 +4050,21 @@ bool | |||
| 4037 | intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) | 4050 | intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) |
| 4038 | { | 4051 | { |
| 4039 | struct intel_dp *intel_dp = &intel_dig_port->dp; | 4052 | struct intel_dp *intel_dp = &intel_dig_port->dp; |
| 4053 | struct intel_encoder *intel_encoder = &intel_dig_port->base; | ||
| 4040 | struct drm_device *dev = intel_dig_port->base.base.dev; | 4054 | struct drm_device *dev = intel_dig_port->base.base.dev; |
| 4041 | struct drm_i915_private *dev_priv = dev->dev_private; | 4055 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 4042 | int ret; | 4056 | enum intel_display_power_domain power_domain; |
| 4057 | bool ret = true; | ||
| 4058 | |||
| 4043 | if (intel_dig_port->base.type != INTEL_OUTPUT_EDP) | 4059 | if (intel_dig_port->base.type != INTEL_OUTPUT_EDP) |
| 4044 | intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT; | 4060 | intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT; |
| 4045 | 4061 | ||
| 4046 | DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port, | 4062 | DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port, |
| 4047 | long_hpd ? "long" : "short"); | 4063 | long_hpd ? "long" : "short"); |
| 4048 | 4064 | ||
| 4065 | power_domain = intel_display_port_power_domain(intel_encoder); | ||
| 4066 | intel_display_power_get(dev_priv, power_domain); | ||
| 4067 | |||
| 4049 | if (long_hpd) { | 4068 | if (long_hpd) { |
| 4050 | if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) | 4069 | if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) |
| 4051 | goto mst_fail; | 4070 | goto mst_fail; |
| @@ -4061,8 +4080,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) | |||
| 4061 | 4080 | ||
| 4062 | } else { | 4081 | } else { |
| 4063 | if (intel_dp->is_mst) { | 4082 | if (intel_dp->is_mst) { |
| 4064 | ret = intel_dp_check_mst_status(intel_dp); | 4083 | if (intel_dp_check_mst_status(intel_dp) == -EINVAL) |
| 4065 | if (ret == -EINVAL) | ||
| 4066 | goto mst_fail; | 4084 | goto mst_fail; |
| 4067 | } | 4085 | } |
| 4068 | 4086 | ||
| @@ -4076,7 +4094,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) | |||
| 4076 | drm_modeset_unlock(&dev->mode_config.connection_mutex); | 4094 | drm_modeset_unlock(&dev->mode_config.connection_mutex); |
| 4077 | } | 4095 | } |
| 4078 | } | 4096 | } |
| 4079 | return false; | 4097 | ret = false; |
| 4098 | goto put_power; | ||
| 4080 | mst_fail: | 4099 | mst_fail: |
| 4081 | /* if we were in MST mode, and device is not there get out of MST mode */ | 4100 | /* if we were in MST mode, and device is not there get out of MST mode */ |
| 4082 | if (intel_dp->is_mst) { | 4101 | if (intel_dp->is_mst) { |
| @@ -4084,7 +4103,10 @@ mst_fail: | |||
| 4084 | intel_dp->is_mst = false; | 4103 | intel_dp->is_mst = false; |
| 4085 | drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); | 4104 | drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); |
| 4086 | } | 4105 | } |
| 4087 | return true; | 4106 | put_power: |
| 4107 | intel_display_power_put(dev_priv, power_domain); | ||
| 4108 | |||
| 4109 | return ret; | ||
| 4088 | } | 4110 | } |
| 4089 | 4111 | ||
| 4090 | /* Return which DP Port should be selected for Transcoder DP control */ | 4112 | /* Return which DP Port should be selected for Transcoder DP control */ |
| @@ -4722,6 +4744,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) | |||
| 4722 | intel_encoder->disable = intel_disable_dp; | 4744 | intel_encoder->disable = intel_disable_dp; |
| 4723 | intel_encoder->get_hw_state = intel_dp_get_hw_state; | 4745 | intel_encoder->get_hw_state = intel_dp_get_hw_state; |
| 4724 | intel_encoder->get_config = intel_dp_get_config; | 4746 | intel_encoder->get_config = intel_dp_get_config; |
| 4747 | intel_encoder->suspend = intel_dp_encoder_suspend; | ||
| 4725 | if (IS_CHERRYVIEW(dev)) { | 4748 | if (IS_CHERRYVIEW(dev)) { |
| 4726 | intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; | 4749 | intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; |
| 4727 | intel_encoder->pre_enable = chv_pre_enable_dp; | 4750 | intel_encoder->pre_enable = chv_pre_enable_dp; |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 4b2664bd5b81..b8c8bbd8e5f9 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -153,6 +153,12 @@ struct intel_encoder { | |||
| 153 | * be set correctly before calling this function. */ | 153 | * be set correctly before calling this function. */ |
| 154 | void (*get_config)(struct intel_encoder *, | 154 | void (*get_config)(struct intel_encoder *, |
| 155 | struct intel_crtc_config *pipe_config); | 155 | struct intel_crtc_config *pipe_config); |
| 156 | /* | ||
| 157 | * Called during system suspend after all pending requests for the | ||
| 158 | * encoder are flushed (for example for DP AUX transactions) and | ||
| 159 | * device interrupts are disabled. | ||
| 160 | */ | ||
| 161 | void (*suspend)(struct intel_encoder *); | ||
| 156 | int crtc_mask; | 162 | int crtc_mask; |
| 157 | enum hpd_pin hpd_pin; | 163 | enum hpd_pin hpd_pin; |
| 158 | }; | 164 | }; |
| @@ -830,8 +836,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, | |||
| 830 | struct intel_load_detect_pipe *old, | 836 | struct intel_load_detect_pipe *old, |
| 831 | struct drm_modeset_acquire_ctx *ctx); | 837 | struct drm_modeset_acquire_ctx *ctx); |
| 832 | void intel_release_load_detect_pipe(struct drm_connector *connector, | 838 | void intel_release_load_detect_pipe(struct drm_connector *connector, |
| 833 | struct intel_load_detect_pipe *old, | 839 | struct intel_load_detect_pipe *old); |
| 834 | struct drm_modeset_acquire_ctx *ctx); | ||
| 835 | int intel_pin_and_fence_fb_obj(struct drm_device *dev, | 840 | int intel_pin_and_fence_fb_obj(struct drm_device *dev, |
| 836 | struct drm_i915_gem_object *obj, | 841 | struct drm_i915_gem_object *obj, |
| 837 | struct intel_engine_cs *pipelined); | 842 | struct intel_engine_cs *pipelined); |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index e211eef4b7e4..32186a656816 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
| @@ -1323,11 +1323,16 @@ intel_tv_detect(struct drm_connector *connector, bool force) | |||
| 1323 | struct intel_load_detect_pipe tmp; | 1323 | struct intel_load_detect_pipe tmp; |
| 1324 | struct drm_modeset_acquire_ctx ctx; | 1324 | struct drm_modeset_acquire_ctx ctx; |
| 1325 | 1325 | ||
| 1326 | drm_modeset_acquire_init(&ctx, 0); | ||
| 1327 | |||
| 1326 | if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) { | 1328 | if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) { |
| 1327 | type = intel_tv_detect_type(intel_tv, connector); | 1329 | type = intel_tv_detect_type(intel_tv, connector); |
| 1328 | intel_release_load_detect_pipe(connector, &tmp, &ctx); | 1330 | intel_release_load_detect_pipe(connector, &tmp); |
| 1329 | } else | 1331 | } else |
| 1330 | return connector_status_unknown; | 1332 | return connector_status_unknown; |
| 1333 | |||
| 1334 | drm_modeset_drop_locks(&ctx); | ||
| 1335 | drm_modeset_acquire_fini(&ctx); | ||
| 1331 | } else | 1336 | } else |
| 1332 | return connector->status; | 1337 | return connector->status; |
| 1333 | 1338 | ||
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 0013ad0db9ef..f77b7135ee4c 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile | |||
| @@ -76,7 +76,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ | |||
| 76 | evergreen.o evergreen_cs.o evergreen_blit_shaders.o \ | 76 | evergreen.o evergreen_cs.o evergreen_blit_shaders.o \ |
| 77 | evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \ | 77 | evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \ |
| 78 | atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \ | 78 | atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \ |
| 79 | si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \ | 79 | si_blit_shaders.o radeon_prime.o cik.o cik_blit_shaders.o \ |
| 80 | r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ | 80 | r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ |
| 81 | rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ | 81 | rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ |
| 82 | trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \ | 82 | trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \ |
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 022561e28707..d416bb2ff48d 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c | |||
| @@ -869,6 +869,9 @@ static int ci_set_thermal_temperature_range(struct radeon_device *rdev, | |||
| 869 | WREG32_SMC(CG_THERMAL_CTRL, tmp); | 869 | WREG32_SMC(CG_THERMAL_CTRL, tmp); |
| 870 | #endif | 870 | #endif |
| 871 | 871 | ||
| 872 | rdev->pm.dpm.thermal.min_temp = low_temp; | ||
| 873 | rdev->pm.dpm.thermal.max_temp = high_temp; | ||
| 874 | |||
| 872 | return 0; | 875 | return 0; |
| 873 | } | 876 | } |
| 874 | 877 | ||
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index b625646bf3e2..79a5a5519bd6 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
| @@ -3483,7 +3483,7 @@ static void cik_gpu_init(struct radeon_device *rdev) | |||
| 3483 | u32 mc_shared_chmap, mc_arb_ramcfg; | 3483 | u32 mc_shared_chmap, mc_arb_ramcfg; |
| 3484 | u32 hdp_host_path_cntl; | 3484 | u32 hdp_host_path_cntl; |
| 3485 | u32 tmp; | 3485 | u32 tmp; |
| 3486 | int i, j, k; | 3486 | int i, j; |
| 3487 | 3487 | ||
| 3488 | switch (rdev->family) { | 3488 | switch (rdev->family) { |
| 3489 | case CHIP_BONAIRE: | 3489 | case CHIP_BONAIRE: |
| @@ -3544,6 +3544,7 @@ static void cik_gpu_init(struct radeon_device *rdev) | |||
| 3544 | (rdev->pdev->device == 0x130B) || | 3544 | (rdev->pdev->device == 0x130B) || |
| 3545 | (rdev->pdev->device == 0x130E) || | 3545 | (rdev->pdev->device == 0x130E) || |
| 3546 | (rdev->pdev->device == 0x1315) || | 3546 | (rdev->pdev->device == 0x1315) || |
| 3547 | (rdev->pdev->device == 0x1318) || | ||
| 3547 | (rdev->pdev->device == 0x131B)) { | 3548 | (rdev->pdev->device == 0x131B)) { |
| 3548 | rdev->config.cik.max_cu_per_sh = 4; | 3549 | rdev->config.cik.max_cu_per_sh = 4; |
| 3549 | rdev->config.cik.max_backends_per_se = 1; | 3550 | rdev->config.cik.max_backends_per_se = 1; |
| @@ -3672,12 +3673,11 @@ static void cik_gpu_init(struct radeon_device *rdev) | |||
| 3672 | rdev->config.cik.max_sh_per_se, | 3673 | rdev->config.cik.max_sh_per_se, |
| 3673 | rdev->config.cik.max_backends_per_se); | 3674 | rdev->config.cik.max_backends_per_se); |
| 3674 | 3675 | ||
| 3676 | rdev->config.cik.active_cus = 0; | ||
| 3675 | for (i = 0; i < rdev->config.cik.max_shader_engines; i++) { | 3677 | for (i = 0; i < rdev->config.cik.max_shader_engines; i++) { |
| 3676 | for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) { | 3678 | for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) { |
| 3677 | for (k = 0; k < rdev->config.cik.max_cu_per_sh; k++) { | 3679 | rdev->config.cik.active_cus += |
| 3678 | rdev->config.cik.active_cus += | 3680 | hweight32(cik_get_cu_active_bitmap(rdev, i, j)); |
| 3679 | hweight32(cik_get_cu_active_bitmap(rdev, i, j)); | ||
| 3680 | } | ||
| 3681 | } | 3681 | } |
| 3682 | } | 3682 | } |
| 3683 | 3683 | ||
| @@ -3801,7 +3801,7 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) | |||
| 3801 | radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); | 3801 | radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); |
| 3802 | radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2)); | 3802 | radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2)); |
| 3803 | radeon_ring_write(ring, 0xDEADBEEF); | 3803 | radeon_ring_write(ring, 0xDEADBEEF); |
| 3804 | radeon_ring_unlock_commit(rdev, ring); | 3804 | radeon_ring_unlock_commit(rdev, ring, false); |
| 3805 | 3805 | ||
| 3806 | for (i = 0; i < rdev->usec_timeout; i++) { | 3806 | for (i = 0; i < rdev->usec_timeout; i++) { |
| 3807 | tmp = RREG32(scratch); | 3807 | tmp = RREG32(scratch); |
| @@ -3920,6 +3920,17 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev, | |||
| 3920 | radeon_ring_write(ring, 0); | 3920 | radeon_ring_write(ring, 0); |
| 3921 | } | 3921 | } |
| 3922 | 3922 | ||
| 3923 | /** | ||
| 3924 | * cik_semaphore_ring_emit - emit a semaphore on the CP ring | ||
| 3925 | * | ||
| 3926 | * @rdev: radeon_device pointer | ||
| 3927 | * @ring: radeon ring buffer object | ||
| 3928 | * @semaphore: radeon semaphore object | ||
| 3929 | * @emit_wait: Is this a sempahore wait? | ||
| 3930 | * | ||
| 3931 | * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP | ||
| 3932 | * from running ahead of semaphore waits. | ||
| 3933 | */ | ||
| 3923 | bool cik_semaphore_ring_emit(struct radeon_device *rdev, | 3934 | bool cik_semaphore_ring_emit(struct radeon_device *rdev, |
| 3924 | struct radeon_ring *ring, | 3935 | struct radeon_ring *ring, |
| 3925 | struct radeon_semaphore *semaphore, | 3936 | struct radeon_semaphore *semaphore, |
| @@ -3932,6 +3943,12 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev, | |||
| 3932 | radeon_ring_write(ring, lower_32_bits(addr)); | 3943 | radeon_ring_write(ring, lower_32_bits(addr)); |
| 3933 | radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel); | 3944 | radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel); |
| 3934 | 3945 | ||
| 3946 | if (emit_wait && ring->idx == RADEON_RING_TYPE_GFX_INDEX) { | ||
| 3947 | /* Prevent the PFP from running ahead of the semaphore wait */ | ||
| 3948 | radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); | ||
| 3949 | radeon_ring_write(ring, 0x0); | ||
| 3950 | } | ||
| 3951 | |||
| 3935 | return true; | 3952 | return true; |
| 3936 | } | 3953 | } |
| 3937 | 3954 | ||
| @@ -4004,7 +4021,7 @@ int cik_copy_cpdma(struct radeon_device *rdev, | |||
| 4004 | return r; | 4021 | return r; |
| 4005 | } | 4022 | } |
| 4006 | 4023 | ||
| 4007 | radeon_ring_unlock_commit(rdev, ring); | 4024 | radeon_ring_unlock_commit(rdev, ring, false); |
| 4008 | radeon_semaphore_free(rdev, &sem, *fence); | 4025 | radeon_semaphore_free(rdev, &sem, *fence); |
| 4009 | 4026 | ||
| 4010 | return r; | 4027 | return r; |
| @@ -4103,7 +4120,7 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) | |||
| 4103 | ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2); | 4120 | ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2); |
| 4104 | ib.ptr[2] = 0xDEADBEEF; | 4121 | ib.ptr[2] = 0xDEADBEEF; |
| 4105 | ib.length_dw = 3; | 4122 | ib.length_dw = 3; |
| 4106 | r = radeon_ib_schedule(rdev, &ib, NULL); | 4123 | r = radeon_ib_schedule(rdev, &ib, NULL, false); |
| 4107 | if (r) { | 4124 | if (r) { |
| 4108 | radeon_scratch_free(rdev, scratch); | 4125 | radeon_scratch_free(rdev, scratch); |
| 4109 | radeon_ib_free(rdev, &ib); | 4126 | radeon_ib_free(rdev, &ib); |
| @@ -4324,7 +4341,7 @@ static int cik_cp_gfx_start(struct radeon_device *rdev) | |||
| 4324 | radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ | 4341 | radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ |
| 4325 | radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ | 4342 | radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ |
| 4326 | 4343 | ||
| 4327 | radeon_ring_unlock_commit(rdev, ring); | 4344 | radeon_ring_unlock_commit(rdev, ring, false); |
| 4328 | 4345 | ||
| 4329 | return 0; | 4346 | return 0; |
| 4330 | } | 4347 | } |
| @@ -5958,14 +5975,14 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) | |||
| 5958 | 5975 | ||
| 5959 | /* update SH_MEM_* regs */ | 5976 | /* update SH_MEM_* regs */ |
| 5960 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | 5977 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
| 5961 | radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | 5978 | radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | |
| 5962 | WRITE_DATA_DST_SEL(0))); | 5979 | WRITE_DATA_DST_SEL(0))); |
| 5963 | radeon_ring_write(ring, SRBM_GFX_CNTL >> 2); | 5980 | radeon_ring_write(ring, SRBM_GFX_CNTL >> 2); |
| 5964 | radeon_ring_write(ring, 0); | 5981 | radeon_ring_write(ring, 0); |
| 5965 | radeon_ring_write(ring, VMID(vm->id)); | 5982 | radeon_ring_write(ring, VMID(vm->id)); |
| 5966 | 5983 | ||
| 5967 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6)); | 5984 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6)); |
| 5968 | radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | 5985 | radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | |
| 5969 | WRITE_DATA_DST_SEL(0))); | 5986 | WRITE_DATA_DST_SEL(0))); |
| 5970 | radeon_ring_write(ring, SH_MEM_BASES >> 2); | 5987 | radeon_ring_write(ring, SH_MEM_BASES >> 2); |
| 5971 | radeon_ring_write(ring, 0); | 5988 | radeon_ring_write(ring, 0); |
| @@ -5976,7 +5993,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) | |||
| 5976 | radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */ | 5993 | radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */ |
| 5977 | 5994 | ||
| 5978 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | 5995 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
| 5979 | radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | 5996 | radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | |
| 5980 | WRITE_DATA_DST_SEL(0))); | 5997 | WRITE_DATA_DST_SEL(0))); |
| 5981 | radeon_ring_write(ring, SRBM_GFX_CNTL >> 2); | 5998 | radeon_ring_write(ring, SRBM_GFX_CNTL >> 2); |
| 5982 | radeon_ring_write(ring, 0); | 5999 | radeon_ring_write(ring, 0); |
| @@ -5987,7 +6004,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) | |||
| 5987 | 6004 | ||
| 5988 | /* bits 0-15 are the VM contexts0-15 */ | 6005 | /* bits 0-15 are the VM contexts0-15 */ |
| 5989 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | 6006 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
| 5990 | radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | 6007 | radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | |
| 5991 | WRITE_DATA_DST_SEL(0))); | 6008 | WRITE_DATA_DST_SEL(0))); |
| 5992 | radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); | 6009 | radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); |
| 5993 | radeon_ring_write(ring, 0); | 6010 | radeon_ring_write(ring, 0); |
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index bcf480510ac2..192278bc993c 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c | |||
| @@ -596,7 +596,7 @@ int cik_copy_dma(struct radeon_device *rdev, | |||
| 596 | return r; | 596 | return r; |
| 597 | } | 597 | } |
| 598 | 598 | ||
| 599 | radeon_ring_unlock_commit(rdev, ring); | 599 | radeon_ring_unlock_commit(rdev, ring, false); |
| 600 | radeon_semaphore_free(rdev, &sem, *fence); | 600 | radeon_semaphore_free(rdev, &sem, *fence); |
| 601 | 601 | ||
| 602 | return r; | 602 | return r; |
| @@ -638,7 +638,7 @@ int cik_sdma_ring_test(struct radeon_device *rdev, | |||
| 638 | radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr)); | 638 | radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr)); |
| 639 | radeon_ring_write(ring, 1); /* number of DWs to follow */ | 639 | radeon_ring_write(ring, 1); /* number of DWs to follow */ |
| 640 | radeon_ring_write(ring, 0xDEADBEEF); | 640 | radeon_ring_write(ring, 0xDEADBEEF); |
| 641 | radeon_ring_unlock_commit(rdev, ring); | 641 | radeon_ring_unlock_commit(rdev, ring, false); |
| 642 | 642 | ||
| 643 | for (i = 0; i < rdev->usec_timeout; i++) { | 643 | for (i = 0; i < rdev->usec_timeout; i++) { |
| 644 | tmp = readl(ptr); | 644 | tmp = readl(ptr); |
| @@ -695,7 +695,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) | |||
| 695 | ib.ptr[4] = 0xDEADBEEF; | 695 | ib.ptr[4] = 0xDEADBEEF; |
| 696 | ib.length_dw = 5; | 696 | ib.length_dw = 5; |
| 697 | 697 | ||
| 698 | r = radeon_ib_schedule(rdev, &ib, NULL); | 698 | r = radeon_ib_schedule(rdev, &ib, NULL, false); |
| 699 | if (r) { | 699 | if (r) { |
| 700 | radeon_ib_free(rdev, &ib); | 700 | radeon_ib_free(rdev, &ib); |
| 701 | DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); | 701 | DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 4fedd14e670a..dbca60c7d097 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
| @@ -2869,7 +2869,7 @@ static int evergreen_cp_start(struct radeon_device *rdev) | |||
| 2869 | radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); | 2869 | radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); |
| 2870 | radeon_ring_write(ring, 0); | 2870 | radeon_ring_write(ring, 0); |
| 2871 | radeon_ring_write(ring, 0); | 2871 | radeon_ring_write(ring, 0); |
| 2872 | radeon_ring_unlock_commit(rdev, ring); | 2872 | radeon_ring_unlock_commit(rdev, ring, false); |
| 2873 | 2873 | ||
| 2874 | cp_me = 0xff; | 2874 | cp_me = 0xff; |
| 2875 | WREG32(CP_ME_CNTL, cp_me); | 2875 | WREG32(CP_ME_CNTL, cp_me); |
| @@ -2912,7 +2912,7 @@ static int evergreen_cp_start(struct radeon_device *rdev) | |||
| 2912 | radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ | 2912 | radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ |
| 2913 | radeon_ring_write(ring, 0x00000010); /* */ | 2913 | radeon_ring_write(ring, 0x00000010); /* */ |
| 2914 | 2914 | ||
| 2915 | radeon_ring_unlock_commit(rdev, ring); | 2915 | radeon_ring_unlock_commit(rdev, ring, false); |
| 2916 | 2916 | ||
| 2917 | return 0; | 2917 | return 0; |
| 2918 | } | 2918 | } |
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c index 478caefe0fef..afaba388c36d 100644 --- a/drivers/gpu/drm/radeon/evergreen_dma.c +++ b/drivers/gpu/drm/radeon/evergreen_dma.c | |||
| @@ -155,7 +155,7 @@ int evergreen_copy_dma(struct radeon_device *rdev, | |||
| 155 | return r; | 155 | return r; |
| 156 | } | 156 | } |
| 157 | 157 | ||
| 158 | radeon_ring_unlock_commit(rdev, ring); | 158 | radeon_ring_unlock_commit(rdev, ring, false); |
| 159 | radeon_semaphore_free(rdev, &sem, *fence); | 159 | radeon_semaphore_free(rdev, &sem, *fence); |
| 160 | 160 | ||
| 161 | return r; | 161 | return r; |
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index 9ef8c38f2d66..8b58e11b64fa 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c | |||
| @@ -1438,14 +1438,14 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate) | |||
| 1438 | return kv_enable_uvd_dpm(rdev, !gate); | 1438 | return kv_enable_uvd_dpm(rdev, !gate); |
| 1439 | } | 1439 | } |
| 1440 | 1440 | ||
| 1441 | static u8 kv_get_vce_boot_level(struct radeon_device *rdev) | 1441 | static u8 kv_get_vce_boot_level(struct radeon_device *rdev, u32 evclk) |
| 1442 | { | 1442 | { |
| 1443 | u8 i; | 1443 | u8 i; |
| 1444 | struct radeon_vce_clock_voltage_dependency_table *table = | 1444 | struct radeon_vce_clock_voltage_dependency_table *table = |
| 1445 | &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; | 1445 | &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; |
| 1446 | 1446 | ||
| 1447 | for (i = 0; i < table->count; i++) { | 1447 | for (i = 0; i < table->count; i++) { |
| 1448 | if (table->entries[i].evclk >= 0) /* XXX */ | 1448 | if (table->entries[i].evclk >= evclk) |
| 1449 | break; | 1449 | break; |
| 1450 | } | 1450 | } |
| 1451 | 1451 | ||
| @@ -1468,7 +1468,7 @@ static int kv_update_vce_dpm(struct radeon_device *rdev, | |||
| 1468 | if (pi->caps_stable_p_state) | 1468 | if (pi->caps_stable_p_state) |
| 1469 | pi->vce_boot_level = table->count - 1; | 1469 | pi->vce_boot_level = table->count - 1; |
| 1470 | else | 1470 | else |
| 1471 | pi->vce_boot_level = kv_get_vce_boot_level(rdev); | 1471 | pi->vce_boot_level = kv_get_vce_boot_level(rdev, radeon_new_state->evclk); |
| 1472 | 1472 | ||
| 1473 | ret = kv_copy_bytes_to_smc(rdev, | 1473 | ret = kv_copy_bytes_to_smc(rdev, |
| 1474 | pi->dpm_table_start + | 1474 | pi->dpm_table_start + |
| @@ -2726,7 +2726,10 @@ int kv_dpm_init(struct radeon_device *rdev) | |||
| 2726 | pi->caps_sclk_ds = true; | 2726 | pi->caps_sclk_ds = true; |
| 2727 | pi->enable_auto_thermal_throttling = true; | 2727 | pi->enable_auto_thermal_throttling = true; |
| 2728 | pi->disable_nb_ps3_in_battery = false; | 2728 | pi->disable_nb_ps3_in_battery = false; |
| 2729 | pi->bapm_enable = true; | 2729 | if (radeon_bapm == 0) |
| 2730 | pi->bapm_enable = false; | ||
| 2731 | else | ||
| 2732 | pi->bapm_enable = true; | ||
| 2730 | pi->voltage_drop_t = 0; | 2733 | pi->voltage_drop_t = 0; |
| 2731 | pi->caps_sclk_throttle_low_notification = false; | 2734 | pi->caps_sclk_throttle_low_notification = false; |
| 2732 | pi->caps_fps = false; /* true? */ | 2735 | pi->caps_fps = false; /* true? */ |
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 327b85f7fd0d..ba89375f197f 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
| @@ -1505,7 +1505,7 @@ static int cayman_cp_start(struct radeon_device *rdev) | |||
| 1505 | radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); | 1505 | radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); |
| 1506 | radeon_ring_write(ring, 0); | 1506 | radeon_ring_write(ring, 0); |
| 1507 | radeon_ring_write(ring, 0); | 1507 | radeon_ring_write(ring, 0); |
| 1508 | radeon_ring_unlock_commit(rdev, ring); | 1508 | radeon_ring_unlock_commit(rdev, ring, false); |
| 1509 | 1509 | ||
| 1510 | cayman_cp_enable(rdev, true); | 1510 | cayman_cp_enable(rdev, true); |
| 1511 | 1511 | ||
| @@ -1547,7 +1547,7 @@ static int cayman_cp_start(struct radeon_device *rdev) | |||
| 1547 | radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ | 1547 | radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ |
| 1548 | radeon_ring_write(ring, 0x00000010); /* */ | 1548 | radeon_ring_write(ring, 0x00000010); /* */ |
| 1549 | 1549 | ||
| 1550 | radeon_ring_unlock_commit(rdev, ring); | 1550 | radeon_ring_unlock_commit(rdev, ring, false); |
| 1551 | 1551 | ||
| 1552 | /* XXX init other rings */ | 1552 | /* XXX init other rings */ |
| 1553 | 1553 | ||
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 04b5940b8923..4c5ec44ff328 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
| @@ -925,7 +925,7 @@ int r100_copy_blit(struct radeon_device *rdev, | |||
| 925 | if (fence) { | 925 | if (fence) { |
| 926 | r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); | 926 | r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); |
| 927 | } | 927 | } |
| 928 | radeon_ring_unlock_commit(rdev, ring); | 928 | radeon_ring_unlock_commit(rdev, ring, false); |
| 929 | return r; | 929 | return r; |
| 930 | } | 930 | } |
| 931 | 931 | ||
| @@ -958,7 +958,7 @@ void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) | |||
| 958 | RADEON_ISYNC_ANY3D_IDLE2D | | 958 | RADEON_ISYNC_ANY3D_IDLE2D | |
| 959 | RADEON_ISYNC_WAIT_IDLEGUI | | 959 | RADEON_ISYNC_WAIT_IDLEGUI | |
| 960 | RADEON_ISYNC_CPSCRATCH_IDLEGUI); | 960 | RADEON_ISYNC_CPSCRATCH_IDLEGUI); |
| 961 | radeon_ring_unlock_commit(rdev, ring); | 961 | radeon_ring_unlock_commit(rdev, ring, false); |
| 962 | } | 962 | } |
| 963 | 963 | ||
| 964 | 964 | ||
| @@ -3638,7 +3638,7 @@ int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) | |||
| 3638 | } | 3638 | } |
| 3639 | radeon_ring_write(ring, PACKET0(scratch, 0)); | 3639 | radeon_ring_write(ring, PACKET0(scratch, 0)); |
| 3640 | radeon_ring_write(ring, 0xDEADBEEF); | 3640 | radeon_ring_write(ring, 0xDEADBEEF); |
| 3641 | radeon_ring_unlock_commit(rdev, ring); | 3641 | radeon_ring_unlock_commit(rdev, ring, false); |
| 3642 | for (i = 0; i < rdev->usec_timeout; i++) { | 3642 | for (i = 0; i < rdev->usec_timeout; i++) { |
| 3643 | tmp = RREG32(scratch); | 3643 | tmp = RREG32(scratch); |
| 3644 | if (tmp == 0xDEADBEEF) { | 3644 | if (tmp == 0xDEADBEEF) { |
| @@ -3700,7 +3700,7 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) | |||
| 3700 | ib.ptr[6] = PACKET2(0); | 3700 | ib.ptr[6] = PACKET2(0); |
| 3701 | ib.ptr[7] = PACKET2(0); | 3701 | ib.ptr[7] = PACKET2(0); |
| 3702 | ib.length_dw = 8; | 3702 | ib.length_dw = 8; |
| 3703 | r = radeon_ib_schedule(rdev, &ib, NULL); | 3703 | r = radeon_ib_schedule(rdev, &ib, NULL, false); |
| 3704 | if (r) { | 3704 | if (r) { |
| 3705 | DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); | 3705 | DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); |
| 3706 | goto free_ib; | 3706 | goto free_ib; |
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index 58f0473aa73f..67780374a652 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c | |||
| @@ -121,7 +121,7 @@ int r200_copy_dma(struct radeon_device *rdev, | |||
| 121 | if (fence) { | 121 | if (fence) { |
| 122 | r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); | 122 | r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); |
| 123 | } | 123 | } |
| 124 | radeon_ring_unlock_commit(rdev, ring); | 124 | radeon_ring_unlock_commit(rdev, ring, false); |
| 125 | return r; | 125 | return r; |
| 126 | } | 126 | } |
| 127 | 127 | ||
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 75b30338c226..1bc4704034ce 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
| @@ -295,7 +295,7 @@ void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) | |||
| 295 | radeon_ring_write(ring, | 295 | radeon_ring_write(ring, |
| 296 | R300_GEOMETRY_ROUND_NEAREST | | 296 | R300_GEOMETRY_ROUND_NEAREST | |
| 297 | R300_COLOR_ROUND_NEAREST); | 297 | R300_COLOR_ROUND_NEAREST); |
| 298 | radeon_ring_unlock_commit(rdev, ring); | 298 | radeon_ring_unlock_commit(rdev, ring, false); |
| 299 | } | 299 | } |
| 300 | 300 | ||
| 301 | static void r300_errata(struct radeon_device *rdev) | 301 | static void r300_errata(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 802b19220a21..2828605aef3f 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
| @@ -219,7 +219,7 @@ static void r420_cp_errata_init(struct radeon_device *rdev) | |||
| 219 | radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1)); | 219 | radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1)); |
| 220 | radeon_ring_write(ring, rdev->config.r300.resync_scratch); | 220 | radeon_ring_write(ring, rdev->config.r300.resync_scratch); |
| 221 | radeon_ring_write(ring, 0xDEADBEEF); | 221 | radeon_ring_write(ring, 0xDEADBEEF); |
| 222 | radeon_ring_unlock_commit(rdev, ring); | 222 | radeon_ring_unlock_commit(rdev, ring, false); |
| 223 | } | 223 | } |
| 224 | 224 | ||
| 225 | static void r420_cp_errata_fini(struct radeon_device *rdev) | 225 | static void r420_cp_errata_fini(struct radeon_device *rdev) |
| @@ -232,7 +232,7 @@ static void r420_cp_errata_fini(struct radeon_device *rdev) | |||
| 232 | radeon_ring_lock(rdev, ring, 8); | 232 | radeon_ring_lock(rdev, ring, 8); |
| 233 | radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); | 233 | radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); |
| 234 | radeon_ring_write(ring, R300_RB3D_DC_FINISH); | 234 | radeon_ring_write(ring, R300_RB3D_DC_FINISH); |
| 235 | radeon_ring_unlock_commit(rdev, ring); | 235 | radeon_ring_unlock_commit(rdev, ring, false); |
| 236 | radeon_scratch_free(rdev, rdev->config.r300.resync_scratch); | 236 | radeon_scratch_free(rdev, rdev->config.r300.resync_scratch); |
| 237 | } | 237 | } |
| 238 | 238 | ||
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index c70a504d96af..e8bf0ea2dade 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -2547,7 +2547,7 @@ int r600_cp_start(struct radeon_device *rdev) | |||
| 2547 | radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); | 2547 | radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); |
| 2548 | radeon_ring_write(ring, 0); | 2548 | radeon_ring_write(ring, 0); |
| 2549 | radeon_ring_write(ring, 0); | 2549 | radeon_ring_write(ring, 0); |
| 2550 | radeon_ring_unlock_commit(rdev, ring); | 2550 | radeon_ring_unlock_commit(rdev, ring, false); |
| 2551 | 2551 | ||
| 2552 | cp_me = 0xff; | 2552 | cp_me = 0xff; |
| 2553 | WREG32(R_0086D8_CP_ME_CNTL, cp_me); | 2553 | WREG32(R_0086D8_CP_ME_CNTL, cp_me); |
| @@ -2683,7 +2683,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) | |||
| 2683 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | 2683 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); |
| 2684 | radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); | 2684 | radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); |
| 2685 | radeon_ring_write(ring, 0xDEADBEEF); | 2685 | radeon_ring_write(ring, 0xDEADBEEF); |
| 2686 | radeon_ring_unlock_commit(rdev, ring); | 2686 | radeon_ring_unlock_commit(rdev, ring, false); |
| 2687 | for (i = 0; i < rdev->usec_timeout; i++) { | 2687 | for (i = 0; i < rdev->usec_timeout; i++) { |
| 2688 | tmp = RREG32(scratch); | 2688 | tmp = RREG32(scratch); |
| 2689 | if (tmp == 0xDEADBEEF) | 2689 | if (tmp == 0xDEADBEEF) |
| @@ -2753,6 +2753,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev, | |||
| 2753 | } | 2753 | } |
| 2754 | } | 2754 | } |
| 2755 | 2755 | ||
| 2756 | /** | ||
| 2757 | * r600_semaphore_ring_emit - emit a semaphore on the CP ring | ||
| 2758 | * | ||
| 2759 | * @rdev: radeon_device pointer | ||
| 2760 | * @ring: radeon ring buffer object | ||
| 2761 | * @semaphore: radeon semaphore object | ||
| 2762 | * @emit_wait: Is this a sempahore wait? | ||
| 2763 | * | ||
| 2764 | * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP | ||
| 2765 | * from running ahead of semaphore waits. | ||
| 2766 | */ | ||
| 2756 | bool r600_semaphore_ring_emit(struct radeon_device *rdev, | 2767 | bool r600_semaphore_ring_emit(struct radeon_device *rdev, |
| 2757 | struct radeon_ring *ring, | 2768 | struct radeon_ring *ring, |
| 2758 | struct radeon_semaphore *semaphore, | 2769 | struct radeon_semaphore *semaphore, |
| @@ -2768,6 +2779,13 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev, | |||
| 2768 | radeon_ring_write(ring, lower_32_bits(addr)); | 2779 | radeon_ring_write(ring, lower_32_bits(addr)); |
| 2769 | radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); | 2780 | radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); |
| 2770 | 2781 | ||
| 2782 | /* PFP_SYNC_ME packet only exists on 7xx+ */ | ||
| 2783 | if (emit_wait && (rdev->family >= CHIP_RV770)) { | ||
| 2784 | /* Prevent the PFP from running ahead of the semaphore wait */ | ||
| 2785 | radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); | ||
| 2786 | radeon_ring_write(ring, 0x0); | ||
| 2787 | } | ||
| 2788 | |||
| 2771 | return true; | 2789 | return true; |
| 2772 | } | 2790 | } |
| 2773 | 2791 | ||
| @@ -2845,7 +2863,7 @@ int r600_copy_cpdma(struct radeon_device *rdev, | |||
| 2845 | return r; | 2863 | return r; |
| 2846 | } | 2864 | } |
| 2847 | 2865 | ||
| 2848 | radeon_ring_unlock_commit(rdev, ring); | 2866 | radeon_ring_unlock_commit(rdev, ring, false); |
| 2849 | radeon_semaphore_free(rdev, &sem, *fence); | 2867 | radeon_semaphore_free(rdev, &sem, *fence); |
| 2850 | 2868 | ||
| 2851 | return r; | 2869 | return r; |
| @@ -3165,7 +3183,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) | |||
| 3165 | ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); | 3183 | ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); |
| 3166 | ib.ptr[2] = 0xDEADBEEF; | 3184 | ib.ptr[2] = 0xDEADBEEF; |
| 3167 | ib.length_dw = 3; | 3185 | ib.length_dw = 3; |
| 3168 | r = radeon_ib_schedule(rdev, &ib, NULL); | 3186 | r = radeon_ib_schedule(rdev, &ib, NULL, false); |
| 3169 | if (r) { | 3187 | if (r) { |
| 3170 | DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); | 3188 | DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); |
| 3171 | goto free_ib; | 3189 | goto free_ib; |
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c index 4969cef44a19..51fd98553eaf 100644 --- a/drivers/gpu/drm/radeon/r600_dma.c +++ b/drivers/gpu/drm/radeon/r600_dma.c | |||
| @@ -261,7 +261,7 @@ int r600_dma_ring_test(struct radeon_device *rdev, | |||
| 261 | radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); | 261 | radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); |
| 262 | radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff); | 262 | radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff); |
| 263 | radeon_ring_write(ring, 0xDEADBEEF); | 263 | radeon_ring_write(ring, 0xDEADBEEF); |
| 264 | radeon_ring_unlock_commit(rdev, ring); | 264 | radeon_ring_unlock_commit(rdev, ring, false); |
| 265 | 265 | ||
| 266 | for (i = 0; i < rdev->usec_timeout; i++) { | 266 | for (i = 0; i < rdev->usec_timeout; i++) { |
| 267 | tmp = readl(ptr); | 267 | tmp = readl(ptr); |
| @@ -368,7 +368,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) | |||
| 368 | ib.ptr[3] = 0xDEADBEEF; | 368 | ib.ptr[3] = 0xDEADBEEF; |
| 369 | ib.length_dw = 4; | 369 | ib.length_dw = 4; |
| 370 | 370 | ||
| 371 | r = radeon_ib_schedule(rdev, &ib, NULL); | 371 | r = radeon_ib_schedule(rdev, &ib, NULL, false); |
| 372 | if (r) { | 372 | if (r) { |
| 373 | radeon_ib_free(rdev, &ib); | 373 | radeon_ib_free(rdev, &ib); |
| 374 | DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); | 374 | DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); |
| @@ -493,7 +493,7 @@ int r600_copy_dma(struct radeon_device *rdev, | |||
| 493 | return r; | 493 | return r; |
| 494 | } | 494 | } |
| 495 | 495 | ||
| 496 | radeon_ring_unlock_commit(rdev, ring); | 496 | radeon_ring_unlock_commit(rdev, ring, false); |
| 497 | radeon_semaphore_free(rdev, &sem, *fence); | 497 | radeon_semaphore_free(rdev, &sem, *fence); |
| 498 | 498 | ||
| 499 | return r; | 499 | return r; |
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index f94e7a9afe75..0c4a7d8d93e0 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
| @@ -1597,6 +1597,7 @@ | |||
| 1597 | */ | 1597 | */ |
| 1598 | # define PACKET3_CP_DMA_CMD_SAIC (1 << 28) | 1598 | # define PACKET3_CP_DMA_CMD_SAIC (1 << 28) |
| 1599 | # define PACKET3_CP_DMA_CMD_DAIC (1 << 29) | 1599 | # define PACKET3_CP_DMA_CMD_DAIC (1 << 29) |
| 1600 | #define PACKET3_PFP_SYNC_ME 0x42 /* r7xx+ only */ | ||
| 1600 | #define PACKET3_SURFACE_SYNC 0x43 | 1601 | #define PACKET3_SURFACE_SYNC 0x43 |
| 1601 | # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) | 1602 | # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) |
| 1602 | # define PACKET3_FULL_CACHE_ENA (1 << 20) /* r7xx+ only */ | 1603 | # define PACKET3_FULL_CACHE_ENA (1 << 20) /* r7xx+ only */ |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 9e1732eb402c..b281886f6f51 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -105,6 +105,7 @@ extern int radeon_vm_size; | |||
| 105 | extern int radeon_vm_block_size; | 105 | extern int radeon_vm_block_size; |
| 106 | extern int radeon_deep_color; | 106 | extern int radeon_deep_color; |
| 107 | extern int radeon_use_pflipirq; | 107 | extern int radeon_use_pflipirq; |
| 108 | extern int radeon_bapm; | ||
| 108 | 109 | ||
| 109 | /* | 110 | /* |
| 110 | * Copy from radeon_drv.h so we don't have to include both and have conflicting | 111 | * Copy from radeon_drv.h so we don't have to include both and have conflicting |
| @@ -967,7 +968,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring, | |||
| 967 | unsigned size); | 968 | unsigned size); |
| 968 | void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib); | 969 | void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib); |
| 969 | int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, | 970 | int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, |
| 970 | struct radeon_ib *const_ib); | 971 | struct radeon_ib *const_ib, bool hdp_flush); |
| 971 | int radeon_ib_pool_init(struct radeon_device *rdev); | 972 | int radeon_ib_pool_init(struct radeon_device *rdev); |
| 972 | void radeon_ib_pool_fini(struct radeon_device *rdev); | 973 | void radeon_ib_pool_fini(struct radeon_device *rdev); |
| 973 | int radeon_ib_ring_tests(struct radeon_device *rdev); | 974 | int radeon_ib_ring_tests(struct radeon_device *rdev); |
| @@ -977,8 +978,10 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev, | |||
| 977 | void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp); | 978 | void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp); |
| 978 | int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); | 979 | int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); |
| 979 | int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); | 980 | int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); |
| 980 | void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp); | 981 | void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp, |
| 981 | void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp); | 982 | bool hdp_flush); |
| 983 | void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp, | ||
| 984 | bool hdp_flush); | ||
| 982 | void radeon_ring_undo(struct radeon_ring *ring); | 985 | void radeon_ring_undo(struct radeon_ring *ring); |
| 983 | void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp); | 986 | void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp); |
| 984 | int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); | 987 | int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index ee712c199b25..83f382e8e40e 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
| @@ -132,7 +132,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | |||
| 132 | * the buffers used for read only, which doubles the range | 132 | * the buffers used for read only, which doubles the range |
| 133 | * to 0 to 31. 32 is reserved for the kernel driver. | 133 | * to 0 to 31. 32 is reserved for the kernel driver. |
| 134 | */ | 134 | */ |
| 135 | priority = (r->flags & 0xf) * 2 + !!r->write_domain; | 135 | priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2 |
| 136 | + !!r->write_domain; | ||
| 136 | 137 | ||
| 137 | /* the first reloc of an UVD job is the msg and that must be in | 138 | /* the first reloc of an UVD job is the msg and that must be in |
| 138 | VRAM, also but everything into VRAM on AGP cards to avoid | 139 | VRAM, also but everything into VRAM on AGP cards to avoid |
| @@ -450,7 +451,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev, | |||
| 450 | radeon_vce_note_usage(rdev); | 451 | radeon_vce_note_usage(rdev); |
| 451 | 452 | ||
| 452 | radeon_cs_sync_rings(parser); | 453 | radeon_cs_sync_rings(parser); |
| 453 | r = radeon_ib_schedule(rdev, &parser->ib, NULL); | 454 | r = radeon_ib_schedule(rdev, &parser->ib, NULL, true); |
| 454 | if (r) { | 455 | if (r) { |
| 455 | DRM_ERROR("Failed to schedule IB !\n"); | 456 | DRM_ERROR("Failed to schedule IB !\n"); |
| 456 | } | 457 | } |
| @@ -541,9 +542,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, | |||
| 541 | 542 | ||
| 542 | if ((rdev->family >= CHIP_TAHITI) && | 543 | if ((rdev->family >= CHIP_TAHITI) && |
| 543 | (parser->chunk_const_ib_idx != -1)) { | 544 | (parser->chunk_const_ib_idx != -1)) { |
| 544 | r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib); | 545 | r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true); |
| 545 | } else { | 546 | } else { |
| 546 | r = radeon_ib_schedule(rdev, &parser->ib, NULL); | 547 | r = radeon_ib_schedule(rdev, &parser->ib, NULL, true); |
| 547 | } | 548 | } |
| 548 | 549 | ||
| 549 | out: | 550 | out: |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index c8ea050c8fa4..6a219bcee66d 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -1680,8 +1680,8 @@ int radeon_gpu_reset(struct radeon_device *rdev) | |||
| 1680 | radeon_save_bios_scratch_regs(rdev); | 1680 | radeon_save_bios_scratch_regs(rdev); |
| 1681 | /* block TTM */ | 1681 | /* block TTM */ |
| 1682 | resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); | 1682 | resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); |
| 1683 | radeon_pm_suspend(rdev); | ||
| 1684 | radeon_suspend(rdev); | 1683 | radeon_suspend(rdev); |
| 1684 | radeon_hpd_fini(rdev); | ||
| 1685 | 1685 | ||
| 1686 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { | 1686 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
| 1687 | ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i], | 1687 | ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i], |
| @@ -1726,9 +1726,39 @@ retry: | |||
| 1726 | } | 1726 | } |
| 1727 | } | 1727 | } |
| 1728 | 1728 | ||
| 1729 | radeon_pm_resume(rdev); | 1729 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { |
| 1730 | /* do dpm late init */ | ||
| 1731 | r = radeon_pm_late_init(rdev); | ||
| 1732 | if (r) { | ||
| 1733 | rdev->pm.dpm_enabled = false; | ||
| 1734 | DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); | ||
| 1735 | } | ||
| 1736 | } else { | ||
| 1737 | /* resume old pm late */ | ||
| 1738 | radeon_pm_resume(rdev); | ||
| 1739 | } | ||
| 1740 | |||
| 1741 | /* init dig PHYs, disp eng pll */ | ||
| 1742 | if (rdev->is_atom_bios) { | ||
| 1743 | radeon_atom_encoder_init(rdev); | ||
| 1744 | radeon_atom_disp_eng_pll_init(rdev); | ||
| 1745 | /* turn on the BL */ | ||
| 1746 | if (rdev->mode_info.bl_encoder) { | ||
| 1747 | u8 bl_level = radeon_get_backlight_level(rdev, | ||
| 1748 | rdev->mode_info.bl_encoder); | ||
| 1749 | radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder, | ||
| 1750 | bl_level); | ||
| 1751 | } | ||
| 1752 | } | ||
| 1753 | /* reset hpd state */ | ||
| 1754 | radeon_hpd_init(rdev); | ||
| 1755 | |||
| 1730 | drm_helper_resume_force_mode(rdev->ddev); | 1756 | drm_helper_resume_force_mode(rdev->ddev); |
| 1731 | 1757 | ||
| 1758 | /* set the power state here in case we are a PX system or headless */ | ||
| 1759 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) | ||
| 1760 | radeon_pm_compute_clocks(rdev); | ||
| 1761 | |||
| 1732 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); | 1762 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); |
| 1733 | if (r) { | 1763 | if (r) { |
| 1734 | /* bad news, how to tell it to userspace ? */ | 1764 | /* bad news, how to tell it to userspace ? */ |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 092d067f93e1..8df888908833 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
| @@ -180,6 +180,7 @@ int radeon_vm_size = 8; | |||
| 180 | int radeon_vm_block_size = -1; | 180 | int radeon_vm_block_size = -1; |
| 181 | int radeon_deep_color = 0; | 181 | int radeon_deep_color = 0; |
| 182 | int radeon_use_pflipirq = 2; | 182 | int radeon_use_pflipirq = 2; |
| 183 | int radeon_bapm = -1; | ||
| 183 | 184 | ||
| 184 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); | 185 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); |
| 185 | module_param_named(no_wb, radeon_no_wb, int, 0444); | 186 | module_param_named(no_wb, radeon_no_wb, int, 0444); |
| @@ -259,6 +260,9 @@ module_param_named(deep_color, radeon_deep_color, int, 0444); | |||
| 259 | MODULE_PARM_DESC(use_pflipirq, "Pflip irqs for pageflip completion (0 = disable, 1 = as fallback, 2 = exclusive (default))"); | 260 | MODULE_PARM_DESC(use_pflipirq, "Pflip irqs for pageflip completion (0 = disable, 1 = as fallback, 2 = exclusive (default))"); |
| 260 | module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444); | 261 | module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444); |
| 261 | 262 | ||
| 263 | MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)"); | ||
| 264 | module_param_named(bapm, radeon_bapm, int, 0444); | ||
| 265 | |||
| 262 | static struct pci_device_id pciidlist[] = { | 266 | static struct pci_device_id pciidlist[] = { |
| 263 | radeon_PCI_IDS | 267 | radeon_PCI_IDS |
| 264 | }; | 268 | }; |
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c index 65b0c213488d..5bf2c0a05827 100644 --- a/drivers/gpu/drm/radeon/radeon_ib.c +++ b/drivers/gpu/drm/radeon/radeon_ib.c | |||
| @@ -107,6 +107,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib) | |||
| 107 | * @rdev: radeon_device pointer | 107 | * @rdev: radeon_device pointer |
| 108 | * @ib: IB object to schedule | 108 | * @ib: IB object to schedule |
| 109 | * @const_ib: Const IB to schedule (SI only) | 109 | * @const_ib: Const IB to schedule (SI only) |
| 110 | * @hdp_flush: Whether or not to perform an HDP cache flush | ||
| 110 | * | 111 | * |
| 111 | * Schedule an IB on the associated ring (all asics). | 112 | * Schedule an IB on the associated ring (all asics). |
| 112 | * Returns 0 on success, error on failure. | 113 | * Returns 0 on success, error on failure. |
| @@ -122,7 +123,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib) | |||
| 122 | * to SI there was just a DE IB. | 123 | * to SI there was just a DE IB. |
| 123 | */ | 124 | */ |
| 124 | int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, | 125 | int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, |
| 125 | struct radeon_ib *const_ib) | 126 | struct radeon_ib *const_ib, bool hdp_flush) |
| 126 | { | 127 | { |
| 127 | struct radeon_ring *ring = &rdev->ring[ib->ring]; | 128 | struct radeon_ring *ring = &rdev->ring[ib->ring]; |
| 128 | int r = 0; | 129 | int r = 0; |
| @@ -176,7 +177,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, | |||
| 176 | if (ib->vm) | 177 | if (ib->vm) |
| 177 | radeon_vm_fence(rdev, ib->vm, ib->fence); | 178 | radeon_vm_fence(rdev, ib->vm, ib->fence); |
| 178 | 179 | ||
| 179 | radeon_ring_unlock_commit(rdev, ring); | 180 | radeon_ring_unlock_commit(rdev, ring, hdp_flush); |
| 180 | return 0; | 181 | return 0; |
| 181 | } | 182 | } |
| 182 | 183 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 23314be49480..164898b0010c 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
| @@ -460,10 +460,6 @@ static ssize_t radeon_get_dpm_state(struct device *dev, | |||
| 460 | struct radeon_device *rdev = ddev->dev_private; | 460 | struct radeon_device *rdev = ddev->dev_private; |
| 461 | enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; | 461 | enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; |
| 462 | 462 | ||
| 463 | if ((rdev->flags & RADEON_IS_PX) && | ||
| 464 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) | ||
| 465 | return snprintf(buf, PAGE_SIZE, "off\n"); | ||
| 466 | |||
| 467 | return snprintf(buf, PAGE_SIZE, "%s\n", | 463 | return snprintf(buf, PAGE_SIZE, "%s\n", |
| 468 | (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : | 464 | (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : |
| 469 | (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); | 465 | (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); |
| @@ -477,11 +473,6 @@ static ssize_t radeon_set_dpm_state(struct device *dev, | |||
| 477 | struct drm_device *ddev = dev_get_drvdata(dev); | 473 | struct drm_device *ddev = dev_get_drvdata(dev); |
| 478 | struct radeon_device *rdev = ddev->dev_private; | 474 | struct radeon_device *rdev = ddev->dev_private; |
| 479 | 475 | ||
| 480 | /* Can't set dpm state when the card is off */ | ||
| 481 | if ((rdev->flags & RADEON_IS_PX) && | ||
| 482 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) | ||
| 483 | return -EINVAL; | ||
| 484 | |||
| 485 | mutex_lock(&rdev->pm.mutex); | 476 | mutex_lock(&rdev->pm.mutex); |
| 486 | if (strncmp("battery", buf, strlen("battery")) == 0) | 477 | if (strncmp("battery", buf, strlen("battery")) == 0) |
| 487 | rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; | 478 | rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; |
| @@ -495,7 +486,12 @@ static ssize_t radeon_set_dpm_state(struct device *dev, | |||
| 495 | goto fail; | 486 | goto fail; |
| 496 | } | 487 | } |
| 497 | mutex_unlock(&rdev->pm.mutex); | 488 | mutex_unlock(&rdev->pm.mutex); |
| 498 | radeon_pm_compute_clocks(rdev); | 489 | |
| 490 | /* Can't set dpm state when the card is off */ | ||
| 491 | if (!(rdev->flags & RADEON_IS_PX) || | ||
| 492 | (ddev->switch_power_state == DRM_SWITCH_POWER_ON)) | ||
| 493 | radeon_pm_compute_clocks(rdev); | ||
| 494 | |||
| 499 | fail: | 495 | fail: |
| 500 | return count; | 496 | return count; |
| 501 | } | 497 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 5b4e0cf231a0..d65607902537 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
| @@ -177,16 +177,18 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig | |||
| 177 | * | 177 | * |
| 178 | * @rdev: radeon_device pointer | 178 | * @rdev: radeon_device pointer |
| 179 | * @ring: radeon_ring structure holding ring information | 179 | * @ring: radeon_ring structure holding ring information |
| 180 | * @hdp_flush: Whether or not to perform an HDP cache flush | ||
| 180 | * | 181 | * |
| 181 | * Update the wptr (write pointer) to tell the GPU to | 182 | * Update the wptr (write pointer) to tell the GPU to |
| 182 | * execute new commands on the ring buffer (all asics). | 183 | * execute new commands on the ring buffer (all asics). |
| 183 | */ | 184 | */ |
| 184 | void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring) | 185 | void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring, |
| 186 | bool hdp_flush) | ||
| 185 | { | 187 | { |
| 186 | /* If we are emitting the HDP flush via the ring buffer, we need to | 188 | /* If we are emitting the HDP flush via the ring buffer, we need to |
| 187 | * do it before padding. | 189 | * do it before padding. |
| 188 | */ | 190 | */ |
| 189 | if (rdev->asic->ring[ring->idx]->hdp_flush) | 191 | if (hdp_flush && rdev->asic->ring[ring->idx]->hdp_flush) |
| 190 | rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring); | 192 | rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring); |
| 191 | /* We pad to match fetch size */ | 193 | /* We pad to match fetch size */ |
| 192 | while (ring->wptr & ring->align_mask) { | 194 | while (ring->wptr & ring->align_mask) { |
| @@ -196,7 +198,7 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring) | |||
| 196 | /* If we are emitting the HDP flush via MMIO, we need to do it after | 198 | /* If we are emitting the HDP flush via MMIO, we need to do it after |
| 197 | * all CPU writes to VRAM finished. | 199 | * all CPU writes to VRAM finished. |
| 198 | */ | 200 | */ |
| 199 | if (rdev->asic->mmio_hdp_flush) | 201 | if (hdp_flush && rdev->asic->mmio_hdp_flush) |
| 200 | rdev->asic->mmio_hdp_flush(rdev); | 202 | rdev->asic->mmio_hdp_flush(rdev); |
| 201 | radeon_ring_set_wptr(rdev, ring); | 203 | radeon_ring_set_wptr(rdev, ring); |
| 202 | } | 204 | } |
| @@ -207,12 +209,14 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring) | |||
| 207 | * | 209 | * |
| 208 | * @rdev: radeon_device pointer | 210 | * @rdev: radeon_device pointer |
| 209 | * @ring: radeon_ring structure holding ring information | 211 | * @ring: radeon_ring structure holding ring information |
| 212 | * @hdp_flush: Whether or not to perform an HDP cache flush | ||
| 210 | * | 213 | * |
| 211 | * Call radeon_ring_commit() then unlock the ring (all asics). | 214 | * Call radeon_ring_commit() then unlock the ring (all asics). |
| 212 | */ | 215 | */ |
| 213 | void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring) | 216 | void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring, |
| 217 | bool hdp_flush) | ||
| 214 | { | 218 | { |
| 215 | radeon_ring_commit(rdev, ring); | 219 | radeon_ring_commit(rdev, ring, hdp_flush); |
| 216 | mutex_unlock(&rdev->ring_lock); | 220 | mutex_unlock(&rdev->ring_lock); |
| 217 | } | 221 | } |
| 218 | 222 | ||
| @@ -372,7 +376,7 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring, | |||
| 372 | radeon_ring_write(ring, data[i]); | 376 | radeon_ring_write(ring, data[i]); |
| 373 | } | 377 | } |
| 374 | 378 | ||
| 375 | radeon_ring_unlock_commit(rdev, ring); | 379 | radeon_ring_unlock_commit(rdev, ring, false); |
| 376 | kfree(data); | 380 | kfree(data); |
| 377 | return 0; | 381 | return 0; |
| 378 | } | 382 | } |
| @@ -400,9 +404,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig | |||
| 400 | /* Allocate ring buffer */ | 404 | /* Allocate ring buffer */ |
| 401 | if (ring->ring_obj == NULL) { | 405 | if (ring->ring_obj == NULL) { |
| 402 | r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true, | 406 | r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true, |
| 403 | RADEON_GEM_DOMAIN_GTT, | 407 | RADEON_GEM_DOMAIN_GTT, 0, |
| 404 | (rdev->flags & RADEON_IS_PCIE) ? | ||
| 405 | RADEON_GEM_GTT_WC : 0, | ||
| 406 | NULL, &ring->ring_obj); | 408 | NULL, &ring->ring_obj); |
| 407 | if (r) { | 409 | if (r) { |
| 408 | dev_err(rdev->dev, "(%d) ring create failed\n", r); | 410 | dev_err(rdev->dev, "(%d) ring create failed\n", r); |
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c index dbd6bcde92de..56d9fd66d8ae 100644 --- a/drivers/gpu/drm/radeon/radeon_semaphore.c +++ b/drivers/gpu/drm/radeon/radeon_semaphore.c | |||
| @@ -179,7 +179,7 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev, | |||
| 179 | continue; | 179 | continue; |
| 180 | } | 180 | } |
| 181 | 181 | ||
| 182 | radeon_ring_commit(rdev, &rdev->ring[i]); | 182 | radeon_ring_commit(rdev, &rdev->ring[i], false); |
| 183 | radeon_fence_note_sync(fence, ring); | 183 | radeon_fence_note_sync(fence, ring); |
| 184 | 184 | ||
| 185 | semaphore->gpu_addr += 8; | 185 | semaphore->gpu_addr += 8; |
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 5adf4207453d..17bc3dced9f1 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c | |||
| @@ -288,7 +288,7 @@ static int radeon_test_create_and_emit_fence(struct radeon_device *rdev, | |||
| 288 | return r; | 288 | return r; |
| 289 | } | 289 | } |
| 290 | radeon_fence_emit(rdev, fence, ring->idx); | 290 | radeon_fence_emit(rdev, fence, ring->idx); |
| 291 | radeon_ring_unlock_commit(rdev, ring); | 291 | radeon_ring_unlock_commit(rdev, ring, false); |
| 292 | } | 292 | } |
| 293 | return 0; | 293 | return 0; |
| 294 | } | 294 | } |
| @@ -313,7 +313,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev, | |||
| 313 | goto out_cleanup; | 313 | goto out_cleanup; |
| 314 | } | 314 | } |
| 315 | radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); | 315 | radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); |
| 316 | radeon_ring_unlock_commit(rdev, ringA); | 316 | radeon_ring_unlock_commit(rdev, ringA, false); |
| 317 | 317 | ||
| 318 | r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1); | 318 | r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1); |
| 319 | if (r) | 319 | if (r) |
| @@ -325,7 +325,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev, | |||
| 325 | goto out_cleanup; | 325 | goto out_cleanup; |
| 326 | } | 326 | } |
| 327 | radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); | 327 | radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); |
| 328 | radeon_ring_unlock_commit(rdev, ringA); | 328 | radeon_ring_unlock_commit(rdev, ringA, false); |
| 329 | 329 | ||
| 330 | r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2); | 330 | r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2); |
| 331 | if (r) | 331 | if (r) |
| @@ -344,7 +344,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev, | |||
| 344 | goto out_cleanup; | 344 | goto out_cleanup; |
| 345 | } | 345 | } |
| 346 | radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore); | 346 | radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore); |
| 347 | radeon_ring_unlock_commit(rdev, ringB); | 347 | radeon_ring_unlock_commit(rdev, ringB, false); |
| 348 | 348 | ||
| 349 | r = radeon_fence_wait(fence1, false); | 349 | r = radeon_fence_wait(fence1, false); |
| 350 | if (r) { | 350 | if (r) { |
| @@ -365,7 +365,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev, | |||
| 365 | goto out_cleanup; | 365 | goto out_cleanup; |
| 366 | } | 366 | } |
| 367 | radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore); | 367 | radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore); |
| 368 | radeon_ring_unlock_commit(rdev, ringB); | 368 | radeon_ring_unlock_commit(rdev, ringB, false); |
| 369 | 369 | ||
| 370 | r = radeon_fence_wait(fence2, false); | 370 | r = radeon_fence_wait(fence2, false); |
| 371 | if (r) { | 371 | if (r) { |
| @@ -408,7 +408,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev, | |||
| 408 | goto out_cleanup; | 408 | goto out_cleanup; |
| 409 | } | 409 | } |
| 410 | radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); | 410 | radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); |
| 411 | radeon_ring_unlock_commit(rdev, ringA); | 411 | radeon_ring_unlock_commit(rdev, ringA, false); |
| 412 | 412 | ||
| 413 | r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA); | 413 | r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA); |
| 414 | if (r) | 414 | if (r) |
| @@ -420,7 +420,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev, | |||
| 420 | goto out_cleanup; | 420 | goto out_cleanup; |
| 421 | } | 421 | } |
| 422 | radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore); | 422 | radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore); |
| 423 | radeon_ring_unlock_commit(rdev, ringB); | 423 | radeon_ring_unlock_commit(rdev, ringB, false); |
| 424 | r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB); | 424 | r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB); |
| 425 | if (r) | 425 | if (r) |
| 426 | goto out_cleanup; | 426 | goto out_cleanup; |
| @@ -442,7 +442,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev, | |||
| 442 | goto out_cleanup; | 442 | goto out_cleanup; |
| 443 | } | 443 | } |
| 444 | radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore); | 444 | radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore); |
| 445 | radeon_ring_unlock_commit(rdev, ringC); | 445 | radeon_ring_unlock_commit(rdev, ringC, false); |
| 446 | 446 | ||
| 447 | for (i = 0; i < 30; ++i) { | 447 | for (i = 0; i < 30; ++i) { |
| 448 | mdelay(100); | 448 | mdelay(100); |
| @@ -468,7 +468,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev, | |||
| 468 | goto out_cleanup; | 468 | goto out_cleanup; |
| 469 | } | 469 | } |
| 470 | radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore); | 470 | radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore); |
| 471 | radeon_ring_unlock_commit(rdev, ringC); | 471 | radeon_ring_unlock_commit(rdev, ringC, false); |
| 472 | 472 | ||
| 473 | mdelay(1000); | 473 | mdelay(1000); |
| 474 | 474 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 6bf55ec85b62..341848a14376 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c | |||
| @@ -646,7 +646,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev, | |||
| 646 | ib.ptr[i] = PACKET2(0); | 646 | ib.ptr[i] = PACKET2(0); |
| 647 | ib.length_dw = 16; | 647 | ib.length_dw = 16; |
| 648 | 648 | ||
| 649 | r = radeon_ib_schedule(rdev, &ib, NULL); | 649 | r = radeon_ib_schedule(rdev, &ib, NULL, false); |
| 650 | if (r) | 650 | if (r) |
| 651 | goto err; | 651 | goto err; |
| 652 | ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence); | 652 | ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence); |
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index f9b70a43aa52..c7190aadbd89 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c | |||
| @@ -368,7 +368,7 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring, | |||
| 368 | for (i = ib.length_dw; i < ib_size_dw; ++i) | 368 | for (i = ib.length_dw; i < ib_size_dw; ++i) |
| 369 | ib.ptr[i] = 0x0; | 369 | ib.ptr[i] = 0x0; |
| 370 | 370 | ||
| 371 | r = radeon_ib_schedule(rdev, &ib, NULL); | 371 | r = radeon_ib_schedule(rdev, &ib, NULL, false); |
| 372 | if (r) { | 372 | if (r) { |
| 373 | DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); | 373 | DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); |
| 374 | } | 374 | } |
| @@ -425,7 +425,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring, | |||
| 425 | for (i = ib.length_dw; i < ib_size_dw; ++i) | 425 | for (i = ib.length_dw; i < ib_size_dw; ++i) |
| 426 | ib.ptr[i] = 0x0; | 426 | ib.ptr[i] = 0x0; |
| 427 | 427 | ||
| 428 | r = radeon_ib_schedule(rdev, &ib, NULL); | 428 | r = radeon_ib_schedule(rdev, &ib, NULL, false); |
| 429 | if (r) { | 429 | if (r) { |
| 430 | DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); | 430 | DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); |
| 431 | } | 431 | } |
| @@ -715,7 +715,7 @@ int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) | |||
| 715 | return r; | 715 | return r; |
| 716 | } | 716 | } |
| 717 | radeon_ring_write(ring, VCE_CMD_END); | 717 | radeon_ring_write(ring, VCE_CMD_END); |
| 718 | radeon_ring_unlock_commit(rdev, ring); | 718 | radeon_ring_unlock_commit(rdev, ring, false); |
| 719 | 719 | ||
| 720 | for (i = 0; i < rdev->usec_timeout; i++) { | 720 | for (i = 0; i < rdev->usec_timeout; i++) { |
| 721 | if (vce_v1_0_get_rptr(rdev, ring) != rptr) | 721 | if (vce_v1_0_get_rptr(rdev, ring) != rptr) |
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index ccae4d9dc3de..088ffdc2f577 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c | |||
| @@ -420,7 +420,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev, | |||
| 420 | radeon_asic_vm_pad_ib(rdev, &ib); | 420 | radeon_asic_vm_pad_ib(rdev, &ib); |
| 421 | WARN_ON(ib.length_dw > 64); | 421 | WARN_ON(ib.length_dw > 64); |
| 422 | 422 | ||
| 423 | r = radeon_ib_schedule(rdev, &ib, NULL); | 423 | r = radeon_ib_schedule(rdev, &ib, NULL, false); |
| 424 | if (r) | 424 | if (r) |
| 425 | goto error; | 425 | goto error; |
| 426 | 426 | ||
| @@ -483,6 +483,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, | |||
| 483 | /* add a clone of the bo_va to clear the old address */ | 483 | /* add a clone of the bo_va to clear the old address */ |
| 484 | struct radeon_bo_va *tmp; | 484 | struct radeon_bo_va *tmp; |
| 485 | tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); | 485 | tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); |
| 486 | if (!tmp) { | ||
| 487 | mutex_unlock(&vm->mutex); | ||
| 488 | return -ENOMEM; | ||
| 489 | } | ||
| 486 | tmp->it.start = bo_va->it.start; | 490 | tmp->it.start = bo_va->it.start; |
| 487 | tmp->it.last = bo_va->it.last; | 491 | tmp->it.last = bo_va->it.last; |
| 488 | tmp->vm = vm; | 492 | tmp->vm = vm; |
| @@ -693,7 +697,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev, | |||
| 693 | radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj); | 697 | radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj); |
| 694 | radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use); | 698 | radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use); |
| 695 | WARN_ON(ib.length_dw > ndw); | 699 | WARN_ON(ib.length_dw > ndw); |
| 696 | r = radeon_ib_schedule(rdev, &ib, NULL); | 700 | r = radeon_ib_schedule(rdev, &ib, NULL, false); |
| 697 | if (r) { | 701 | if (r) { |
| 698 | radeon_ib_free(rdev, &ib); | 702 | radeon_ib_free(rdev, &ib); |
| 699 | return r; | 703 | return r; |
| @@ -957,7 +961,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev, | |||
| 957 | WARN_ON(ib.length_dw > ndw); | 961 | WARN_ON(ib.length_dw > ndw); |
| 958 | 962 | ||
| 959 | radeon_semaphore_sync_to(ib.semaphore, vm->fence); | 963 | radeon_semaphore_sync_to(ib.semaphore, vm->fence); |
| 960 | r = radeon_ib_schedule(rdev, &ib, NULL); | 964 | r = radeon_ib_schedule(rdev, &ib, NULL, false); |
| 961 | if (r) { | 965 | if (r) { |
| 962 | radeon_ib_free(rdev, &ib); | 966 | radeon_ib_free(rdev, &ib); |
| 963 | return r; | 967 | return r; |
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 3e21e869015f..8a477bf1fdb3 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
| @@ -124,7 +124,7 @@ void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) | |||
| 124 | radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST); | 124 | radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST); |
| 125 | radeon_ring_write(ring, PACKET0(0x20C8, 0)); | 125 | radeon_ring_write(ring, PACKET0(0x20C8, 0)); |
| 126 | radeon_ring_write(ring, 0); | 126 | radeon_ring_write(ring, 0); |
| 127 | radeon_ring_unlock_commit(rdev, ring); | 127 | radeon_ring_unlock_commit(rdev, ring, false); |
| 128 | } | 128 | } |
| 129 | 129 | ||
| 130 | int rv515_mc_wait_for_idle(struct radeon_device *rdev) | 130 | int rv515_mc_wait_for_idle(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c index bbf2e076ee45..74426ac2bb5c 100644 --- a/drivers/gpu/drm/radeon/rv770_dma.c +++ b/drivers/gpu/drm/radeon/rv770_dma.c | |||
| @@ -90,7 +90,7 @@ int rv770_copy_dma(struct radeon_device *rdev, | |||
| 90 | return r; | 90 | return r; |
| 91 | } | 91 | } |
| 92 | 92 | ||
| 93 | radeon_ring_unlock_commit(rdev, ring); | 93 | radeon_ring_unlock_commit(rdev, ring, false); |
| 94 | radeon_semaphore_free(rdev, &sem, *fence); | 94 | radeon_semaphore_free(rdev, &sem, *fence); |
| 95 | 95 | ||
| 96 | return r; | 96 | return r; |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 011779bd2b3d..a1274a31405c 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
| @@ -3057,7 +3057,7 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
| 3057 | u32 sx_debug_1; | 3057 | u32 sx_debug_1; |
| 3058 | u32 hdp_host_path_cntl; | 3058 | u32 hdp_host_path_cntl; |
| 3059 | u32 tmp; | 3059 | u32 tmp; |
| 3060 | int i, j, k; | 3060 | int i, j; |
| 3061 | 3061 | ||
| 3062 | switch (rdev->family) { | 3062 | switch (rdev->family) { |
| 3063 | case CHIP_TAHITI: | 3063 | case CHIP_TAHITI: |
| @@ -3255,12 +3255,11 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
| 3255 | rdev->config.si.max_sh_per_se, | 3255 | rdev->config.si.max_sh_per_se, |
| 3256 | rdev->config.si.max_cu_per_sh); | 3256 | rdev->config.si.max_cu_per_sh); |
| 3257 | 3257 | ||
| 3258 | rdev->config.si.active_cus = 0; | ||
| 3258 | for (i = 0; i < rdev->config.si.max_shader_engines; i++) { | 3259 | for (i = 0; i < rdev->config.si.max_shader_engines; i++) { |
| 3259 | for (j = 0; j < rdev->config.si.max_sh_per_se; j++) { | 3260 | for (j = 0; j < rdev->config.si.max_sh_per_se; j++) { |
| 3260 | for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) { | 3261 | rdev->config.si.active_cus += |
| 3261 | rdev->config.si.active_cus += | 3262 | hweight32(si_get_cu_active_bitmap(rdev, i, j)); |
| 3262 | hweight32(si_get_cu_active_bitmap(rdev, i, j)); | ||
| 3263 | } | ||
| 3264 | } | 3263 | } |
| 3265 | } | 3264 | } |
| 3266 | 3265 | ||
| @@ -3541,7 +3540,7 @@ static int si_cp_start(struct radeon_device *rdev) | |||
| 3541 | radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); | 3540 | radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); |
| 3542 | radeon_ring_write(ring, 0xc000); | 3541 | radeon_ring_write(ring, 0xc000); |
| 3543 | radeon_ring_write(ring, 0xe000); | 3542 | radeon_ring_write(ring, 0xe000); |
| 3544 | radeon_ring_unlock_commit(rdev, ring); | 3543 | radeon_ring_unlock_commit(rdev, ring, false); |
| 3545 | 3544 | ||
| 3546 | si_cp_enable(rdev, true); | 3545 | si_cp_enable(rdev, true); |
| 3547 | 3546 | ||
| @@ -3570,7 +3569,7 @@ static int si_cp_start(struct radeon_device *rdev) | |||
| 3570 | radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ | 3569 | radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ |
| 3571 | radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ | 3570 | radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ |
| 3572 | 3571 | ||
| 3573 | radeon_ring_unlock_commit(rdev, ring); | 3572 | radeon_ring_unlock_commit(rdev, ring, false); |
| 3574 | 3573 | ||
| 3575 | for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) { | 3574 | for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) { |
| 3576 | ring = &rdev->ring[i]; | 3575 | ring = &rdev->ring[i]; |
| @@ -3580,7 +3579,7 @@ static int si_cp_start(struct radeon_device *rdev) | |||
| 3580 | radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0)); | 3579 | radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0)); |
| 3581 | radeon_ring_write(ring, 0); | 3580 | radeon_ring_write(ring, 0); |
| 3582 | 3581 | ||
| 3583 | radeon_ring_unlock_commit(rdev, ring); | 3582 | radeon_ring_unlock_commit(rdev, ring, false); |
| 3584 | } | 3583 | } |
| 3585 | 3584 | ||
| 3586 | return 0; | 3585 | return 0; |
| @@ -5028,7 +5027,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) | |||
| 5028 | 5027 | ||
| 5029 | /* flush hdp cache */ | 5028 | /* flush hdp cache */ |
| 5030 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | 5029 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
| 5031 | radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | 5030 | radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | |
| 5032 | WRITE_DATA_DST_SEL(0))); | 5031 | WRITE_DATA_DST_SEL(0))); |
| 5033 | radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2); | 5032 | radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2); |
| 5034 | radeon_ring_write(ring, 0); | 5033 | radeon_ring_write(ring, 0); |
| @@ -5036,7 +5035,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) | |||
| 5036 | 5035 | ||
| 5037 | /* bits 0-15 are the VM contexts0-15 */ | 5036 | /* bits 0-15 are the VM contexts0-15 */ |
| 5038 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | 5037 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
| 5039 | radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | 5038 | radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | |
| 5040 | WRITE_DATA_DST_SEL(0))); | 5039 | WRITE_DATA_DST_SEL(0))); |
| 5041 | radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); | 5040 | radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); |
| 5042 | radeon_ring_write(ring, 0); | 5041 | radeon_ring_write(ring, 0); |
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index 716505129450..7c22baaf94db 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c | |||
| @@ -275,7 +275,7 @@ int si_copy_dma(struct radeon_device *rdev, | |||
| 275 | return r; | 275 | return r; |
| 276 | } | 276 | } |
| 277 | 277 | ||
| 278 | radeon_ring_unlock_commit(rdev, ring); | 278 | radeon_ring_unlock_commit(rdev, ring, false); |
| 279 | radeon_semaphore_free(rdev, &sem, *fence); | 279 | radeon_semaphore_free(rdev, &sem, *fence); |
| 280 | 280 | ||
| 281 | return r; | 281 | return r; |
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c index 32e50be9c4ac..57f780053b3e 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.c +++ b/drivers/gpu/drm/radeon/trinity_dpm.c | |||
| @@ -1874,16 +1874,22 @@ int trinity_dpm_init(struct radeon_device *rdev) | |||
| 1874 | for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) | 1874 | for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) |
| 1875 | pi->at[i] = TRINITY_AT_DFLT; | 1875 | pi->at[i] = TRINITY_AT_DFLT; |
| 1876 | 1876 | ||
| 1877 | /* There are stability issues reported on with | 1877 | if (radeon_bapm == -1) { |
| 1878 | * bapm enabled when switching between AC and battery | 1878 | /* There are stability issues reported on with |
| 1879 | * power. At the same time, some MSI boards hang | 1879 | * bapm enabled when switching between AC and battery |
| 1880 | * if it's not enabled and dpm is enabled. Just enable | 1880 | * power. At the same time, some MSI boards hang |
| 1881 | * it for MSI boards right now. | 1881 | * if it's not enabled and dpm is enabled. Just enable |
| 1882 | */ | 1882 | * it for MSI boards right now. |
| 1883 | if (rdev->pdev->subsystem_vendor == 0x1462) | 1883 | */ |
| 1884 | pi->enable_bapm = true; | 1884 | if (rdev->pdev->subsystem_vendor == 0x1462) |
| 1885 | else | 1885 | pi->enable_bapm = true; |
| 1886 | else | ||
| 1887 | pi->enable_bapm = false; | ||
| 1888 | } else if (radeon_bapm == 0) { | ||
| 1886 | pi->enable_bapm = false; | 1889 | pi->enable_bapm = false; |
| 1890 | } else { | ||
| 1891 | pi->enable_bapm = true; | ||
| 1892 | } | ||
| 1887 | pi->enable_nbps_policy = true; | 1893 | pi->enable_nbps_policy = true; |
| 1888 | pi->enable_sclk_ds = true; | 1894 | pi->enable_sclk_ds = true; |
| 1889 | pi->enable_gfx_power_gating = true; | 1895 | pi->enable_gfx_power_gating = true; |
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index be42c8125203..cda391347286 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c | |||
| @@ -124,7 +124,7 @@ int uvd_v1_0_init(struct radeon_device *rdev) | |||
| 124 | radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0)); | 124 | radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0)); |
| 125 | radeon_ring_write(ring, 3); | 125 | radeon_ring_write(ring, 3); |
| 126 | 126 | ||
| 127 | radeon_ring_unlock_commit(rdev, ring); | 127 | radeon_ring_unlock_commit(rdev, ring, false); |
| 128 | 128 | ||
| 129 | done: | 129 | done: |
| 130 | /* lower clocks again */ | 130 | /* lower clocks again */ |
| @@ -331,7 +331,7 @@ int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) | |||
| 331 | } | 331 | } |
| 332 | radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0)); | 332 | radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0)); |
| 333 | radeon_ring_write(ring, 0xDEADBEEF); | 333 | radeon_ring_write(ring, 0xDEADBEEF); |
| 334 | radeon_ring_unlock_commit(rdev, ring); | 334 | radeon_ring_unlock_commit(rdev, ring, false); |
| 335 | for (i = 0; i < rdev->usec_timeout; i++) { | 335 | for (i = 0; i < rdev->usec_timeout; i++) { |
| 336 | tmp = RREG32(UVD_CONTEXT_ID); | 336 | tmp = RREG32(UVD_CONTEXT_ID); |
| 337 | if (tmp == 0xDEADBEEF) | 337 | if (tmp == 0xDEADBEEF) |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index eb50818de41f..73bd9e2e42bc 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
| @@ -1591,6 +1591,9 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask) | |||
| 1591 | if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev)) | 1591 | if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev)) |
| 1592 | hdev->claimed |= HID_CLAIMED_HIDRAW; | 1592 | hdev->claimed |= HID_CLAIMED_HIDRAW; |
| 1593 | 1593 | ||
| 1594 | if (connect_mask & HID_CONNECT_DRIVER) | ||
| 1595 | hdev->claimed |= HID_CLAIMED_DRIVER; | ||
| 1596 | |||
| 1594 | /* Drivers with the ->raw_event callback set are not required to connect | 1597 | /* Drivers with the ->raw_event callback set are not required to connect |
| 1595 | * to any other listener. */ | 1598 | * to any other listener. */ |
| 1596 | if (!hdev->claimed && !hdev->driver->raw_event) { | 1599 | if (!hdev->claimed && !hdev->driver->raw_event) { |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 29e9b4872ebd..cd9c9e96cf0e 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
| @@ -296,6 +296,9 @@ | |||
| 296 | #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7 | 296 | #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7 |
| 297 | #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001 | 297 | #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001 |
| 298 | 298 | ||
| 299 | #define USB_VENDOR_ID_ELAN 0x04f3 | ||
| 300 | #define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089 | ||
| 301 | |||
| 299 | #define USB_VENDOR_ID_ELECOM 0x056e | 302 | #define USB_VENDOR_ID_ELECOM 0x056e |
| 300 | #define USB_DEVICE_ID_ELECOM_BM084 0x0061 | 303 | #define USB_DEVICE_ID_ELECOM_BM084 0x0061 |
| 301 | 304 | ||
| @@ -735,6 +738,8 @@ | |||
| 735 | #define USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL 0xff | 738 | #define USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL 0xff |
| 736 | 739 | ||
| 737 | #define USB_VENDOR_ID_PIXART 0x093a | 740 | #define USB_VENDOR_ID_PIXART 0x093a |
| 741 | #define USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2 0x0137 | ||
| 742 | #define USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE 0x2510 | ||
| 738 | #define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN 0x8001 | 743 | #define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN 0x8001 |
| 739 | #define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1 0x8002 | 744 | #define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1 0x8002 |
| 740 | #define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2 0x8003 | 745 | #define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2 0x8003 |
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index b7ba82960c79..71f569292cab 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c | |||
| @@ -385,18 +385,6 @@ static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev, | |||
| 385 | 385 | ||
| 386 | djdev = djrcv_dev->paired_dj_devices[dj_report->device_index]; | 386 | djdev = djrcv_dev->paired_dj_devices[dj_report->device_index]; |
| 387 | 387 | ||
| 388 | if (!djdev) { | ||
| 389 | dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]" | ||
| 390 | " is NULL, index %d\n", dj_report->device_index); | ||
| 391 | kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report)); | ||
| 392 | |||
| 393 | if (schedule_work(&djrcv_dev->work) == 0) { | ||
| 394 | dbg_hid("%s: did not schedule the work item, was already " | ||
| 395 | "queued\n", __func__); | ||
| 396 | } | ||
| 397 | return; | ||
| 398 | } | ||
| 399 | |||
| 400 | memset(reportbuffer, 0, sizeof(reportbuffer)); | 388 | memset(reportbuffer, 0, sizeof(reportbuffer)); |
| 401 | 389 | ||
| 402 | for (i = 0; i < NUMBER_OF_HID_REPORTS; i++) { | 390 | for (i = 0; i < NUMBER_OF_HID_REPORTS; i++) { |
| @@ -421,18 +409,6 @@ static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev, | |||
| 421 | 409 | ||
| 422 | dj_device = djrcv_dev->paired_dj_devices[dj_report->device_index]; | 410 | dj_device = djrcv_dev->paired_dj_devices[dj_report->device_index]; |
| 423 | 411 | ||
| 424 | if (dj_device == NULL) { | ||
| 425 | dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]" | ||
| 426 | " is NULL, index %d\n", dj_report->device_index); | ||
| 427 | kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report)); | ||
| 428 | |||
| 429 | if (schedule_work(&djrcv_dev->work) == 0) { | ||
| 430 | dbg_hid("%s: did not schedule the work item, was already " | ||
| 431 | "queued\n", __func__); | ||
| 432 | } | ||
| 433 | return; | ||
| 434 | } | ||
| 435 | |||
| 436 | if ((dj_report->report_type > ARRAY_SIZE(hid_reportid_size_map) - 1) || | 412 | if ((dj_report->report_type > ARRAY_SIZE(hid_reportid_size_map) - 1) || |
| 437 | (hid_reportid_size_map[dj_report->report_type] == 0)) { | 413 | (hid_reportid_size_map[dj_report->report_type] == 0)) { |
| 438 | dbg_hid("invalid report type:%x\n", dj_report->report_type); | 414 | dbg_hid("invalid report type:%x\n", dj_report->report_type); |
| @@ -656,7 +632,6 @@ static int logi_dj_raw_event(struct hid_device *hdev, | |||
| 656 | struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev); | 632 | struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev); |
| 657 | struct dj_report *dj_report = (struct dj_report *) data; | 633 | struct dj_report *dj_report = (struct dj_report *) data; |
| 658 | unsigned long flags; | 634 | unsigned long flags; |
| 659 | bool report_processed = false; | ||
| 660 | 635 | ||
| 661 | dbg_hid("%s, size:%d\n", __func__, size); | 636 | dbg_hid("%s, size:%d\n", __func__, size); |
| 662 | 637 | ||
| @@ -683,34 +658,53 @@ static int logi_dj_raw_event(struct hid_device *hdev, | |||
| 683 | * device (via hid_input_report() ) and return 1 so hid-core does not do | 658 | * device (via hid_input_report() ) and return 1 so hid-core does not do |
| 684 | * anything else with it. | 659 | * anything else with it. |
| 685 | */ | 660 | */ |
| 661 | |||
| 662 | /* case 1) */ | ||
| 663 | if (data[0] != REPORT_ID_DJ_SHORT) | ||
| 664 | return false; | ||
| 665 | |||
| 686 | if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) || | 666 | if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) || |
| 687 | (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) { | 667 | (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) { |
| 688 | dev_err(&hdev->dev, "%s: invalid device index:%d\n", | 668 | /* |
| 669 | * Device index is wrong, bail out. | ||
| 670 | * This driver can ignore safely the receiver notifications, | ||
| 671 | * so ignore those reports too. | ||
| 672 | */ | ||
| 673 | if (dj_report->device_index != DJ_RECEIVER_INDEX) | ||
| 674 | dev_err(&hdev->dev, "%s: invalid device index:%d\n", | ||
| 689 | __func__, dj_report->device_index); | 675 | __func__, dj_report->device_index); |
| 690 | return false; | 676 | return false; |
| 691 | } | 677 | } |
| 692 | 678 | ||
| 693 | spin_lock_irqsave(&djrcv_dev->lock, flags); | 679 | spin_lock_irqsave(&djrcv_dev->lock, flags); |
| 694 | if (dj_report->report_id == REPORT_ID_DJ_SHORT) { | 680 | |
| 695 | switch (dj_report->report_type) { | 681 | if (!djrcv_dev->paired_dj_devices[dj_report->device_index]) { |
| 696 | case REPORT_TYPE_NOTIF_DEVICE_PAIRED: | 682 | /* received an event for an unknown device, bail out */ |
| 697 | case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED: | 683 | logi_dj_recv_queue_notification(djrcv_dev, dj_report); |
| 698 | logi_dj_recv_queue_notification(djrcv_dev, dj_report); | 684 | goto out; |
| 699 | break; | 685 | } |
| 700 | case REPORT_TYPE_NOTIF_CONNECTION_STATUS: | 686 | |
| 701 | if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] == | 687 | switch (dj_report->report_type) { |
| 702 | STATUS_LINKLOSS) { | 688 | case REPORT_TYPE_NOTIF_DEVICE_PAIRED: |
| 703 | logi_dj_recv_forward_null_report(djrcv_dev, dj_report); | 689 | /* pairing notifications are handled above the switch */ |
| 704 | } | 690 | break; |
| 705 | break; | 691 | case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED: |
| 706 | default: | 692 | logi_dj_recv_queue_notification(djrcv_dev, dj_report); |
| 707 | logi_dj_recv_forward_report(djrcv_dev, dj_report); | 693 | break; |
| 694 | case REPORT_TYPE_NOTIF_CONNECTION_STATUS: | ||
| 695 | if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] == | ||
| 696 | STATUS_LINKLOSS) { | ||
| 697 | logi_dj_recv_forward_null_report(djrcv_dev, dj_report); | ||
| 708 | } | 698 | } |
| 709 | report_processed = true; | 699 | break; |
| 700 | default: | ||
| 701 | logi_dj_recv_forward_report(djrcv_dev, dj_report); | ||
| 710 | } | 702 | } |
| 703 | |||
| 704 | out: | ||
| 711 | spin_unlock_irqrestore(&djrcv_dev->lock, flags); | 705 | spin_unlock_irqrestore(&djrcv_dev->lock, flags); |
| 712 | 706 | ||
| 713 | return report_processed; | 707 | return true; |
| 714 | } | 708 | } |
| 715 | 709 | ||
| 716 | static int logi_dj_probe(struct hid_device *hdev, | 710 | static int logi_dj_probe(struct hid_device *hdev, |
diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h index 4a4000340ce1..daeb0aa4bee9 100644 --- a/drivers/hid/hid-logitech-dj.h +++ b/drivers/hid/hid-logitech-dj.h | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | 27 | ||
| 28 | #define DJ_MAX_PAIRED_DEVICES 6 | 28 | #define DJ_MAX_PAIRED_DEVICES 6 |
| 29 | #define DJ_MAX_NUMBER_NOTIFICATIONS 8 | 29 | #define DJ_MAX_NUMBER_NOTIFICATIONS 8 |
| 30 | #define DJ_RECEIVER_INDEX 0 | ||
| 30 | #define DJ_DEVICE_INDEX_MIN 1 | 31 | #define DJ_DEVICE_INDEX_MIN 1 |
| 31 | #define DJ_DEVICE_INDEX_MAX 6 | 32 | #define DJ_DEVICE_INDEX_MAX 6 |
| 32 | 33 | ||
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index ecc2cbf300cc..29a74c1efcb8 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c | |||
| @@ -290,6 +290,11 @@ static int magicmouse_raw_event(struct hid_device *hdev, | |||
| 290 | if (size < 4 || ((size - 4) % 9) != 0) | 290 | if (size < 4 || ((size - 4) % 9) != 0) |
| 291 | return 0; | 291 | return 0; |
| 292 | npoints = (size - 4) / 9; | 292 | npoints = (size - 4) / 9; |
| 293 | if (npoints > 15) { | ||
| 294 | hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n", | ||
| 295 | size); | ||
| 296 | return 0; | ||
| 297 | } | ||
| 293 | msc->ntouches = 0; | 298 | msc->ntouches = 0; |
| 294 | for (ii = 0; ii < npoints; ii++) | 299 | for (ii = 0; ii < npoints; ii++) |
| 295 | magicmouse_emit_touch(msc, ii, data + ii * 9 + 4); | 300 | magicmouse_emit_touch(msc, ii, data + ii * 9 + 4); |
| @@ -307,6 +312,11 @@ static int magicmouse_raw_event(struct hid_device *hdev, | |||
| 307 | if (size < 6 || ((size - 6) % 8) != 0) | 312 | if (size < 6 || ((size - 6) % 8) != 0) |
| 308 | return 0; | 313 | return 0; |
| 309 | npoints = (size - 6) / 8; | 314 | npoints = (size - 6) / 8; |
| 315 | if (npoints > 15) { | ||
| 316 | hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n", | ||
| 317 | size); | ||
| 318 | return 0; | ||
| 319 | } | ||
| 310 | msc->ntouches = 0; | 320 | msc->ntouches = 0; |
| 311 | for (ii = 0; ii < npoints; ii++) | 321 | for (ii = 0; ii < npoints; ii++) |
| 312 | magicmouse_emit_touch(msc, ii, data + ii * 8 + 6); | 322 | magicmouse_emit_touch(msc, ii, data + ii * 8 + 6); |
diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c index acbb021065ec..c1b29a9eb41a 100644 --- a/drivers/hid/hid-picolcd_core.c +++ b/drivers/hid/hid-picolcd_core.c | |||
| @@ -350,6 +350,12 @@ static int picolcd_raw_event(struct hid_device *hdev, | |||
| 350 | if (!data) | 350 | if (!data) |
| 351 | return 1; | 351 | return 1; |
| 352 | 352 | ||
| 353 | if (size > 64) { | ||
| 354 | hid_warn(hdev, "invalid size value (%d) for picolcd raw event (%d)\n", | ||
| 355 | size, report->id); | ||
| 356 | return 0; | ||
| 357 | } | ||
| 358 | |||
| 353 | if (report->id == REPORT_KEY_STATE) { | 359 | if (report->id == REPORT_KEY_STATE) { |
| 354 | if (data->input_keys) | 360 | if (data->input_keys) |
| 355 | ret = picolcd_raw_keypad(data, report, raw_data+1, size-1); | 361 | ret = picolcd_raw_keypad(data, report, raw_data+1, size-1); |
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c index 8389e8109218..3cccff73b9b9 100644 --- a/drivers/hid/hid-rmi.c +++ b/drivers/hid/hid-rmi.c | |||
| @@ -320,10 +320,7 @@ static int rmi_f11_input_event(struct hid_device *hdev, u8 irq, u8 *data, | |||
| 320 | int offset; | 320 | int offset; |
| 321 | int i; | 321 | int i; |
| 322 | 322 | ||
| 323 | if (size < hdata->f11.report_size) | 323 | if (!(irq & hdata->f11.irq_mask) || size <= 0) |
| 324 | return 0; | ||
| 325 | |||
| 326 | if (!(irq & hdata->f11.irq_mask)) | ||
| 327 | return 0; | 324 | return 0; |
| 328 | 325 | ||
| 329 | offset = (hdata->max_fingers >> 2) + 1; | 326 | offset = (hdata->max_fingers >> 2) + 1; |
| @@ -332,9 +329,19 @@ static int rmi_f11_input_event(struct hid_device *hdev, u8 irq, u8 *data, | |||
| 332 | int fs_bit_position = (i & 0x3) << 1; | 329 | int fs_bit_position = (i & 0x3) << 1; |
| 333 | int finger_state = (data[fs_byte_position] >> fs_bit_position) & | 330 | int finger_state = (data[fs_byte_position] >> fs_bit_position) & |
| 334 | 0x03; | 331 | 0x03; |
| 332 | int position = offset + 5 * i; | ||
| 333 | |||
| 334 | if (position + 5 > size) { | ||
| 335 | /* partial report, go on with what we received */ | ||
| 336 | printk_once(KERN_WARNING | ||
| 337 | "%s %s: Detected incomplete finger report. Finger reports may occasionally get dropped on this platform.\n", | ||
| 338 | dev_driver_string(&hdev->dev), | ||
| 339 | dev_name(&hdev->dev)); | ||
| 340 | hid_dbg(hdev, "Incomplete finger report\n"); | ||
| 341 | break; | ||
| 342 | } | ||
| 335 | 343 | ||
| 336 | rmi_f11_process_touch(hdata, i, finger_state, | 344 | rmi_f11_process_touch(hdata, i, finger_state, &data[position]); |
| 337 | &data[offset + 5 * i]); | ||
| 338 | } | 345 | } |
| 339 | input_mt_sync_frame(hdata->input); | 346 | input_mt_sync_frame(hdata->input); |
| 340 | input_sync(hdata->input); | 347 | input_sync(hdata->input); |
| @@ -352,6 +359,11 @@ static int rmi_f30_input_event(struct hid_device *hdev, u8 irq, u8 *data, | |||
| 352 | if (!(irq & hdata->f30.irq_mask)) | 359 | if (!(irq & hdata->f30.irq_mask)) |
| 353 | return 0; | 360 | return 0; |
| 354 | 361 | ||
| 362 | if (size < (int)hdata->f30.report_size) { | ||
| 363 | hid_warn(hdev, "Click Button pressed, but the click data is missing\n"); | ||
| 364 | return 0; | ||
| 365 | } | ||
| 366 | |||
| 355 | for (i = 0; i < hdata->gpio_led_count; i++) { | 367 | for (i = 0; i < hdata->gpio_led_count; i++) { |
| 356 | if (test_bit(i, &hdata->button_mask)) { | 368 | if (test_bit(i, &hdata->button_mask)) { |
| 357 | value = (data[i / 8] >> (i & 0x07)) & BIT(0); | 369 | value = (data[i / 8] >> (i & 0x07)) & BIT(0); |
| @@ -412,9 +424,29 @@ static int rmi_read_data_event(struct hid_device *hdev, u8 *data, int size) | |||
| 412 | return 1; | 424 | return 1; |
| 413 | } | 425 | } |
| 414 | 426 | ||
| 427 | static int rmi_check_sanity(struct hid_device *hdev, u8 *data, int size) | ||
| 428 | { | ||
| 429 | int valid_size = size; | ||
| 430 | /* | ||
| 431 | * On the Dell XPS 13 9333, the bus sometimes get confused and fills | ||
| 432 | * the report with a sentinel value "ff". Synaptics told us that such | ||
| 433 | * behavior does not comes from the touchpad itself, so we filter out | ||
| 434 | * such reports here. | ||
| 435 | */ | ||
| 436 | |||
| 437 | while ((data[valid_size - 1] == 0xff) && valid_size > 0) | ||
| 438 | valid_size--; | ||
| 439 | |||
| 440 | return valid_size; | ||
| 441 | } | ||
| 442 | |||
| 415 | static int rmi_raw_event(struct hid_device *hdev, | 443 | static int rmi_raw_event(struct hid_device *hdev, |
| 416 | struct hid_report *report, u8 *data, int size) | 444 | struct hid_report *report, u8 *data, int size) |
| 417 | { | 445 | { |
| 446 | size = rmi_check_sanity(hdev, data, size); | ||
| 447 | if (size < 2) | ||
| 448 | return 0; | ||
| 449 | |||
| 418 | switch (data[0]) { | 450 | switch (data[0]) { |
| 419 | case RMI_READ_DATA_REPORT_ID: | 451 | case RMI_READ_DATA_REPORT_ID: |
| 420 | return rmi_read_data_event(hdev, data, size); | 452 | return rmi_read_data_event(hdev, data, size); |
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index 2ac25760a9a9..e6d8e18dae97 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c | |||
| @@ -709,6 +709,9 @@ static const struct hid_device_id sensor_hub_devices[] = { | |||
| 709 | USB_DEVICE_ID_MS_TYPE_COVER_2), | 709 | USB_DEVICE_ID_MS_TYPE_COVER_2), |
| 710 | .driver_data = HID_SENSOR_HUB_ENUM_QUIRK}, | 710 | .driver_data = HID_SENSOR_HUB_ENUM_QUIRK}, |
| 711 | { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0, | 711 | { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0, |
| 712 | USB_DEVICE_ID_STM_HID_SENSOR), | ||
| 713 | .driver_data = HID_SENSOR_HUB_ENUM_QUIRK}, | ||
| 714 | { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0, | ||
| 712 | USB_DEVICE_ID_STM_HID_SENSOR_1), | 715 | USB_DEVICE_ID_STM_HID_SENSOR_1), |
| 713 | .driver_data = HID_SENSOR_HUB_ENUM_QUIRK}, | 716 | .driver_data = HID_SENSOR_HUB_ENUM_QUIRK}, |
| 714 | { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_TEXAS_INSTRUMENTS, | 717 | { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_TEXAS_INSTRUMENTS, |
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index c372368e438c..bc4269e559f1 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * HID driver for Sony / PS2 / PS3 BD devices. | 2 | * HID driver for Sony / PS2 / PS3 / PS4 BD devices. |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 1999 Andreas Gal | 4 | * Copyright (c) 1999 Andreas Gal |
| 5 | * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> | 5 | * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> |
| @@ -8,6 +8,7 @@ | |||
| 8 | * Copyright (c) 2012 David Dillow <dave@thedillows.org> | 8 | * Copyright (c) 2012 David Dillow <dave@thedillows.org> |
| 9 | * Copyright (c) 2006-2013 Jiri Kosina | 9 | * Copyright (c) 2006-2013 Jiri Kosina |
| 10 | * Copyright (c) 2013 Colin Leitner <colin.leitner@gmail.com> | 10 | * Copyright (c) 2013 Colin Leitner <colin.leitner@gmail.com> |
| 11 | * Copyright (c) 2014 Frank Praznik <frank.praznik@gmail.com> | ||
| 11 | */ | 12 | */ |
| 12 | 13 | ||
| 13 | /* | 14 | /* |
| @@ -176,7 +177,7 @@ static u8 dualshock4_usb_rdesc[] = { | |||
| 176 | 0x75, 0x06, /* Report Size (6), */ | 177 | 0x75, 0x06, /* Report Size (6), */ |
| 177 | 0x95, 0x01, /* Report Count (1), */ | 178 | 0x95, 0x01, /* Report Count (1), */ |
| 178 | 0x15, 0x00, /* Logical Minimum (0), */ | 179 | 0x15, 0x00, /* Logical Minimum (0), */ |
| 179 | 0x25, 0x7F, /* Logical Maximum (127), */ | 180 | 0x25, 0x3F, /* Logical Maximum (63), */ |
| 180 | 0x81, 0x02, /* Input (Variable), */ | 181 | 0x81, 0x02, /* Input (Variable), */ |
| 181 | 0x05, 0x01, /* Usage Page (Desktop), */ | 182 | 0x05, 0x01, /* Usage Page (Desktop), */ |
| 182 | 0x09, 0x33, /* Usage (Rx), */ | 183 | 0x09, 0x33, /* Usage (Rx), */ |
| @@ -200,14 +201,14 @@ static u8 dualshock4_usb_rdesc[] = { | |||
| 200 | 0x81, 0x02, /* Input (Variable), */ | 201 | 0x81, 0x02, /* Input (Variable), */ |
| 201 | 0x19, 0x43, /* Usage Minimum (43h), */ | 202 | 0x19, 0x43, /* Usage Minimum (43h), */ |
| 202 | 0x29, 0x45, /* Usage Maximum (45h), */ | 203 | 0x29, 0x45, /* Usage Maximum (45h), */ |
| 203 | 0x16, 0xFF, 0xBF, /* Logical Minimum (-16385), */ | 204 | 0x16, 0x00, 0xE0, /* Logical Minimum (-8192), */ |
| 204 | 0x26, 0x00, 0x40, /* Logical Maximum (16384), */ | 205 | 0x26, 0xFF, 0x1F, /* Logical Maximum (8191), */ |
| 205 | 0x95, 0x03, /* Report Count (3), */ | 206 | 0x95, 0x03, /* Report Count (3), */ |
| 206 | 0x81, 0x02, /* Input (Variable), */ | 207 | 0x81, 0x02, /* Input (Variable), */ |
| 207 | 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ | 208 | 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ |
| 208 | 0x09, 0x21, /* Usage (21h), */ | 209 | 0x09, 0x21, /* Usage (21h), */ |
| 209 | 0x15, 0x00, /* Logical Minimum (0), */ | 210 | 0x15, 0x00, /* Logical Minimum (0), */ |
| 210 | 0x25, 0xFF, /* Logical Maximum (255), */ | 211 | 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ |
| 211 | 0x75, 0x08, /* Report Size (8), */ | 212 | 0x75, 0x08, /* Report Size (8), */ |
| 212 | 0x95, 0x27, /* Report Count (39), */ | 213 | 0x95, 0x27, /* Report Count (39), */ |
| 213 | 0x81, 0x02, /* Input (Variable), */ | 214 | 0x81, 0x02, /* Input (Variable), */ |
| @@ -395,11 +396,11 @@ static u8 dualshock4_usb_rdesc[] = { | |||
| 395 | 396 | ||
| 396 | /* | 397 | /* |
| 397 | * The default behavior of the Dualshock 4 is to send reports using report | 398 | * The default behavior of the Dualshock 4 is to send reports using report |
| 398 | * type 1 when running over Bluetooth. However, as soon as it receives a | 399 | * type 1 when running over Bluetooth. However, when feature report 2 is |
| 399 | * report of type 17 to set the LEDs or rumble it starts returning it's state | 400 | * requested during the controller initialization it starts sending input |
| 400 | * in report 17 instead of 1. Since report 17 is undefined in the default HID | 401 | * reports in report 17. Since report 17 is undefined in the default HID |
| 401 | * descriptor the button and axis definitions must be moved to report 17 or | 402 | * descriptor the button and axis definitions must be moved to report 17 or |
| 402 | * the HID layer won't process the received input once a report is sent. | 403 | * the HID layer won't process the received input. |
| 403 | */ | 404 | */ |
| 404 | static u8 dualshock4_bt_rdesc[] = { | 405 | static u8 dualshock4_bt_rdesc[] = { |
| 405 | 0x05, 0x01, /* Usage Page (Desktop), */ | 406 | 0x05, 0x01, /* Usage Page (Desktop), */ |
| @@ -509,8 +510,8 @@ static u8 dualshock4_bt_rdesc[] = { | |||
| 509 | 0x81, 0x02, /* Input (Variable), */ | 510 | 0x81, 0x02, /* Input (Variable), */ |
| 510 | 0x19, 0x43, /* Usage Minimum (43h), */ | 511 | 0x19, 0x43, /* Usage Minimum (43h), */ |
| 511 | 0x29, 0x45, /* Usage Maximum (45h), */ | 512 | 0x29, 0x45, /* Usage Maximum (45h), */ |
| 512 | 0x16, 0xFF, 0xBF, /* Logical Minimum (-16385), */ | 513 | 0x16, 0x00, 0xE0, /* Logical Minimum (-8192), */ |
| 513 | 0x26, 0x00, 0x40, /* Logical Maximum (16384), */ | 514 | 0x26, 0xFF, 0x1F, /* Logical Maximum (8191), */ |
| 514 | 0x95, 0x03, /* Report Count (3), */ | 515 | 0x95, 0x03, /* Report Count (3), */ |
| 515 | 0x81, 0x02, /* Input (Variable), */ | 516 | 0x81, 0x02, /* Input (Variable), */ |
| 516 | 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ | 517 | 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ |
| @@ -935,12 +936,13 @@ static void sixaxis_parse_report(struct sony_sc *sc, __u8 *rd, int size) | |||
| 935 | if (rd[30] >= 0xee) { | 936 | if (rd[30] >= 0xee) { |
| 936 | battery_capacity = 100; | 937 | battery_capacity = 100; |
| 937 | battery_charging = !(rd[30] & 0x01); | 938 | battery_charging = !(rd[30] & 0x01); |
| 939 | cable_state = 1; | ||
| 938 | } else { | 940 | } else { |
| 939 | __u8 index = rd[30] <= 5 ? rd[30] : 5; | 941 | __u8 index = rd[30] <= 5 ? rd[30] : 5; |
| 940 | battery_capacity = sixaxis_battery_capacity[index]; | 942 | battery_capacity = sixaxis_battery_capacity[index]; |
| 941 | battery_charging = 0; | 943 | battery_charging = 0; |
| 944 | cable_state = 0; | ||
| 942 | } | 945 | } |
| 943 | cable_state = !(rd[31] & 0x04); | ||
| 944 | 946 | ||
| 945 | spin_lock_irqsave(&sc->lock, flags); | 947 | spin_lock_irqsave(&sc->lock, flags); |
| 946 | sc->cable_state = cable_state; | 948 | sc->cable_state = cable_state; |
| @@ -1082,6 +1084,38 @@ static int sony_mapping(struct hid_device *hdev, struct hid_input *hi, | |||
| 1082 | return 0; | 1084 | return 0; |
| 1083 | } | 1085 | } |
| 1084 | 1086 | ||
| 1087 | static int sony_register_touchpad(struct hid_input *hi, int touch_count, | ||
| 1088 | int w, int h) | ||
| 1089 | { | ||
| 1090 | struct input_dev *input_dev = hi->input; | ||
| 1091 | int ret; | ||
| 1092 | |||
| 1093 | ret = input_mt_init_slots(input_dev, touch_count, 0); | ||
| 1094 | if (ret < 0) | ||
| 1095 | return ret; | ||
| 1096 | |||
| 1097 | input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, w, 0, 0); | ||
| 1098 | input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, h, 0, 0); | ||
| 1099 | |||
| 1100 | return 0; | ||
| 1101 | } | ||
| 1102 | |||
| 1103 | static void sony_input_configured(struct hid_device *hdev, | ||
| 1104 | struct hid_input *hidinput) | ||
| 1105 | { | ||
| 1106 | struct sony_sc *sc = hid_get_drvdata(hdev); | ||
| 1107 | |||
| 1108 | /* | ||
| 1109 | * The Dualshock 4 touchpad supports 2 touches and has a | ||
| 1110 | * resolution of 1920x942 (44.86 dots/mm). | ||
| 1111 | */ | ||
| 1112 | if (sc->quirks & DUALSHOCK4_CONTROLLER) { | ||
| 1113 | if (sony_register_touchpad(hidinput, 2, 1920, 942) != 0) | ||
| 1114 | hid_err(sc->hdev, | ||
| 1115 | "Unable to initialize multi-touch slots\n"); | ||
| 1116 | } | ||
| 1117 | } | ||
| 1118 | |||
| 1085 | /* | 1119 | /* |
| 1086 | * Sending HID_REQ_GET_REPORT changes the operation mode of the ps3 controller | 1120 | * Sending HID_REQ_GET_REPORT changes the operation mode of the ps3 controller |
| 1087 | * to "operational". Without this, the ps3 controller will not report any | 1121 | * to "operational". Without this, the ps3 controller will not report any |
| @@ -1654,26 +1688,6 @@ static void sony_battery_remove(struct sony_sc *sc) | |||
| 1654 | sc->battery.name = NULL; | 1688 | sc->battery.name = NULL; |
| 1655 | } | 1689 | } |
| 1656 | 1690 | ||
| 1657 | static int sony_register_touchpad(struct sony_sc *sc, int touch_count, | ||
| 1658 | int w, int h) | ||
| 1659 | { | ||
| 1660 | struct hid_input *hidinput = list_entry(sc->hdev->inputs.next, | ||
| 1661 | struct hid_input, list); | ||
| 1662 | struct input_dev *input_dev = hidinput->input; | ||
| 1663 | int ret; | ||
| 1664 | |||
| 1665 | ret = input_mt_init_slots(input_dev, touch_count, 0); | ||
| 1666 | if (ret < 0) { | ||
| 1667 | hid_err(sc->hdev, "Unable to initialize multi-touch slots\n"); | ||
| 1668 | return ret; | ||
| 1669 | } | ||
| 1670 | |||
| 1671 | input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, w, 0, 0); | ||
| 1672 | input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, h, 0, 0); | ||
| 1673 | |||
| 1674 | return 0; | ||
| 1675 | } | ||
| 1676 | |||
| 1677 | /* | 1691 | /* |
| 1678 | * If a controller is plugged in via USB while already connected via Bluetooth | 1692 | * If a controller is plugged in via USB while already connected via Bluetooth |
| 1679 | * it will show up as two devices. A global list of connected controllers and | 1693 | * it will show up as two devices. A global list of connected controllers and |
| @@ -1923,13 +1937,6 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
| 1923 | goto err_stop; | 1937 | goto err_stop; |
| 1924 | } | 1938 | } |
| 1925 | } | 1939 | } |
| 1926 | /* | ||
| 1927 | * The Dualshock 4 touchpad supports 2 touches and has a | ||
| 1928 | * resolution of 1920x940. | ||
| 1929 | */ | ||
| 1930 | ret = sony_register_touchpad(sc, 2, 1920, 940); | ||
| 1931 | if (ret < 0) | ||
| 1932 | goto err_stop; | ||
| 1933 | 1940 | ||
| 1934 | sony_init_work(sc, dualshock4_state_worker); | 1941 | sony_init_work(sc, dualshock4_state_worker); |
| 1935 | } else { | 1942 | } else { |
| @@ -2037,13 +2044,14 @@ static const struct hid_device_id sony_devices[] = { | |||
| 2037 | MODULE_DEVICE_TABLE(hid, sony_devices); | 2044 | MODULE_DEVICE_TABLE(hid, sony_devices); |
| 2038 | 2045 | ||
| 2039 | static struct hid_driver sony_driver = { | 2046 | static struct hid_driver sony_driver = { |
| 2040 | .name = "sony", | 2047 | .name = "sony", |
| 2041 | .id_table = sony_devices, | 2048 | .id_table = sony_devices, |
| 2042 | .input_mapping = sony_mapping, | 2049 | .input_mapping = sony_mapping, |
| 2043 | .probe = sony_probe, | 2050 | .input_configured = sony_input_configured, |
| 2044 | .remove = sony_remove, | 2051 | .probe = sony_probe, |
| 2045 | .report_fixup = sony_report_fixup, | 2052 | .remove = sony_remove, |
| 2046 | .raw_event = sony_raw_event | 2053 | .report_fixup = sony_report_fixup, |
| 2054 | .raw_event = sony_raw_event | ||
| 2047 | }; | 2055 | }; |
| 2048 | 2056 | ||
| 2049 | static int __init sony_init(void) | 2057 | static int __init sony_init(void) |
diff --git a/drivers/hid/hid-thingm.c b/drivers/hid/hid-thingm.c index f206398a5d54..b95d3978c272 100644 --- a/drivers/hid/hid-thingm.c +++ b/drivers/hid/hid-thingm.c | |||
| @@ -250,6 +250,7 @@ static int thingm_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
| 250 | 250 | ||
| 251 | if (!tdev->fwinfo) { | 251 | if (!tdev->fwinfo) { |
| 252 | hid_err(hdev, "unsupported firmware %c\n", tdev->version.major); | 252 | hid_err(hdev, "unsupported firmware %c\n", tdev->version.major); |
| 253 | err = -ENODEV; | ||
| 253 | goto stop; | 254 | goto stop; |
| 254 | } | 255 | } |
| 255 | 256 | ||
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c index 0cb92e347258..e094c572b86e 100644 --- a/drivers/hid/uhid.c +++ b/drivers/hid/uhid.c | |||
| @@ -44,10 +44,12 @@ struct uhid_device { | |||
| 44 | __u8 tail; | 44 | __u8 tail; |
| 45 | struct uhid_event *outq[UHID_BUFSIZE]; | 45 | struct uhid_event *outq[UHID_BUFSIZE]; |
| 46 | 46 | ||
| 47 | /* blocking GET_REPORT support; state changes protected by qlock */ | ||
| 47 | struct mutex report_lock; | 48 | struct mutex report_lock; |
| 48 | wait_queue_head_t report_wait; | 49 | wait_queue_head_t report_wait; |
| 49 | atomic_t report_done; | 50 | bool report_running; |
| 50 | atomic_t report_id; | 51 | u32 report_id; |
| 52 | u32 report_type; | ||
| 51 | struct uhid_event report_buf; | 53 | struct uhid_event report_buf; |
| 52 | }; | 54 | }; |
| 53 | 55 | ||
| @@ -90,8 +92,27 @@ static int uhid_queue_event(struct uhid_device *uhid, __u32 event) | |||
| 90 | static int uhid_hid_start(struct hid_device *hid) | 92 | static int uhid_hid_start(struct hid_device *hid) |
| 91 | { | 93 | { |
| 92 | struct uhid_device *uhid = hid->driver_data; | 94 | struct uhid_device *uhid = hid->driver_data; |
| 95 | struct uhid_event *ev; | ||
| 96 | unsigned long flags; | ||
| 97 | |||
| 98 | ev = kzalloc(sizeof(*ev), GFP_KERNEL); | ||
| 99 | if (!ev) | ||
| 100 | return -ENOMEM; | ||
| 101 | |||
| 102 | ev->type = UHID_START; | ||
| 93 | 103 | ||
| 94 | return uhid_queue_event(uhid, UHID_START); | 104 | if (hid->report_enum[HID_FEATURE_REPORT].numbered) |
| 105 | ev->u.start.dev_flags |= UHID_DEV_NUMBERED_FEATURE_REPORTS; | ||
| 106 | if (hid->report_enum[HID_OUTPUT_REPORT].numbered) | ||
| 107 | ev->u.start.dev_flags |= UHID_DEV_NUMBERED_OUTPUT_REPORTS; | ||
| 108 | if (hid->report_enum[HID_INPUT_REPORT].numbered) | ||
| 109 | ev->u.start.dev_flags |= UHID_DEV_NUMBERED_INPUT_REPORTS; | ||
| 110 | |||
| 111 | spin_lock_irqsave(&uhid->qlock, flags); | ||
| 112 | uhid_queue(uhid, ev); | ||
| 113 | spin_unlock_irqrestore(&uhid->qlock, flags); | ||
| 114 | |||
| 115 | return 0; | ||
| 95 | } | 116 | } |
| 96 | 117 | ||
| 97 | static void uhid_hid_stop(struct hid_device *hid) | 118 | static void uhid_hid_stop(struct hid_device *hid) |
| @@ -123,87 +144,169 @@ static int uhid_hid_parse(struct hid_device *hid) | |||
| 123 | return hid_parse_report(hid, uhid->rd_data, uhid->rd_size); | 144 | return hid_parse_report(hid, uhid->rd_data, uhid->rd_size); |
| 124 | } | 145 | } |
| 125 | 146 | ||
| 126 | static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum, | 147 | /* must be called with report_lock held */ |
| 127 | __u8 *buf, size_t count, unsigned char rtype) | 148 | static int __uhid_report_queue_and_wait(struct uhid_device *uhid, |
| 149 | struct uhid_event *ev, | ||
| 150 | __u32 *report_id) | ||
| 151 | { | ||
| 152 | unsigned long flags; | ||
| 153 | int ret; | ||
| 154 | |||
| 155 | spin_lock_irqsave(&uhid->qlock, flags); | ||
| 156 | *report_id = ++uhid->report_id; | ||
| 157 | uhid->report_type = ev->type + 1; | ||
| 158 | uhid->report_running = true; | ||
| 159 | uhid_queue(uhid, ev); | ||
| 160 | spin_unlock_irqrestore(&uhid->qlock, flags); | ||
| 161 | |||
| 162 | ret = wait_event_interruptible_timeout(uhid->report_wait, | ||
| 163 | !uhid->report_running || !uhid->running, | ||
| 164 | 5 * HZ); | ||
| 165 | if (!ret || !uhid->running || uhid->report_running) | ||
| 166 | ret = -EIO; | ||
| 167 | else if (ret < 0) | ||
| 168 | ret = -ERESTARTSYS; | ||
| 169 | else | ||
| 170 | ret = 0; | ||
| 171 | |||
| 172 | uhid->report_running = false; | ||
| 173 | |||
| 174 | return ret; | ||
| 175 | } | ||
| 176 | |||
| 177 | static void uhid_report_wake_up(struct uhid_device *uhid, u32 id, | ||
| 178 | const struct uhid_event *ev) | ||
| 179 | { | ||
| 180 | unsigned long flags; | ||
| 181 | |||
| 182 | spin_lock_irqsave(&uhid->qlock, flags); | ||
| 183 | |||
| 184 | /* id for old report; drop it silently */ | ||
| 185 | if (uhid->report_type != ev->type || uhid->report_id != id) | ||
| 186 | goto unlock; | ||
| 187 | if (!uhid->report_running) | ||
| 188 | goto unlock; | ||
| 189 | |||
| 190 | memcpy(&uhid->report_buf, ev, sizeof(*ev)); | ||
| 191 | uhid->report_running = false; | ||
| 192 | wake_up_interruptible(&uhid->report_wait); | ||
| 193 | |||
| 194 | unlock: | ||
| 195 | spin_unlock_irqrestore(&uhid->qlock, flags); | ||
| 196 | } | ||
| 197 | |||
| 198 | static int uhid_hid_get_report(struct hid_device *hid, unsigned char rnum, | ||
| 199 | u8 *buf, size_t count, u8 rtype) | ||
| 128 | { | 200 | { |
| 129 | struct uhid_device *uhid = hid->driver_data; | 201 | struct uhid_device *uhid = hid->driver_data; |
| 130 | __u8 report_type; | 202 | struct uhid_get_report_reply_req *req; |
| 131 | struct uhid_event *ev; | 203 | struct uhid_event *ev; |
| 132 | unsigned long flags; | ||
| 133 | int ret; | 204 | int ret; |
| 134 | size_t uninitialized_var(len); | ||
| 135 | struct uhid_feature_answer_req *req; | ||
| 136 | 205 | ||
| 137 | if (!uhid->running) | 206 | if (!uhid->running) |
| 138 | return -EIO; | 207 | return -EIO; |
| 139 | 208 | ||
| 140 | switch (rtype) { | 209 | ev = kzalloc(sizeof(*ev), GFP_KERNEL); |
| 141 | case HID_FEATURE_REPORT: | 210 | if (!ev) |
| 142 | report_type = UHID_FEATURE_REPORT; | 211 | return -ENOMEM; |
| 143 | break; | 212 | |
| 144 | case HID_OUTPUT_REPORT: | 213 | ev->type = UHID_GET_REPORT; |
| 145 | report_type = UHID_OUTPUT_REPORT; | 214 | ev->u.get_report.rnum = rnum; |
| 146 | break; | 215 | ev->u.get_report.rtype = rtype; |
| 147 | case HID_INPUT_REPORT: | ||
| 148 | report_type = UHID_INPUT_REPORT; | ||
| 149 | break; | ||
| 150 | default: | ||
| 151 | return -EINVAL; | ||
| 152 | } | ||
| 153 | 216 | ||
| 154 | ret = mutex_lock_interruptible(&uhid->report_lock); | 217 | ret = mutex_lock_interruptible(&uhid->report_lock); |
| 155 | if (ret) | 218 | if (ret) { |
| 219 | kfree(ev); | ||
| 156 | return ret; | 220 | return ret; |
| 221 | } | ||
| 157 | 222 | ||
| 158 | ev = kzalloc(sizeof(*ev), GFP_KERNEL); | 223 | /* this _always_ takes ownership of @ev */ |
| 159 | if (!ev) { | 224 | ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.get_report.id); |
| 160 | ret = -ENOMEM; | 225 | if (ret) |
| 161 | goto unlock; | 226 | goto unlock; |
| 227 | |||
| 228 | req = &uhid->report_buf.u.get_report_reply; | ||
| 229 | if (req->err) { | ||
| 230 | ret = -EIO; | ||
| 231 | } else { | ||
| 232 | ret = min3(count, (size_t)req->size, (size_t)UHID_DATA_MAX); | ||
| 233 | memcpy(buf, req->data, ret); | ||
| 162 | } | 234 | } |
| 163 | 235 | ||
| 164 | spin_lock_irqsave(&uhid->qlock, flags); | 236 | unlock: |
| 165 | ev->type = UHID_FEATURE; | 237 | mutex_unlock(&uhid->report_lock); |
| 166 | ev->u.feature.id = atomic_inc_return(&uhid->report_id); | 238 | return ret; |
| 167 | ev->u.feature.rnum = rnum; | 239 | } |
| 168 | ev->u.feature.rtype = report_type; | ||
| 169 | 240 | ||
| 170 | atomic_set(&uhid->report_done, 0); | 241 | static int uhid_hid_set_report(struct hid_device *hid, unsigned char rnum, |
| 171 | uhid_queue(uhid, ev); | 242 | const u8 *buf, size_t count, u8 rtype) |
| 172 | spin_unlock_irqrestore(&uhid->qlock, flags); | 243 | { |
| 244 | struct uhid_device *uhid = hid->driver_data; | ||
| 245 | struct uhid_event *ev; | ||
| 246 | int ret; | ||
| 173 | 247 | ||
| 174 | ret = wait_event_interruptible_timeout(uhid->report_wait, | 248 | if (!uhid->running || count > UHID_DATA_MAX) |
| 175 | atomic_read(&uhid->report_done), 5 * HZ); | 249 | return -EIO; |
| 176 | |||
| 177 | /* | ||
| 178 | * Make sure "uhid->running" is cleared on shutdown before | ||
| 179 | * "uhid->report_done" is set. | ||
| 180 | */ | ||
| 181 | smp_rmb(); | ||
| 182 | if (!ret || !uhid->running) { | ||
| 183 | ret = -EIO; | ||
| 184 | } else if (ret < 0) { | ||
| 185 | ret = -ERESTARTSYS; | ||
| 186 | } else { | ||
| 187 | spin_lock_irqsave(&uhid->qlock, flags); | ||
| 188 | req = &uhid->report_buf.u.feature_answer; | ||
| 189 | 250 | ||
| 190 | if (req->err) { | 251 | ev = kzalloc(sizeof(*ev), GFP_KERNEL); |
| 191 | ret = -EIO; | 252 | if (!ev) |
| 192 | } else { | 253 | return -ENOMEM; |
| 193 | ret = 0; | 254 | |
| 194 | len = min(count, | 255 | ev->type = UHID_SET_REPORT; |
| 195 | min_t(size_t, req->size, UHID_DATA_MAX)); | 256 | ev->u.set_report.rnum = rnum; |
| 196 | memcpy(buf, req->data, len); | 257 | ev->u.set_report.rtype = rtype; |
| 197 | } | 258 | ev->u.set_report.size = count; |
| 259 | memcpy(ev->u.set_report.data, buf, count); | ||
| 198 | 260 | ||
| 199 | spin_unlock_irqrestore(&uhid->qlock, flags); | 261 | ret = mutex_lock_interruptible(&uhid->report_lock); |
| 262 | if (ret) { | ||
| 263 | kfree(ev); | ||
| 264 | return ret; | ||
| 200 | } | 265 | } |
| 201 | 266 | ||
| 202 | atomic_set(&uhid->report_done, 1); | 267 | /* this _always_ takes ownership of @ev */ |
| 268 | ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.set_report.id); | ||
| 269 | if (ret) | ||
| 270 | goto unlock; | ||
| 271 | |||
| 272 | if (uhid->report_buf.u.set_report_reply.err) | ||
| 273 | ret = -EIO; | ||
| 274 | else | ||
| 275 | ret = count; | ||
| 203 | 276 | ||
| 204 | unlock: | 277 | unlock: |
| 205 | mutex_unlock(&uhid->report_lock); | 278 | mutex_unlock(&uhid->report_lock); |
| 206 | return ret ? ret : len; | 279 | return ret; |
| 280 | } | ||
| 281 | |||
| 282 | static int uhid_hid_raw_request(struct hid_device *hid, unsigned char reportnum, | ||
| 283 | __u8 *buf, size_t len, unsigned char rtype, | ||
| 284 | int reqtype) | ||
| 285 | { | ||
| 286 | u8 u_rtype; | ||
| 287 | |||
| 288 | switch (rtype) { | ||
| 289 | case HID_FEATURE_REPORT: | ||
| 290 | u_rtype = UHID_FEATURE_REPORT; | ||
| 291 | break; | ||
| 292 | case HID_OUTPUT_REPORT: | ||
| 293 | u_rtype = UHID_OUTPUT_REPORT; | ||
| 294 | break; | ||
| 295 | case HID_INPUT_REPORT: | ||
| 296 | u_rtype = UHID_INPUT_REPORT; | ||
| 297 | break; | ||
| 298 | default: | ||
| 299 | return -EINVAL; | ||
| 300 | } | ||
| 301 | |||
| 302 | switch (reqtype) { | ||
| 303 | case HID_REQ_GET_REPORT: | ||
| 304 | return uhid_hid_get_report(hid, reportnum, buf, len, u_rtype); | ||
| 305 | case HID_REQ_SET_REPORT: | ||
| 306 | return uhid_hid_set_report(hid, reportnum, buf, len, u_rtype); | ||
| 307 | default: | ||
| 308 | return -EIO; | ||
| 309 | } | ||
| 207 | } | 310 | } |
| 208 | 311 | ||
| 209 | static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count, | 312 | static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count, |
| @@ -250,29 +353,14 @@ static int uhid_hid_output_report(struct hid_device *hid, __u8 *buf, | |||
| 250 | return uhid_hid_output_raw(hid, buf, count, HID_OUTPUT_REPORT); | 353 | return uhid_hid_output_raw(hid, buf, count, HID_OUTPUT_REPORT); |
| 251 | } | 354 | } |
| 252 | 355 | ||
| 253 | static int uhid_raw_request(struct hid_device *hid, unsigned char reportnum, | ||
| 254 | __u8 *buf, size_t len, unsigned char rtype, | ||
| 255 | int reqtype) | ||
| 256 | { | ||
| 257 | switch (reqtype) { | ||
| 258 | case HID_REQ_GET_REPORT: | ||
| 259 | return uhid_hid_get_raw(hid, reportnum, buf, len, rtype); | ||
| 260 | case HID_REQ_SET_REPORT: | ||
| 261 | /* TODO: implement proper SET_REPORT functionality */ | ||
| 262 | return -ENOSYS; | ||
| 263 | default: | ||
| 264 | return -EIO; | ||
| 265 | } | ||
| 266 | } | ||
| 267 | |||
| 268 | static struct hid_ll_driver uhid_hid_driver = { | 356 | static struct hid_ll_driver uhid_hid_driver = { |
| 269 | .start = uhid_hid_start, | 357 | .start = uhid_hid_start, |
| 270 | .stop = uhid_hid_stop, | 358 | .stop = uhid_hid_stop, |
| 271 | .open = uhid_hid_open, | 359 | .open = uhid_hid_open, |
| 272 | .close = uhid_hid_close, | 360 | .close = uhid_hid_close, |
| 273 | .parse = uhid_hid_parse, | 361 | .parse = uhid_hid_parse, |
| 362 | .raw_request = uhid_hid_raw_request, | ||
| 274 | .output_report = uhid_hid_output_report, | 363 | .output_report = uhid_hid_output_report, |
| 275 | .raw_request = uhid_raw_request, | ||
| 276 | }; | 364 | }; |
| 277 | 365 | ||
| 278 | #ifdef CONFIG_COMPAT | 366 | #ifdef CONFIG_COMPAT |
| @@ -363,28 +451,27 @@ static int uhid_event_from_user(const char __user *buffer, size_t len, | |||
| 363 | } | 451 | } |
| 364 | #endif | 452 | #endif |
| 365 | 453 | ||
| 366 | static int uhid_dev_create(struct uhid_device *uhid, | 454 | static int uhid_dev_create2(struct uhid_device *uhid, |
| 367 | const struct uhid_event *ev) | 455 | const struct uhid_event *ev) |
| 368 | { | 456 | { |
| 369 | struct hid_device *hid; | 457 | struct hid_device *hid; |
| 458 | size_t rd_size, len; | ||
| 459 | void *rd_data; | ||
| 370 | int ret; | 460 | int ret; |
| 371 | 461 | ||
| 372 | if (uhid->running) | 462 | if (uhid->running) |
| 373 | return -EALREADY; | 463 | return -EALREADY; |
| 374 | 464 | ||
| 375 | uhid->rd_size = ev->u.create.rd_size; | 465 | rd_size = ev->u.create2.rd_size; |
| 376 | if (uhid->rd_size <= 0 || uhid->rd_size > HID_MAX_DESCRIPTOR_SIZE) | 466 | if (rd_size <= 0 || rd_size > HID_MAX_DESCRIPTOR_SIZE) |
| 377 | return -EINVAL; | 467 | return -EINVAL; |
| 378 | 468 | ||
| 379 | uhid->rd_data = kmalloc(uhid->rd_size, GFP_KERNEL); | 469 | rd_data = kmemdup(ev->u.create2.rd_data, rd_size, GFP_KERNEL); |
| 380 | if (!uhid->rd_data) | 470 | if (!rd_data) |
| 381 | return -ENOMEM; | 471 | return -ENOMEM; |
| 382 | 472 | ||
| 383 | if (copy_from_user(uhid->rd_data, ev->u.create.rd_data, | 473 | uhid->rd_size = rd_size; |
| 384 | uhid->rd_size)) { | 474 | uhid->rd_data = rd_data; |
| 385 | ret = -EFAULT; | ||
| 386 | goto err_free; | ||
| 387 | } | ||
| 388 | 475 | ||
| 389 | hid = hid_allocate_device(); | 476 | hid = hid_allocate_device(); |
| 390 | if (IS_ERR(hid)) { | 477 | if (IS_ERR(hid)) { |
| @@ -392,19 +479,19 @@ static int uhid_dev_create(struct uhid_device *uhid, | |||
| 392 | goto err_free; | 479 | goto err_free; |
| 393 | } | 480 | } |
| 394 | 481 | ||
| 395 | strncpy(hid->name, ev->u.create.name, 127); | 482 | len = min(sizeof(hid->name), sizeof(ev->u.create2.name)) - 1; |
| 396 | hid->name[127] = 0; | 483 | strncpy(hid->name, ev->u.create2.name, len); |
| 397 | strncpy(hid->phys, ev->u.create.phys, 63); | 484 | len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys)) - 1; |
| 398 | hid->phys[63] = 0; | 485 | strncpy(hid->phys, ev->u.create2.phys, len); |
| 399 | strncpy(hid->uniq, ev->u.create.uniq, 63); | 486 | len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq)) - 1; |
| 400 | hid->uniq[63] = 0; | 487 | strncpy(hid->uniq, ev->u.create2.uniq, len); |
| 401 | 488 | ||
| 402 | hid->ll_driver = &uhid_hid_driver; | 489 | hid->ll_driver = &uhid_hid_driver; |
| 403 | hid->bus = ev->u.create.bus; | 490 | hid->bus = ev->u.create2.bus; |
| 404 | hid->vendor = ev->u.create.vendor; | 491 | hid->vendor = ev->u.create2.vendor; |
| 405 | hid->product = ev->u.create.product; | 492 | hid->product = ev->u.create2.product; |
| 406 | hid->version = ev->u.create.version; | 493 | hid->version = ev->u.create2.version; |
| 407 | hid->country = ev->u.create.country; | 494 | hid->country = ev->u.create2.country; |
| 408 | hid->driver_data = uhid; | 495 | hid->driver_data = uhid; |
| 409 | hid->dev.parent = uhid_misc.this_device; | 496 | hid->dev.parent = uhid_misc.this_device; |
| 410 | 497 | ||
| @@ -425,67 +512,34 @@ err_hid: | |||
| 425 | uhid->running = false; | 512 | uhid->running = false; |
| 426 | err_free: | 513 | err_free: |
| 427 | kfree(uhid->rd_data); | 514 | kfree(uhid->rd_data); |
| 515 | uhid->rd_data = NULL; | ||
| 516 | uhid->rd_size = 0; | ||
| 428 | return ret; | 517 | return ret; |
| 429 | } | 518 | } |
| 430 | 519 | ||
| 431 | static int uhid_dev_create2(struct uhid_device *uhid, | 520 | static int uhid_dev_create(struct uhid_device *uhid, |
| 432 | const struct uhid_event *ev) | 521 | struct uhid_event *ev) |
| 433 | { | 522 | { |
| 434 | struct hid_device *hid; | 523 | struct uhid_create_req orig; |
| 435 | int ret; | ||
| 436 | 524 | ||
| 437 | if (uhid->running) | 525 | orig = ev->u.create; |
| 438 | return -EALREADY; | ||
| 439 | 526 | ||
| 440 | uhid->rd_size = ev->u.create2.rd_size; | 527 | if (orig.rd_size <= 0 || orig.rd_size > HID_MAX_DESCRIPTOR_SIZE) |
| 441 | if (uhid->rd_size <= 0 || uhid->rd_size > HID_MAX_DESCRIPTOR_SIZE) | ||
| 442 | return -EINVAL; | 528 | return -EINVAL; |
| 529 | if (copy_from_user(&ev->u.create2.rd_data, orig.rd_data, orig.rd_size)) | ||
| 530 | return -EFAULT; | ||
| 443 | 531 | ||
| 444 | uhid->rd_data = kmemdup(ev->u.create2.rd_data, uhid->rd_size, | 532 | memcpy(ev->u.create2.name, orig.name, sizeof(orig.name)); |
| 445 | GFP_KERNEL); | 533 | memcpy(ev->u.create2.phys, orig.phys, sizeof(orig.phys)); |
| 446 | if (!uhid->rd_data) | 534 | memcpy(ev->u.create2.uniq, orig.uniq, sizeof(orig.uniq)); |
| 447 | return -ENOMEM; | 535 | ev->u.create2.rd_size = orig.rd_size; |
| 448 | 536 | ev->u.create2.bus = orig.bus; | |
| 449 | hid = hid_allocate_device(); | 537 | ev->u.create2.vendor = orig.vendor; |
| 450 | if (IS_ERR(hid)) { | 538 | ev->u.create2.product = orig.product; |
| 451 | ret = PTR_ERR(hid); | 539 | ev->u.create2.version = orig.version; |
| 452 | goto err_free; | 540 | ev->u.create2.country = orig.country; |
| 453 | } | 541 | |
| 454 | 542 | return uhid_dev_create2(uhid, ev); | |
| 455 | strncpy(hid->name, ev->u.create2.name, 127); | ||
| 456 | hid->name[127] = 0; | ||
| 457 | strncpy(hid->phys, ev->u.create2.phys, 63); | ||
| 458 | hid->phys[63] = 0; | ||
| 459 | strncpy(hid->uniq, ev->u.create2.uniq, 63); | ||
| 460 | hid->uniq[63] = 0; | ||
| 461 | |||
| 462 | hid->ll_driver = &uhid_hid_driver; | ||
| 463 | hid->bus = ev->u.create2.bus; | ||
| 464 | hid->vendor = ev->u.create2.vendor; | ||
| 465 | hid->product = ev->u.create2.product; | ||
| 466 | hid->version = ev->u.create2.version; | ||
| 467 | hid->country = ev->u.create2.country; | ||
| 468 | hid->driver_data = uhid; | ||
| 469 | hid->dev.parent = uhid_misc.this_device; | ||
| 470 | |||
| 471 | uhid->hid = hid; | ||
| 472 | uhid->running = true; | ||
| 473 | |||
| 474 | ret = hid_add_device(hid); | ||
| 475 | if (ret) { | ||
| 476 | hid_err(hid, "Cannot register HID device\n"); | ||
| 477 | goto err_hid; | ||
| 478 | } | ||
| 479 | |||
| 480 | return 0; | ||
| 481 | |||
| 482 | err_hid: | ||
| 483 | hid_destroy_device(hid); | ||
| 484 | uhid->hid = NULL; | ||
| 485 | uhid->running = false; | ||
| 486 | err_free: | ||
| 487 | kfree(uhid->rd_data); | ||
| 488 | return ret; | ||
| 489 | } | 543 | } |
| 490 | 544 | ||
| 491 | static int uhid_dev_destroy(struct uhid_device *uhid) | 545 | static int uhid_dev_destroy(struct uhid_device *uhid) |
| @@ -493,10 +547,7 @@ static int uhid_dev_destroy(struct uhid_device *uhid) | |||
| 493 | if (!uhid->running) | 547 | if (!uhid->running) |
| 494 | return -EINVAL; | 548 | return -EINVAL; |
| 495 | 549 | ||
| 496 | /* clear "running" before setting "report_done" */ | ||
| 497 | uhid->running = false; | 550 | uhid->running = false; |
| 498 | smp_wmb(); | ||
| 499 | atomic_set(&uhid->report_done, 1); | ||
| 500 | wake_up_interruptible(&uhid->report_wait); | 551 | wake_up_interruptible(&uhid->report_wait); |
| 501 | 552 | ||
| 502 | hid_destroy_device(uhid->hid); | 553 | hid_destroy_device(uhid->hid); |
| @@ -527,28 +578,23 @@ static int uhid_dev_input2(struct uhid_device *uhid, struct uhid_event *ev) | |||
| 527 | return 0; | 578 | return 0; |
| 528 | } | 579 | } |
| 529 | 580 | ||
| 530 | static int uhid_dev_feature_answer(struct uhid_device *uhid, | 581 | static int uhid_dev_get_report_reply(struct uhid_device *uhid, |
| 531 | struct uhid_event *ev) | 582 | struct uhid_event *ev) |
| 532 | { | 583 | { |
| 533 | unsigned long flags; | ||
| 534 | |||
| 535 | if (!uhid->running) | 584 | if (!uhid->running) |
| 536 | return -EINVAL; | 585 | return -EINVAL; |
| 537 | 586 | ||
| 538 | spin_lock_irqsave(&uhid->qlock, flags); | 587 | uhid_report_wake_up(uhid, ev->u.get_report_reply.id, ev); |
| 539 | 588 | return 0; | |
| 540 | /* id for old report; drop it silently */ | 589 | } |
| 541 | if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id) | ||
| 542 | goto unlock; | ||
| 543 | if (atomic_read(&uhid->report_done)) | ||
| 544 | goto unlock; | ||
| 545 | 590 | ||
| 546 | memcpy(&uhid->report_buf, ev, sizeof(*ev)); | 591 | static int uhid_dev_set_report_reply(struct uhid_device *uhid, |
| 547 | atomic_set(&uhid->report_done, 1); | 592 | struct uhid_event *ev) |
| 548 | wake_up_interruptible(&uhid->report_wait); | 593 | { |
| 594 | if (!uhid->running) | ||
| 595 | return -EINVAL; | ||
| 549 | 596 | ||
| 550 | unlock: | 597 | uhid_report_wake_up(uhid, ev->u.set_report_reply.id, ev); |
| 551 | spin_unlock_irqrestore(&uhid->qlock, flags); | ||
| 552 | return 0; | 598 | return 0; |
| 553 | } | 599 | } |
| 554 | 600 | ||
| @@ -566,7 +612,6 @@ static int uhid_char_open(struct inode *inode, struct file *file) | |||
| 566 | init_waitqueue_head(&uhid->waitq); | 612 | init_waitqueue_head(&uhid->waitq); |
| 567 | init_waitqueue_head(&uhid->report_wait); | 613 | init_waitqueue_head(&uhid->report_wait); |
| 568 | uhid->running = false; | 614 | uhid->running = false; |
| 569 | atomic_set(&uhid->report_done, 1); | ||
| 570 | 615 | ||
| 571 | file->private_data = uhid; | 616 | file->private_data = uhid; |
| 572 | nonseekable_open(inode, file); | 617 | nonseekable_open(inode, file); |
| @@ -675,8 +720,11 @@ static ssize_t uhid_char_write(struct file *file, const char __user *buffer, | |||
| 675 | case UHID_INPUT2: | 720 | case UHID_INPUT2: |
| 676 | ret = uhid_dev_input2(uhid, &uhid->input_buf); | 721 | ret = uhid_dev_input2(uhid, &uhid->input_buf); |
| 677 | break; | 722 | break; |
| 678 | case UHID_FEATURE_ANSWER: | 723 | case UHID_GET_REPORT_REPLY: |
| 679 | ret = uhid_dev_feature_answer(uhid, &uhid->input_buf); | 724 | ret = uhid_dev_get_report_reply(uhid, &uhid->input_buf); |
| 725 | break; | ||
| 726 | case UHID_SET_REPORT_REPLY: | ||
| 727 | ret = uhid_dev_set_report_reply(uhid, &uhid->input_buf); | ||
| 680 | break; | 728 | break; |
| 681 | default: | 729 | default: |
| 682 | ret = -EOPNOTSUPP; | 730 | ret = -EOPNOTSUPP; |
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 80c50763b3f8..ca6849a0121e 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c | |||
| @@ -82,7 +82,7 @@ static int hid_start_in(struct hid_device *hid) | |||
| 82 | struct usbhid_device *usbhid = hid->driver_data; | 82 | struct usbhid_device *usbhid = hid->driver_data; |
| 83 | 83 | ||
| 84 | spin_lock_irqsave(&usbhid->lock, flags); | 84 | spin_lock_irqsave(&usbhid->lock, flags); |
| 85 | if (hid->open > 0 && | 85 | if ((hid->open > 0 || hid->quirks & HID_QUIRK_ALWAYS_POLL) && |
| 86 | !test_bit(HID_DISCONNECTED, &usbhid->iofl) && | 86 | !test_bit(HID_DISCONNECTED, &usbhid->iofl) && |
| 87 | !test_bit(HID_SUSPENDED, &usbhid->iofl) && | 87 | !test_bit(HID_SUSPENDED, &usbhid->iofl) && |
| 88 | !test_and_set_bit(HID_IN_RUNNING, &usbhid->iofl)) { | 88 | !test_and_set_bit(HID_IN_RUNNING, &usbhid->iofl)) { |
| @@ -276,6 +276,8 @@ static void hid_irq_in(struct urb *urb) | |||
| 276 | case 0: /* success */ | 276 | case 0: /* success */ |
| 277 | usbhid_mark_busy(usbhid); | 277 | usbhid_mark_busy(usbhid); |
| 278 | usbhid->retry_delay = 0; | 278 | usbhid->retry_delay = 0; |
| 279 | if ((hid->quirks & HID_QUIRK_ALWAYS_POLL) && !hid->open) | ||
| 280 | break; | ||
| 279 | hid_input_report(urb->context, HID_INPUT_REPORT, | 281 | hid_input_report(urb->context, HID_INPUT_REPORT, |
| 280 | urb->transfer_buffer, | 282 | urb->transfer_buffer, |
| 281 | urb->actual_length, 1); | 283 | urb->actual_length, 1); |
| @@ -719,8 +721,10 @@ void usbhid_close(struct hid_device *hid) | |||
| 719 | if (!--hid->open) { | 721 | if (!--hid->open) { |
| 720 | spin_unlock_irq(&usbhid->lock); | 722 | spin_unlock_irq(&usbhid->lock); |
| 721 | hid_cancel_delayed_stuff(usbhid); | 723 | hid_cancel_delayed_stuff(usbhid); |
| 722 | usb_kill_urb(usbhid->urbin); | 724 | if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) { |
| 723 | usbhid->intf->needs_remote_wakeup = 0; | 725 | usb_kill_urb(usbhid->urbin); |
| 726 | usbhid->intf->needs_remote_wakeup = 0; | ||
| 727 | } | ||
| 724 | } else { | 728 | } else { |
| 725 | spin_unlock_irq(&usbhid->lock); | 729 | spin_unlock_irq(&usbhid->lock); |
| 726 | } | 730 | } |
| @@ -1118,6 +1122,19 @@ static int usbhid_start(struct hid_device *hid) | |||
| 1118 | 1122 | ||
| 1119 | set_bit(HID_STARTED, &usbhid->iofl); | 1123 | set_bit(HID_STARTED, &usbhid->iofl); |
| 1120 | 1124 | ||
| 1125 | if (hid->quirks & HID_QUIRK_ALWAYS_POLL) { | ||
| 1126 | ret = usb_autopm_get_interface(usbhid->intf); | ||
| 1127 | if (ret) | ||
| 1128 | goto fail; | ||
| 1129 | usbhid->intf->needs_remote_wakeup = 1; | ||
| 1130 | ret = hid_start_in(hid); | ||
| 1131 | if (ret) { | ||
| 1132 | dev_err(&hid->dev, | ||
| 1133 | "failed to start in urb: %d\n", ret); | ||
| 1134 | } | ||
| 1135 | usb_autopm_put_interface(usbhid->intf); | ||
| 1136 | } | ||
| 1137 | |||
| 1121 | /* Some keyboards don't work until their LEDs have been set. | 1138 | /* Some keyboards don't work until their LEDs have been set. |
| 1122 | * Since BIOSes do set the LEDs, it must be safe for any device | 1139 | * Since BIOSes do set the LEDs, it must be safe for any device |
| 1123 | * that supports the keyboard boot protocol. | 1140 | * that supports the keyboard boot protocol. |
| @@ -1150,6 +1167,9 @@ static void usbhid_stop(struct hid_device *hid) | |||
| 1150 | if (WARN_ON(!usbhid)) | 1167 | if (WARN_ON(!usbhid)) |
| 1151 | return; | 1168 | return; |
| 1152 | 1169 | ||
| 1170 | if (hid->quirks & HID_QUIRK_ALWAYS_POLL) | ||
| 1171 | usbhid->intf->needs_remote_wakeup = 0; | ||
| 1172 | |||
| 1153 | clear_bit(HID_STARTED, &usbhid->iofl); | 1173 | clear_bit(HID_STARTED, &usbhid->iofl); |
| 1154 | spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */ | 1174 | spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */ |
| 1155 | set_bit(HID_DISCONNECTED, &usbhid->iofl); | 1175 | set_bit(HID_DISCONNECTED, &usbhid->iofl); |
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 15225f3eaed1..f3cb5b0a4345 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
| @@ -70,6 +70,7 @@ static const struct hid_blacklist { | |||
| 70 | { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET }, | 70 | { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET }, |
| 71 | { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, | 71 | { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, |
| 72 | { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, | 72 | { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, |
| 73 | { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL }, | ||
| 73 | { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, | 74 | { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, |
| 74 | { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, | 75 | { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, |
| 75 | { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, | 76 | { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, |
| @@ -79,6 +80,8 @@ static const struct hid_blacklist { | |||
| 79 | { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, | 80 | { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, |
| 80 | { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610, HID_QUIRK_NOGET }, | 81 | { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610, HID_QUIRK_NOGET }, |
| 81 | { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1640, HID_QUIRK_NOGET }, | 82 | { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1640, HID_QUIRK_NOGET }, |
| 83 | { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, | ||
| 84 | { USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2, HID_QUIRK_ALWAYS_POLL }, | ||
| 82 | { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, | 85 | { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, |
| 83 | { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, | 86 | { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, |
| 84 | { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS }, | 87 | { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS }, |
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h index 64bc1b296d91..0cc53440543a 100644 --- a/drivers/hid/wacom.h +++ b/drivers/hid/wacom.h | |||
| @@ -89,6 +89,7 @@ | |||
| 89 | #include <linux/slab.h> | 89 | #include <linux/slab.h> |
| 90 | #include <linux/module.h> | 90 | #include <linux/module.h> |
| 91 | #include <linux/mod_devicetable.h> | 91 | #include <linux/mod_devicetable.h> |
| 92 | #include <linux/hid.h> | ||
| 92 | #include <linux/usb/input.h> | 93 | #include <linux/usb/input.h> |
| 93 | #include <linux/power_supply.h> | 94 | #include <linux/power_supply.h> |
| 94 | #include <asm/unaligned.h> | 95 | #include <asm/unaligned.h> |
| @@ -143,4 +144,9 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, | |||
| 143 | struct wacom_wac *wacom_wac); | 144 | struct wacom_wac *wacom_wac); |
| 144 | int wacom_setup_pad_input_capabilities(struct input_dev *input_dev, | 145 | int wacom_setup_pad_input_capabilities(struct input_dev *input_dev, |
| 145 | struct wacom_wac *wacom_wac); | 146 | struct wacom_wac *wacom_wac); |
| 147 | void wacom_wac_usage_mapping(struct hid_device *hdev, | ||
| 148 | struct hid_field *field, struct hid_usage *usage); | ||
| 149 | int wacom_wac_event(struct hid_device *hdev, struct hid_field *field, | ||
| 150 | struct hid_usage *usage, __s32 value); | ||
| 151 | void wacom_wac_report(struct hid_device *hdev, struct hid_report *report); | ||
| 146 | #endif | 152 | #endif |
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index f0db7eca9023..8593047bb726 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c | |||
| @@ -13,23 +13,26 @@ | |||
| 13 | 13 | ||
| 14 | #include "wacom_wac.h" | 14 | #include "wacom_wac.h" |
| 15 | #include "wacom.h" | 15 | #include "wacom.h" |
| 16 | #include <linux/hid.h> | ||
| 17 | 16 | ||
| 18 | #define WAC_MSG_RETRIES 5 | 17 | #define WAC_MSG_RETRIES 5 |
| 19 | 18 | ||
| 19 | #define WAC_CMD_WL_LED_CONTROL 0x03 | ||
| 20 | #define WAC_CMD_LED_CONTROL 0x20 | 20 | #define WAC_CMD_LED_CONTROL 0x20 |
| 21 | #define WAC_CMD_ICON_START 0x21 | 21 | #define WAC_CMD_ICON_START 0x21 |
| 22 | #define WAC_CMD_ICON_XFER 0x23 | 22 | #define WAC_CMD_ICON_XFER 0x23 |
| 23 | #define WAC_CMD_ICON_BT_XFER 0x26 | 23 | #define WAC_CMD_ICON_BT_XFER 0x26 |
| 24 | #define WAC_CMD_RETRIES 10 | 24 | #define WAC_CMD_RETRIES 10 |
| 25 | 25 | ||
| 26 | static int wacom_get_report(struct hid_device *hdev, u8 type, u8 id, | 26 | #define DEV_ATTR_RW_PERM (S_IRUGO | S_IWUSR | S_IWGRP) |
| 27 | void *buf, size_t size, unsigned int retries) | 27 | #define DEV_ATTR_WO_PERM (S_IWUSR | S_IWGRP) |
| 28 | |||
| 29 | static int wacom_get_report(struct hid_device *hdev, u8 type, u8 *buf, | ||
| 30 | size_t size, unsigned int retries) | ||
| 28 | { | 31 | { |
| 29 | int retval; | 32 | int retval; |
| 30 | 33 | ||
| 31 | do { | 34 | do { |
| 32 | retval = hid_hw_raw_request(hdev, id, buf, size, type, | 35 | retval = hid_hw_raw_request(hdev, buf[0], buf, size, type, |
| 33 | HID_REQ_GET_REPORT); | 36 | HID_REQ_GET_REPORT); |
| 34 | } while ((retval == -ETIMEDOUT || retval == -EPIPE) && --retries); | 37 | } while ((retval == -ETIMEDOUT || retval == -EPIPE) && --retries); |
| 35 | 38 | ||
| @@ -106,12 +109,35 @@ static void wacom_feature_mapping(struct hid_device *hdev, | |||
| 106 | { | 109 | { |
| 107 | struct wacom *wacom = hid_get_drvdata(hdev); | 110 | struct wacom *wacom = hid_get_drvdata(hdev); |
| 108 | struct wacom_features *features = &wacom->wacom_wac.features; | 111 | struct wacom_features *features = &wacom->wacom_wac.features; |
| 112 | struct hid_data *hid_data = &wacom->wacom_wac.hid_data; | ||
| 113 | u8 *data; | ||
| 114 | int ret; | ||
| 109 | 115 | ||
| 110 | switch (usage->hid) { | 116 | switch (usage->hid) { |
| 111 | case HID_DG_CONTACTMAX: | 117 | case HID_DG_CONTACTMAX: |
| 112 | /* leave touch_max as is if predefined */ | 118 | /* leave touch_max as is if predefined */ |
| 113 | if (!features->touch_max) | 119 | if (!features->touch_max) { |
| 114 | features->touch_max = field->value[0]; | 120 | /* read manually */ |
| 121 | data = kzalloc(2, GFP_KERNEL); | ||
| 122 | if (!data) | ||
| 123 | break; | ||
| 124 | data[0] = field->report->id; | ||
| 125 | ret = wacom_get_report(hdev, HID_FEATURE_REPORT, | ||
| 126 | data, 2, 0); | ||
| 127 | if (ret == 2) | ||
| 128 | features->touch_max = data[1]; | ||
| 129 | kfree(data); | ||
| 130 | } | ||
| 131 | break; | ||
| 132 | case HID_DG_INPUTMODE: | ||
| 133 | /* Ignore if value index is out of bounds. */ | ||
| 134 | if (usage->usage_index >= field->report_count) { | ||
| 135 | dev_err(&hdev->dev, "HID_DG_INPUTMODE out of range\n"); | ||
| 136 | break; | ||
| 137 | } | ||
| 138 | |||
| 139 | hid_data->inputmode = field->report->id; | ||
| 140 | hid_data->inputmode_index = usage->usage_index; | ||
| 115 | break; | 141 | break; |
| 116 | } | 142 | } |
| 117 | } | 143 | } |
| @@ -199,6 +225,9 @@ static void wacom_usage_mapping(struct hid_device *hdev, | |||
| 199 | features->pressure_max = field->logical_maximum; | 225 | features->pressure_max = field->logical_maximum; |
| 200 | break; | 226 | break; |
| 201 | } | 227 | } |
| 228 | |||
| 229 | if (features->type == HID_GENERIC) | ||
| 230 | wacom_wac_usage_mapping(hdev, field, usage); | ||
| 202 | } | 231 | } |
| 203 | 232 | ||
| 204 | static void wacom_parse_hid(struct hid_device *hdev, | 233 | static void wacom_parse_hid(struct hid_device *hdev, |
| @@ -237,6 +266,25 @@ static void wacom_parse_hid(struct hid_device *hdev, | |||
| 237 | } | 266 | } |
| 238 | } | 267 | } |
| 239 | 268 | ||
| 269 | static int wacom_hid_set_device_mode(struct hid_device *hdev) | ||
| 270 | { | ||
| 271 | struct wacom *wacom = hid_get_drvdata(hdev); | ||
| 272 | struct hid_data *hid_data = &wacom->wacom_wac.hid_data; | ||
| 273 | struct hid_report *r; | ||
| 274 | struct hid_report_enum *re; | ||
| 275 | |||
| 276 | if (hid_data->inputmode < 0) | ||
| 277 | return 0; | ||
| 278 | |||
| 279 | re = &(hdev->report_enum[HID_FEATURE_REPORT]); | ||
| 280 | r = re->report_id_hash[hid_data->inputmode]; | ||
| 281 | if (r) { | ||
| 282 | r->field[0]->value[hid_data->inputmode_index] = 2; | ||
| 283 | hid_hw_request(hdev, r, HID_REQ_SET_REPORT); | ||
| 284 | } | ||
| 285 | return 0; | ||
| 286 | } | ||
| 287 | |||
| 240 | static int wacom_set_device_mode(struct hid_device *hdev, int report_id, | 288 | static int wacom_set_device_mode(struct hid_device *hdev, int report_id, |
| 241 | int length, int mode) | 289 | int length, int mode) |
| 242 | { | 290 | { |
| @@ -255,7 +303,7 @@ static int wacom_set_device_mode(struct hid_device *hdev, int report_id, | |||
| 255 | length, 1); | 303 | length, 1); |
| 256 | if (error >= 0) | 304 | if (error >= 0) |
| 257 | error = wacom_get_report(hdev, HID_FEATURE_REPORT, | 305 | error = wacom_get_report(hdev, HID_FEATURE_REPORT, |
| 258 | report_id, rep_data, length, 1); | 306 | rep_data, length, 1); |
| 259 | } while ((error < 0 || rep_data[1] != mode) && limit++ < WAC_MSG_RETRIES); | 307 | } while ((error < 0 || rep_data[1] != mode) && limit++ < WAC_MSG_RETRIES); |
| 260 | 308 | ||
| 261 | kfree(rep_data); | 309 | kfree(rep_data); |
| @@ -329,6 +377,9 @@ static int wacom_query_tablet_data(struct hid_device *hdev, | |||
| 329 | if (hdev->bus == BUS_BLUETOOTH) | 377 | if (hdev->bus == BUS_BLUETOOTH) |
| 330 | return wacom_bt_query_tablet_data(hdev, 1, features); | 378 | return wacom_bt_query_tablet_data(hdev, 1, features); |
| 331 | 379 | ||
| 380 | if (features->type == HID_GENERIC) | ||
| 381 | return wacom_hid_set_device_mode(hdev); | ||
| 382 | |||
| 332 | if (features->device_type == BTN_TOOL_FINGER) { | 383 | if (features->device_type == BTN_TOOL_FINGER) { |
| 333 | if (features->type > TABLETPC) { | 384 | if (features->type > TABLETPC) { |
| 334 | /* MT Tablet PC touch */ | 385 | /* MT Tablet PC touch */ |
| @@ -487,8 +538,14 @@ static int wacom_led_control(struct wacom *wacom) | |||
| 487 | { | 538 | { |
| 488 | unsigned char *buf; | 539 | unsigned char *buf; |
| 489 | int retval; | 540 | int retval; |
| 541 | unsigned char report_id = WAC_CMD_LED_CONTROL; | ||
| 542 | int buf_size = 9; | ||
| 490 | 543 | ||
| 491 | buf = kzalloc(9, GFP_KERNEL); | 544 | if (wacom->wacom_wac.pid) { /* wireless connected */ |
| 545 | report_id = WAC_CMD_WL_LED_CONTROL; | ||
| 546 | buf_size = 13; | ||
| 547 | } | ||
| 548 | buf = kzalloc(buf_size, GFP_KERNEL); | ||
| 492 | if (!buf) | 549 | if (!buf) |
| 493 | return -ENOMEM; | 550 | return -ENOMEM; |
| 494 | 551 | ||
| @@ -502,9 +559,16 @@ static int wacom_led_control(struct wacom *wacom) | |||
| 502 | int ring_led = wacom->led.select[0] & 0x03; | 559 | int ring_led = wacom->led.select[0] & 0x03; |
| 503 | int ring_lum = (((wacom->led.llv & 0x60) >> 5) - 1) & 0x03; | 560 | int ring_lum = (((wacom->led.llv & 0x60) >> 5) - 1) & 0x03; |
| 504 | int crop_lum = 0; | 561 | int crop_lum = 0; |
| 505 | 562 | unsigned char led_bits = (crop_lum << 4) | (ring_lum << 2) | (ring_led); | |
| 506 | buf[0] = WAC_CMD_LED_CONTROL; | 563 | |
| 507 | buf[1] = (crop_lum << 4) | (ring_lum << 2) | (ring_led); | 564 | buf[0] = report_id; |
| 565 | if (wacom->wacom_wac.pid) { | ||
| 566 | wacom_get_report(wacom->hdev, HID_FEATURE_REPORT, | ||
| 567 | buf, buf_size, WAC_CMD_RETRIES); | ||
| 568 | buf[0] = report_id; | ||
| 569 | buf[4] = led_bits; | ||
| 570 | } else | ||
| 571 | buf[1] = led_bits; | ||
| 508 | } | 572 | } |
| 509 | else { | 573 | else { |
| 510 | int led = wacom->led.select[0] | 0x4; | 574 | int led = wacom->led.select[0] | 0x4; |
| @@ -513,14 +577,14 @@ static int wacom_led_control(struct wacom *wacom) | |||
| 513 | wacom->wacom_wac.features.type == WACOM_24HD) | 577 | wacom->wacom_wac.features.type == WACOM_24HD) |
| 514 | led |= (wacom->led.select[1] << 4) | 0x40; | 578 | led |= (wacom->led.select[1] << 4) | 0x40; |
| 515 | 579 | ||
| 516 | buf[0] = WAC_CMD_LED_CONTROL; | 580 | buf[0] = report_id; |
| 517 | buf[1] = led; | 581 | buf[1] = led; |
| 518 | buf[2] = wacom->led.llv; | 582 | buf[2] = wacom->led.llv; |
| 519 | buf[3] = wacom->led.hlv; | 583 | buf[3] = wacom->led.hlv; |
| 520 | buf[4] = wacom->led.img_lum; | 584 | buf[4] = wacom->led.img_lum; |
| 521 | } | 585 | } |
| 522 | 586 | ||
| 523 | retval = wacom_set_report(wacom->hdev, HID_FEATURE_REPORT, buf, 9, | 587 | retval = wacom_set_report(wacom->hdev, HID_FEATURE_REPORT, buf, buf_size, |
| 524 | WAC_CMD_RETRIES); | 588 | WAC_CMD_RETRIES); |
| 525 | kfree(buf); | 589 | kfree(buf); |
| 526 | 590 | ||
| @@ -602,9 +666,10 @@ static ssize_t wacom_led##SET_ID##_select_show(struct device *dev, \ | |||
| 602 | { \ | 666 | { \ |
| 603 | struct hid_device *hdev = container_of(dev, struct hid_device, dev);\ | 667 | struct hid_device *hdev = container_of(dev, struct hid_device, dev);\ |
| 604 | struct wacom *wacom = hid_get_drvdata(hdev); \ | 668 | struct wacom *wacom = hid_get_drvdata(hdev); \ |
| 605 | return snprintf(buf, 2, "%d\n", wacom->led.select[SET_ID]); \ | 669 | return scnprintf(buf, PAGE_SIZE, "%d\n", \ |
| 670 | wacom->led.select[SET_ID]); \ | ||
| 606 | } \ | 671 | } \ |
| 607 | static DEVICE_ATTR(status_led##SET_ID##_select, S_IWUSR | S_IRUSR, \ | 672 | static DEVICE_ATTR(status_led##SET_ID##_select, DEV_ATTR_RW_PERM, \ |
| 608 | wacom_led##SET_ID##_select_show, \ | 673 | wacom_led##SET_ID##_select_show, \ |
| 609 | wacom_led##SET_ID##_select_store) | 674 | wacom_led##SET_ID##_select_store) |
| 610 | 675 | ||
| @@ -641,8 +706,15 @@ static ssize_t wacom_##name##_luminance_store(struct device *dev, \ | |||
| 641 | return wacom_luminance_store(wacom, &wacom->led.field, \ | 706 | return wacom_luminance_store(wacom, &wacom->led.field, \ |
| 642 | buf, count); \ | 707 | buf, count); \ |
| 643 | } \ | 708 | } \ |
| 644 | static DEVICE_ATTR(name##_luminance, S_IWUSR, \ | 709 | static ssize_t wacom_##name##_luminance_show(struct device *dev, \ |
| 645 | NULL, wacom_##name##_luminance_store) | 710 | struct device_attribute *attr, char *buf) \ |
| 711 | { \ | ||
| 712 | struct wacom *wacom = dev_get_drvdata(dev); \ | ||
| 713 | return scnprintf(buf, PAGE_SIZE, "%d\n", wacom->led.field); \ | ||
| 714 | } \ | ||
| 715 | static DEVICE_ATTR(name##_luminance, DEV_ATTR_RW_PERM, \ | ||
| 716 | wacom_##name##_luminance_show, \ | ||
| 717 | wacom_##name##_luminance_store) | ||
| 646 | 718 | ||
| 647 | DEVICE_LUMINANCE_ATTR(status0, llv); | 719 | DEVICE_LUMINANCE_ATTR(status0, llv); |
| 648 | DEVICE_LUMINANCE_ATTR(status1, hlv); | 720 | DEVICE_LUMINANCE_ATTR(status1, hlv); |
| @@ -683,7 +755,7 @@ static ssize_t wacom_btnimg##BUTTON_ID##_store(struct device *dev, \ | |||
| 683 | { \ | 755 | { \ |
| 684 | return wacom_button_image_store(dev, BUTTON_ID, buf, count); \ | 756 | return wacom_button_image_store(dev, BUTTON_ID, buf, count); \ |
| 685 | } \ | 757 | } \ |
| 686 | static DEVICE_ATTR(button##BUTTON_ID##_rawimg, S_IWUSR, \ | 758 | static DEVICE_ATTR(button##BUTTON_ID##_rawimg, DEV_ATTR_WO_PERM, \ |
| 687 | NULL, wacom_btnimg##BUTTON_ID##_store) | 759 | NULL, wacom_btnimg##BUTTON_ID##_store) |
| 688 | 760 | ||
| 689 | DEVICE_BTNIMG_ATTR(0); | 761 | DEVICE_BTNIMG_ATTR(0); |
| @@ -989,7 +1061,7 @@ static ssize_t wacom_store_speed(struct device *dev, | |||
| 989 | return count; | 1061 | return count; |
| 990 | } | 1062 | } |
| 991 | 1063 | ||
| 992 | static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR | S_IWGRP, | 1064 | static DEVICE_ATTR(speed, DEV_ATTR_RW_PERM, |
| 993 | wacom_show_speed, wacom_store_speed); | 1065 | wacom_show_speed, wacom_store_speed); |
| 994 | 1066 | ||
| 995 | static struct input_dev *wacom_allocate_input(struct wacom *wacom) | 1067 | static struct input_dev *wacom_allocate_input(struct wacom *wacom) |
| @@ -1010,47 +1082,82 @@ static struct input_dev *wacom_allocate_input(struct wacom *wacom) | |||
| 1010 | input_dev->uniq = hdev->uniq; | 1082 | input_dev->uniq = hdev->uniq; |
| 1011 | input_dev->id.bustype = hdev->bus; | 1083 | input_dev->id.bustype = hdev->bus; |
| 1012 | input_dev->id.vendor = hdev->vendor; | 1084 | input_dev->id.vendor = hdev->vendor; |
| 1013 | input_dev->id.product = hdev->product; | 1085 | input_dev->id.product = wacom_wac->pid ? wacom_wac->pid : hdev->product; |
| 1014 | input_dev->id.version = hdev->version; | 1086 | input_dev->id.version = hdev->version; |
| 1015 | input_set_drvdata(input_dev, wacom); | 1087 | input_set_drvdata(input_dev, wacom); |
| 1016 | 1088 | ||
| 1017 | return input_dev; | 1089 | return input_dev; |
| 1018 | } | 1090 | } |
| 1019 | 1091 | ||
| 1020 | static void wacom_unregister_inputs(struct wacom *wacom) | 1092 | static void wacom_free_inputs(struct wacom *wacom) |
| 1021 | { | 1093 | { |
| 1022 | if (wacom->wacom_wac.input) | 1094 | struct wacom_wac *wacom_wac = &(wacom->wacom_wac); |
| 1023 | input_unregister_device(wacom->wacom_wac.input); | 1095 | |
| 1024 | if (wacom->wacom_wac.pad_input) | 1096 | if (wacom_wac->input) |
| 1025 | input_unregister_device(wacom->wacom_wac.pad_input); | 1097 | input_free_device(wacom_wac->input); |
| 1026 | wacom->wacom_wac.input = NULL; | 1098 | if (wacom_wac->pad_input) |
| 1027 | wacom->wacom_wac.pad_input = NULL; | 1099 | input_free_device(wacom_wac->pad_input); |
| 1100 | wacom_wac->input = NULL; | ||
| 1101 | wacom_wac->pad_input = NULL; | ||
| 1028 | } | 1102 | } |
| 1029 | 1103 | ||
| 1030 | static int wacom_register_inputs(struct wacom *wacom) | 1104 | static int wacom_allocate_inputs(struct wacom *wacom) |
| 1031 | { | 1105 | { |
| 1032 | struct input_dev *input_dev, *pad_input_dev; | 1106 | struct input_dev *input_dev, *pad_input_dev; |
| 1033 | struct wacom_wac *wacom_wac = &(wacom->wacom_wac); | 1107 | struct wacom_wac *wacom_wac = &(wacom->wacom_wac); |
| 1034 | int error; | ||
| 1035 | 1108 | ||
| 1036 | input_dev = wacom_allocate_input(wacom); | 1109 | input_dev = wacom_allocate_input(wacom); |
| 1037 | pad_input_dev = wacom_allocate_input(wacom); | 1110 | pad_input_dev = wacom_allocate_input(wacom); |
| 1038 | if (!input_dev || !pad_input_dev) { | 1111 | if (!input_dev || !pad_input_dev) { |
| 1039 | error = -ENOMEM; | 1112 | wacom_free_inputs(wacom); |
| 1040 | goto fail1; | 1113 | return -ENOMEM; |
| 1041 | } | 1114 | } |
| 1042 | 1115 | ||
| 1043 | wacom_wac->input = input_dev; | 1116 | wacom_wac->input = input_dev; |
| 1044 | wacom_wac->pad_input = pad_input_dev; | 1117 | wacom_wac->pad_input = pad_input_dev; |
| 1045 | wacom_wac->pad_input->name = wacom_wac->pad_name; | 1118 | wacom_wac->pad_input->name = wacom_wac->pad_name; |
| 1046 | 1119 | ||
| 1120 | return 0; | ||
| 1121 | } | ||
| 1122 | |||
| 1123 | static void wacom_clean_inputs(struct wacom *wacom) | ||
| 1124 | { | ||
| 1125 | if (wacom->wacom_wac.input) { | ||
| 1126 | if (wacom->wacom_wac.input_registered) | ||
| 1127 | input_unregister_device(wacom->wacom_wac.input); | ||
| 1128 | else | ||
| 1129 | input_free_device(wacom->wacom_wac.input); | ||
| 1130 | } | ||
| 1131 | if (wacom->wacom_wac.pad_input) { | ||
| 1132 | if (wacom->wacom_wac.input_registered) | ||
| 1133 | input_unregister_device(wacom->wacom_wac.pad_input); | ||
| 1134 | else | ||
| 1135 | input_free_device(wacom->wacom_wac.pad_input); | ||
| 1136 | } | ||
| 1137 | wacom->wacom_wac.input = NULL; | ||
| 1138 | wacom->wacom_wac.pad_input = NULL; | ||
| 1139 | wacom_destroy_leds(wacom); | ||
| 1140 | } | ||
| 1141 | |||
| 1142 | static int wacom_register_inputs(struct wacom *wacom) | ||
| 1143 | { | ||
| 1144 | struct input_dev *input_dev, *pad_input_dev; | ||
| 1145 | struct wacom_wac *wacom_wac = &(wacom->wacom_wac); | ||
| 1146 | int error; | ||
| 1147 | |||
| 1148 | input_dev = wacom_wac->input; | ||
| 1149 | pad_input_dev = wacom_wac->pad_input; | ||
| 1150 | |||
| 1151 | if (!input_dev || !pad_input_dev) | ||
| 1152 | return -EINVAL; | ||
| 1153 | |||
| 1047 | error = wacom_setup_input_capabilities(input_dev, wacom_wac); | 1154 | error = wacom_setup_input_capabilities(input_dev, wacom_wac); |
| 1048 | if (error) | 1155 | if (error) |
| 1049 | goto fail2; | 1156 | return error; |
| 1050 | 1157 | ||
| 1051 | error = input_register_device(input_dev); | 1158 | error = input_register_device(input_dev); |
| 1052 | if (error) | 1159 | if (error) |
| 1053 | goto fail2; | 1160 | return error; |
| 1054 | 1161 | ||
| 1055 | error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac); | 1162 | error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac); |
| 1056 | if (error) { | 1163 | if (error) { |
| @@ -1061,22 +1168,23 @@ static int wacom_register_inputs(struct wacom *wacom) | |||
| 1061 | } else { | 1168 | } else { |
| 1062 | error = input_register_device(pad_input_dev); | 1169 | error = input_register_device(pad_input_dev); |
| 1063 | if (error) | 1170 | if (error) |
| 1064 | goto fail3; | 1171 | goto fail_register_pad_input; |
| 1172 | |||
| 1173 | error = wacom_initialize_leds(wacom); | ||
| 1174 | if (error) | ||
| 1175 | goto fail_leds; | ||
| 1065 | } | 1176 | } |
| 1066 | 1177 | ||
| 1178 | wacom_wac->input_registered = true; | ||
| 1179 | |||
| 1067 | return 0; | 1180 | return 0; |
| 1068 | 1181 | ||
| 1069 | fail3: | 1182 | fail_leds: |
| 1183 | input_unregister_device(pad_input_dev); | ||
| 1184 | pad_input_dev = NULL; | ||
| 1185 | fail_register_pad_input: | ||
| 1070 | input_unregister_device(input_dev); | 1186 | input_unregister_device(input_dev); |
| 1071 | input_dev = NULL; | ||
| 1072 | fail2: | ||
| 1073 | wacom_wac->input = NULL; | 1187 | wacom_wac->input = NULL; |
| 1074 | wacom_wac->pad_input = NULL; | ||
| 1075 | fail1: | ||
| 1076 | if (input_dev) | ||
| 1077 | input_free_device(input_dev); | ||
| 1078 | if (pad_input_dev) | ||
| 1079 | input_free_device(pad_input_dev); | ||
| 1080 | return error; | 1188 | return error; |
| 1081 | } | 1189 | } |
| 1082 | 1190 | ||
| @@ -1101,13 +1209,13 @@ static void wacom_wireless_work(struct work_struct *work) | |||
| 1101 | hdev1 = usb_get_intfdata(usbdev->config->interface[1]); | 1209 | hdev1 = usb_get_intfdata(usbdev->config->interface[1]); |
| 1102 | wacom1 = hid_get_drvdata(hdev1); | 1210 | wacom1 = hid_get_drvdata(hdev1); |
| 1103 | wacom_wac1 = &(wacom1->wacom_wac); | 1211 | wacom_wac1 = &(wacom1->wacom_wac); |
| 1104 | wacom_unregister_inputs(wacom1); | 1212 | wacom_clean_inputs(wacom1); |
| 1105 | 1213 | ||
| 1106 | /* Touch interface */ | 1214 | /* Touch interface */ |
| 1107 | hdev2 = usb_get_intfdata(usbdev->config->interface[2]); | 1215 | hdev2 = usb_get_intfdata(usbdev->config->interface[2]); |
| 1108 | wacom2 = hid_get_drvdata(hdev2); | 1216 | wacom2 = hid_get_drvdata(hdev2); |
| 1109 | wacom_wac2 = &(wacom2->wacom_wac); | 1217 | wacom_wac2 = &(wacom2->wacom_wac); |
| 1110 | wacom_unregister_inputs(wacom2); | 1218 | wacom_clean_inputs(wacom2); |
| 1111 | 1219 | ||
| 1112 | if (wacom_wac->pid == 0) { | 1220 | if (wacom_wac->pid == 0) { |
| 1113 | hid_info(wacom->hdev, "wireless tablet disconnected\n"); | 1221 | hid_info(wacom->hdev, "wireless tablet disconnected\n"); |
| @@ -1140,7 +1248,9 @@ static void wacom_wireless_work(struct work_struct *work) | |||
| 1140 | wacom_wac1->features.name); | 1248 | wacom_wac1->features.name); |
| 1141 | wacom_wac1->shared->touch_max = wacom_wac1->features.touch_max; | 1249 | wacom_wac1->shared->touch_max = wacom_wac1->features.touch_max; |
| 1142 | wacom_wac1->shared->type = wacom_wac1->features.type; | 1250 | wacom_wac1->shared->type = wacom_wac1->features.type; |
| 1143 | error = wacom_register_inputs(wacom1); | 1251 | wacom_wac1->pid = wacom_wac->pid; |
| 1252 | error = wacom_allocate_inputs(wacom1) || | ||
| 1253 | wacom_register_inputs(wacom1); | ||
| 1144 | if (error) | 1254 | if (error) |
| 1145 | goto fail; | 1255 | goto fail; |
| 1146 | 1256 | ||
| @@ -1160,7 +1270,9 @@ static void wacom_wireless_work(struct work_struct *work) | |||
| 1160 | "%s (WL) Pad",wacom_wac2->features.name); | 1270 | "%s (WL) Pad",wacom_wac2->features.name); |
| 1161 | snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX, | 1271 | snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX, |
| 1162 | "%s (WL) Pad", wacom_wac2->features.name); | 1272 | "%s (WL) Pad", wacom_wac2->features.name); |
| 1163 | error = wacom_register_inputs(wacom2); | 1273 | wacom_wac2->pid = wacom_wac->pid; |
| 1274 | error = wacom_allocate_inputs(wacom2) || | ||
| 1275 | wacom_register_inputs(wacom2); | ||
| 1164 | if (error) | 1276 | if (error) |
| 1165 | goto fail; | 1277 | goto fail; |
| 1166 | 1278 | ||
| @@ -1177,8 +1289,8 @@ static void wacom_wireless_work(struct work_struct *work) | |||
| 1177 | return; | 1289 | return; |
| 1178 | 1290 | ||
| 1179 | fail: | 1291 | fail: |
| 1180 | wacom_unregister_inputs(wacom1); | 1292 | wacom_clean_inputs(wacom1); |
| 1181 | wacom_unregister_inputs(wacom2); | 1293 | wacom_clean_inputs(wacom2); |
| 1182 | return; | 1294 | return; |
| 1183 | } | 1295 | } |
| 1184 | 1296 | ||
| @@ -1241,10 +1353,13 @@ static int wacom_probe(struct hid_device *hdev, | |||
| 1241 | struct wacom_wac *wacom_wac; | 1353 | struct wacom_wac *wacom_wac; |
| 1242 | struct wacom_features *features; | 1354 | struct wacom_features *features; |
| 1243 | int error; | 1355 | int error; |
| 1356 | unsigned int connect_mask = HID_CONNECT_HIDRAW; | ||
| 1244 | 1357 | ||
| 1245 | if (!id->driver_data) | 1358 | if (!id->driver_data) |
| 1246 | return -EINVAL; | 1359 | return -EINVAL; |
| 1247 | 1360 | ||
| 1361 | hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS; | ||
| 1362 | |||
| 1248 | wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL); | 1363 | wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL); |
| 1249 | if (!wacom) | 1364 | if (!wacom) |
| 1250 | return -ENOMEM; | 1365 | return -ENOMEM; |
| @@ -1256,7 +1371,7 @@ static int wacom_probe(struct hid_device *hdev, | |||
| 1256 | error = hid_parse(hdev); | 1371 | error = hid_parse(hdev); |
| 1257 | if (error) { | 1372 | if (error) { |
| 1258 | hid_err(hdev, "parse failed\n"); | 1373 | hid_err(hdev, "parse failed\n"); |
| 1259 | goto fail1; | 1374 | goto fail_parse; |
| 1260 | } | 1375 | } |
| 1261 | 1376 | ||
| 1262 | wacom_wac = &wacom->wacom_wac; | 1377 | wacom_wac = &wacom->wacom_wac; |
| @@ -1265,12 +1380,12 @@ static int wacom_probe(struct hid_device *hdev, | |||
| 1265 | features->pktlen = wacom_compute_pktlen(hdev); | 1380 | features->pktlen = wacom_compute_pktlen(hdev); |
| 1266 | if (features->pktlen > WACOM_PKGLEN_MAX) { | 1381 | if (features->pktlen > WACOM_PKGLEN_MAX) { |
| 1267 | error = -EINVAL; | 1382 | error = -EINVAL; |
| 1268 | goto fail1; | 1383 | goto fail_pktlen; |
| 1269 | } | 1384 | } |
| 1270 | 1385 | ||
| 1271 | if (features->check_for_hid_type && features->hid_type != hdev->type) { | 1386 | if (features->check_for_hid_type && features->hid_type != hdev->type) { |
| 1272 | error = -ENODEV; | 1387 | error = -ENODEV; |
| 1273 | goto fail1; | 1388 | goto fail_type; |
| 1274 | } | 1389 | } |
| 1275 | 1390 | ||
| 1276 | wacom->usbdev = dev; | 1391 | wacom->usbdev = dev; |
| @@ -1278,6 +1393,12 @@ static int wacom_probe(struct hid_device *hdev, | |||
| 1278 | mutex_init(&wacom->lock); | 1393 | mutex_init(&wacom->lock); |
| 1279 | INIT_WORK(&wacom->work, wacom_wireless_work); | 1394 | INIT_WORK(&wacom->work, wacom_wireless_work); |
| 1280 | 1395 | ||
| 1396 | if (!(features->quirks & WACOM_QUIRK_NO_INPUT)) { | ||
| 1397 | error = wacom_allocate_inputs(wacom); | ||
| 1398 | if (error) | ||
| 1399 | goto fail_allocate_inputs; | ||
| 1400 | } | ||
| 1401 | |||
| 1281 | /* set the default size in case we do not get them from hid */ | 1402 | /* set the default size in case we do not get them from hid */ |
| 1282 | wacom_set_default_phy(features); | 1403 | wacom_set_default_phy(features); |
| 1283 | 1404 | ||
| @@ -1339,24 +1460,20 @@ static int wacom_probe(struct hid_device *hdev, | |||
| 1339 | 1460 | ||
| 1340 | error = wacom_add_shared_data(hdev); | 1461 | error = wacom_add_shared_data(hdev); |
| 1341 | if (error) | 1462 | if (error) |
| 1342 | goto fail1; | 1463 | goto fail_shared_data; |
| 1343 | } | 1464 | } |
| 1344 | 1465 | ||
| 1345 | error = wacom_initialize_leds(wacom); | ||
| 1346 | if (error) | ||
| 1347 | goto fail2; | ||
| 1348 | |||
| 1349 | if (!(features->quirks & WACOM_QUIRK_MONITOR) && | 1466 | if (!(features->quirks & WACOM_QUIRK_MONITOR) && |
| 1350 | (features->quirks & WACOM_QUIRK_BATTERY)) { | 1467 | (features->quirks & WACOM_QUIRK_BATTERY)) { |
| 1351 | error = wacom_initialize_battery(wacom); | 1468 | error = wacom_initialize_battery(wacom); |
| 1352 | if (error) | 1469 | if (error) |
| 1353 | goto fail3; | 1470 | goto fail_battery; |
| 1354 | } | 1471 | } |
| 1355 | 1472 | ||
| 1356 | if (!(features->quirks & WACOM_QUIRK_NO_INPUT)) { | 1473 | if (!(features->quirks & WACOM_QUIRK_NO_INPUT)) { |
| 1357 | error = wacom_register_inputs(wacom); | 1474 | error = wacom_register_inputs(wacom); |
| 1358 | if (error) | 1475 | if (error) |
| 1359 | goto fail4; | 1476 | goto fail_register_inputs; |
| 1360 | } | 1477 | } |
| 1361 | 1478 | ||
| 1362 | if (hdev->bus == BUS_BLUETOOTH) { | 1479 | if (hdev->bus == BUS_BLUETOOTH) { |
| @@ -1367,16 +1484,19 @@ static int wacom_probe(struct hid_device *hdev, | |||
| 1367 | error); | 1484 | error); |
| 1368 | } | 1485 | } |
| 1369 | 1486 | ||
| 1370 | /* Note that if query fails it is not a hard failure */ | 1487 | if (features->type == HID_GENERIC) |
| 1371 | wacom_query_tablet_data(hdev, features); | 1488 | connect_mask |= HID_CONNECT_DRIVER; |
| 1372 | 1489 | ||
| 1373 | /* Regular HID work starts now */ | 1490 | /* Regular HID work starts now */ |
| 1374 | error = hid_hw_start(hdev, HID_CONNECT_HIDRAW); | 1491 | error = hid_hw_start(hdev, connect_mask); |
| 1375 | if (error) { | 1492 | if (error) { |
| 1376 | hid_err(hdev, "hw start failed\n"); | 1493 | hid_err(hdev, "hw start failed\n"); |
| 1377 | goto fail5; | 1494 | goto fail_hw_start; |
| 1378 | } | 1495 | } |
| 1379 | 1496 | ||
| 1497 | /* Note that if query fails it is not a hard failure */ | ||
| 1498 | wacom_query_tablet_data(hdev, features); | ||
| 1499 | |||
| 1380 | if (features->quirks & WACOM_QUIRK_MONITOR) | 1500 | if (features->quirks & WACOM_QUIRK_MONITOR) |
| 1381 | error = hid_hw_open(hdev); | 1501 | error = hid_hw_open(hdev); |
| 1382 | 1502 | ||
| @@ -1387,13 +1507,21 @@ static int wacom_probe(struct hid_device *hdev, | |||
| 1387 | 1507 | ||
| 1388 | return 0; | 1508 | return 0; |
| 1389 | 1509 | ||
| 1390 | fail5: if (hdev->bus == BUS_BLUETOOTH) | 1510 | fail_hw_start: |
| 1511 | if (hdev->bus == BUS_BLUETOOTH) | ||
| 1391 | device_remove_file(&hdev->dev, &dev_attr_speed); | 1512 | device_remove_file(&hdev->dev, &dev_attr_speed); |
| 1392 | wacom_unregister_inputs(wacom); | 1513 | fail_register_inputs: |
| 1393 | fail4: wacom_destroy_battery(wacom); | 1514 | wacom_clean_inputs(wacom); |
| 1394 | fail3: wacom_destroy_leds(wacom); | 1515 | wacom_destroy_battery(wacom); |
| 1395 | fail2: wacom_remove_shared_data(wacom_wac); | 1516 | fail_battery: |
| 1396 | fail1: kfree(wacom); | 1517 | wacom_remove_shared_data(wacom_wac); |
| 1518 | fail_shared_data: | ||
| 1519 | wacom_clean_inputs(wacom); | ||
| 1520 | fail_allocate_inputs: | ||
| 1521 | fail_type: | ||
| 1522 | fail_pktlen: | ||
| 1523 | fail_parse: | ||
| 1524 | kfree(wacom); | ||
| 1397 | hid_set_drvdata(hdev, NULL); | 1525 | hid_set_drvdata(hdev, NULL); |
| 1398 | return error; | 1526 | return error; |
| 1399 | } | 1527 | } |
| @@ -1405,11 +1533,10 @@ static void wacom_remove(struct hid_device *hdev) | |||
| 1405 | hid_hw_stop(hdev); | 1533 | hid_hw_stop(hdev); |
| 1406 | 1534 | ||
| 1407 | cancel_work_sync(&wacom->work); | 1535 | cancel_work_sync(&wacom->work); |
| 1408 | wacom_unregister_inputs(wacom); | 1536 | wacom_clean_inputs(wacom); |
| 1409 | if (hdev->bus == BUS_BLUETOOTH) | 1537 | if (hdev->bus == BUS_BLUETOOTH) |
| 1410 | device_remove_file(&hdev->dev, &dev_attr_speed); | 1538 | device_remove_file(&hdev->dev, &dev_attr_speed); |
| 1411 | wacom_destroy_battery(wacom); | 1539 | wacom_destroy_battery(wacom); |
| 1412 | wacom_destroy_leds(wacom); | ||
| 1413 | wacom_remove_shared_data(&wacom->wacom_wac); | 1540 | wacom_remove_shared_data(&wacom->wacom_wac); |
| 1414 | 1541 | ||
| 1415 | hid_set_drvdata(hdev, NULL); | 1542 | hid_set_drvdata(hdev, NULL); |
| @@ -1444,6 +1571,8 @@ static struct hid_driver wacom_driver = { | |||
| 1444 | .id_table = wacom_ids, | 1571 | .id_table = wacom_ids, |
| 1445 | .probe = wacom_probe, | 1572 | .probe = wacom_probe, |
| 1446 | .remove = wacom_remove, | 1573 | .remove = wacom_remove, |
| 1574 | .event = wacom_wac_event, | ||
| 1575 | .report = wacom_wac_report, | ||
| 1447 | #ifdef CONFIG_PM | 1576 | #ifdef CONFIG_PM |
| 1448 | .resume = wacom_resume, | 1577 | .resume = wacom_resume, |
| 1449 | .reset_resume = wacom_reset_resume, | 1578 | .reset_resume = wacom_reset_resume, |
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index aa6a08eb7ad6..586b2405b0d4 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c | |||
| @@ -1248,6 +1248,296 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len) | |||
| 1248 | return 0; | 1248 | return 0; |
| 1249 | } | 1249 | } |
| 1250 | 1250 | ||
| 1251 | static void wacom_map_usage(struct wacom *wacom, struct hid_usage *usage, | ||
| 1252 | struct hid_field *field, __u8 type, __u16 code, int fuzz) | ||
| 1253 | { | ||
| 1254 | struct wacom_wac *wacom_wac = &wacom->wacom_wac; | ||
| 1255 | struct input_dev *input = wacom_wac->input; | ||
| 1256 | int fmin = field->logical_minimum; | ||
| 1257 | int fmax = field->logical_maximum; | ||
| 1258 | |||
| 1259 | usage->type = type; | ||
| 1260 | usage->code = code; | ||
| 1261 | |||
| 1262 | set_bit(type, input->evbit); | ||
| 1263 | |||
| 1264 | switch (type) { | ||
| 1265 | case EV_ABS: | ||
| 1266 | input_set_abs_params(input, code, fmin, fmax, fuzz, 0); | ||
| 1267 | input_abs_set_res(input, code, | ||
| 1268 | hidinput_calc_abs_res(field, code)); | ||
| 1269 | break; | ||
| 1270 | case EV_KEY: | ||
| 1271 | input_set_capability(input, EV_KEY, code); | ||
| 1272 | break; | ||
| 1273 | case EV_MSC: | ||
| 1274 | input_set_capability(input, EV_MSC, code); | ||
| 1275 | break; | ||
| 1276 | } | ||
| 1277 | } | ||
| 1278 | |||
| 1279 | static void wacom_wac_pen_usage_mapping(struct hid_device *hdev, | ||
| 1280 | struct hid_field *field, struct hid_usage *usage) | ||
| 1281 | { | ||
| 1282 | struct wacom *wacom = hid_get_drvdata(hdev); | ||
| 1283 | |||
| 1284 | switch (usage->hid) { | ||
| 1285 | case HID_GD_X: | ||
| 1286 | wacom_map_usage(wacom, usage, field, EV_ABS, ABS_X, 4); | ||
| 1287 | break; | ||
| 1288 | case HID_GD_Y: | ||
| 1289 | wacom_map_usage(wacom, usage, field, EV_ABS, ABS_Y, 4); | ||
| 1290 | break; | ||
| 1291 | case HID_DG_TIPPRESSURE: | ||
| 1292 | wacom_map_usage(wacom, usage, field, EV_ABS, ABS_PRESSURE, 0); | ||
| 1293 | break; | ||
| 1294 | case HID_DG_INRANGE: | ||
| 1295 | wacom_map_usage(wacom, usage, field, EV_KEY, BTN_TOOL_PEN, 0); | ||
| 1296 | break; | ||
| 1297 | case HID_DG_INVERT: | ||
| 1298 | wacom_map_usage(wacom, usage, field, EV_KEY, | ||
| 1299 | BTN_TOOL_RUBBER, 0); | ||
| 1300 | break; | ||
| 1301 | case HID_DG_ERASER: | ||
| 1302 | case HID_DG_TIPSWITCH: | ||
| 1303 | wacom_map_usage(wacom, usage, field, EV_KEY, BTN_TOUCH, 0); | ||
| 1304 | break; | ||
| 1305 | case HID_DG_BARRELSWITCH: | ||
| 1306 | wacom_map_usage(wacom, usage, field, EV_KEY, BTN_STYLUS, 0); | ||
| 1307 | break; | ||
| 1308 | case HID_DG_BARRELSWITCH2: | ||
| 1309 | wacom_map_usage(wacom, usage, field, EV_KEY, BTN_STYLUS2, 0); | ||
| 1310 | break; | ||
| 1311 | case HID_DG_TOOLSERIALNUMBER: | ||
| 1312 | wacom_map_usage(wacom, usage, field, EV_MSC, MSC_SERIAL, 0); | ||
| 1313 | break; | ||
| 1314 | } | ||
| 1315 | } | ||
| 1316 | |||
| 1317 | static int wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field, | ||
| 1318 | struct hid_usage *usage, __s32 value) | ||
| 1319 | { | ||
| 1320 | struct wacom *wacom = hid_get_drvdata(hdev); | ||
| 1321 | struct wacom_wac *wacom_wac = &wacom->wacom_wac; | ||
| 1322 | struct input_dev *input = wacom_wac->input; | ||
| 1323 | |||
| 1324 | /* checking which Tool / tip switch to send */ | ||
| 1325 | switch (usage->hid) { | ||
| 1326 | case HID_DG_INRANGE: | ||
| 1327 | wacom_wac->hid_data.inrange_state = value; | ||
| 1328 | return 0; | ||
| 1329 | case HID_DG_INVERT: | ||
| 1330 | wacom_wac->hid_data.invert_state = value; | ||
| 1331 | return 0; | ||
| 1332 | case HID_DG_ERASER: | ||
| 1333 | case HID_DG_TIPSWITCH: | ||
| 1334 | wacom_wac->hid_data.tipswitch |= value; | ||
| 1335 | return 0; | ||
| 1336 | } | ||
| 1337 | |||
| 1338 | /* send pen events only when touch is up or forced out */ | ||
| 1339 | if (!usage->type || wacom_wac->shared->touch_down) | ||
| 1340 | return 0; | ||
| 1341 | |||
| 1342 | input_event(input, usage->type, usage->code, value); | ||
| 1343 | |||
| 1344 | return 0; | ||
| 1345 | } | ||
| 1346 | |||
| 1347 | static void wacom_wac_pen_report(struct hid_device *hdev, | ||
| 1348 | struct hid_report *report) | ||
| 1349 | { | ||
| 1350 | struct wacom *wacom = hid_get_drvdata(hdev); | ||
| 1351 | struct wacom_wac *wacom_wac = &wacom->wacom_wac; | ||
| 1352 | struct input_dev *input = wacom_wac->input; | ||
| 1353 | bool prox = wacom_wac->hid_data.inrange_state; | ||
| 1354 | |||
| 1355 | if (!wacom_wac->shared->stylus_in_proximity) /* first in prox */ | ||
| 1356 | /* Going into proximity select tool */ | ||
| 1357 | wacom_wac->tool[0] = wacom_wac->hid_data.invert_state ? | ||
| 1358 | BTN_TOOL_RUBBER : BTN_TOOL_PEN; | ||
| 1359 | |||
| 1360 | /* keep pen state for touch events */ | ||
| 1361 | wacom_wac->shared->stylus_in_proximity = prox; | ||
| 1362 | |||
| 1363 | /* send pen events only when touch is up or forced out */ | ||
| 1364 | if (!wacom_wac->shared->touch_down) { | ||
| 1365 | input_report_key(input, BTN_TOUCH, | ||
| 1366 | wacom_wac->hid_data.tipswitch); | ||
| 1367 | input_report_key(input, wacom_wac->tool[0], prox); | ||
| 1368 | |||
| 1369 | wacom_wac->hid_data.tipswitch = false; | ||
| 1370 | |||
| 1371 | input_sync(input); | ||
| 1372 | } | ||
| 1373 | } | ||
| 1374 | |||
| 1375 | static void wacom_wac_finger_usage_mapping(struct hid_device *hdev, | ||
| 1376 | struct hid_field *field, struct hid_usage *usage) | ||
| 1377 | { | ||
| 1378 | struct wacom *wacom = hid_get_drvdata(hdev); | ||
| 1379 | struct wacom_wac *wacom_wac = &wacom->wacom_wac; | ||
| 1380 | struct input_dev *input = wacom_wac->input; | ||
| 1381 | unsigned touch_max = wacom_wac->features.touch_max; | ||
| 1382 | |||
| 1383 | switch (usage->hid) { | ||
| 1384 | case HID_GD_X: | ||
| 1385 | if (touch_max == 1) | ||
| 1386 | wacom_map_usage(wacom, usage, field, EV_ABS, ABS_X, 4); | ||
| 1387 | else | ||
| 1388 | wacom_map_usage(wacom, usage, field, EV_ABS, | ||
| 1389 | ABS_MT_POSITION_X, 4); | ||
| 1390 | break; | ||
| 1391 | case HID_GD_Y: | ||
| 1392 | if (touch_max == 1) | ||
| 1393 | wacom_map_usage(wacom, usage, field, EV_ABS, ABS_Y, 4); | ||
| 1394 | else | ||
| 1395 | wacom_map_usage(wacom, usage, field, EV_ABS, | ||
| 1396 | ABS_MT_POSITION_Y, 4); | ||
| 1397 | break; | ||
| 1398 | case HID_DG_CONTACTID: | ||
| 1399 | input_mt_init_slots(input, wacom_wac->features.touch_max, | ||
| 1400 | INPUT_MT_DIRECT); | ||
| 1401 | break; | ||
| 1402 | case HID_DG_INRANGE: | ||
| 1403 | break; | ||
| 1404 | case HID_DG_INVERT: | ||
| 1405 | break; | ||
| 1406 | case HID_DG_TIPSWITCH: | ||
| 1407 | wacom_map_usage(wacom, usage, field, EV_KEY, BTN_TOUCH, 0); | ||
| 1408 | break; | ||
| 1409 | } | ||
| 1410 | } | ||
| 1411 | |||
| 1412 | static int wacom_wac_finger_event(struct hid_device *hdev, | ||
| 1413 | struct hid_field *field, struct hid_usage *usage, __s32 value) | ||
| 1414 | { | ||
| 1415 | struct wacom *wacom = hid_get_drvdata(hdev); | ||
| 1416 | struct wacom_wac *wacom_wac = &wacom->wacom_wac; | ||
| 1417 | |||
| 1418 | switch (usage->hid) { | ||
| 1419 | case HID_GD_X: | ||
| 1420 | wacom_wac->hid_data.x = value; | ||
| 1421 | break; | ||
| 1422 | case HID_GD_Y: | ||
| 1423 | wacom_wac->hid_data.y = value; | ||
| 1424 | break; | ||
| 1425 | case HID_DG_CONTACTID: | ||
| 1426 | wacom_wac->hid_data.id = value; | ||
| 1427 | break; | ||
| 1428 | case HID_DG_TIPSWITCH: | ||
| 1429 | wacom_wac->hid_data.tipswitch = value; | ||
| 1430 | break; | ||
| 1431 | } | ||
| 1432 | |||
| 1433 | |||
| 1434 | return 0; | ||
| 1435 | } | ||
| 1436 | |||
| 1437 | static void wacom_wac_finger_mt_report(struct wacom_wac *wacom_wac, | ||
| 1438 | struct input_dev *input, bool touch) | ||
| 1439 | { | ||
| 1440 | int slot; | ||
| 1441 | struct hid_data *hid_data = &wacom_wac->hid_data; | ||
| 1442 | |||
| 1443 | slot = input_mt_get_slot_by_key(input, hid_data->id); | ||
| 1444 | |||
| 1445 | input_mt_slot(input, slot); | ||
| 1446 | input_mt_report_slot_state(input, MT_TOOL_FINGER, touch); | ||
| 1447 | if (touch) { | ||
| 1448 | input_report_abs(input, ABS_MT_POSITION_X, hid_data->x); | ||
| 1449 | input_report_abs(input, ABS_MT_POSITION_Y, hid_data->y); | ||
| 1450 | } | ||
| 1451 | input_mt_sync_frame(input); | ||
| 1452 | } | ||
| 1453 | |||
| 1454 | static void wacom_wac_finger_single_touch_report(struct wacom_wac *wacom_wac, | ||
| 1455 | struct input_dev *input, bool touch) | ||
| 1456 | { | ||
| 1457 | struct hid_data *hid_data = &wacom_wac->hid_data; | ||
| 1458 | |||
| 1459 | if (touch) { | ||
| 1460 | input_report_abs(input, ABS_X, hid_data->x); | ||
| 1461 | input_report_abs(input, ABS_Y, hid_data->y); | ||
| 1462 | } | ||
| 1463 | input_report_key(input, BTN_TOUCH, touch); | ||
| 1464 | } | ||
| 1465 | |||
| 1466 | static void wacom_wac_finger_report(struct hid_device *hdev, | ||
| 1467 | struct hid_report *report) | ||
| 1468 | { | ||
| 1469 | struct wacom *wacom = hid_get_drvdata(hdev); | ||
| 1470 | struct wacom_wac *wacom_wac = &wacom->wacom_wac; | ||
| 1471 | struct input_dev *input = wacom_wac->input; | ||
| 1472 | bool touch = wacom_wac->hid_data.tipswitch && | ||
| 1473 | !wacom_wac->shared->stylus_in_proximity; | ||
| 1474 | unsigned touch_max = wacom_wac->features.touch_max; | ||
| 1475 | |||
| 1476 | if (touch_max > 1) | ||
| 1477 | wacom_wac_finger_mt_report(wacom_wac, input, touch); | ||
| 1478 | else | ||
| 1479 | wacom_wac_finger_single_touch_report(wacom_wac, input, touch); | ||
| 1480 | input_sync(input); | ||
| 1481 | |||
| 1482 | /* keep touch state for pen event */ | ||
| 1483 | wacom_wac->shared->touch_down = touch; | ||
| 1484 | } | ||
| 1485 | |||
| 1486 | #define WACOM_PEN_FIELD(f) (((f)->logical == HID_DG_STYLUS) || \ | ||
| 1487 | ((f)->physical == HID_DG_STYLUS)) | ||
| 1488 | #define WACOM_FINGER_FIELD(f) (((f)->logical == HID_DG_FINGER) || \ | ||
| 1489 | ((f)->physical == HID_DG_FINGER)) | ||
| 1490 | |||
| 1491 | void wacom_wac_usage_mapping(struct hid_device *hdev, | ||
| 1492 | struct hid_field *field, struct hid_usage *usage) | ||
| 1493 | { | ||
| 1494 | struct wacom *wacom = hid_get_drvdata(hdev); | ||
| 1495 | struct wacom_wac *wacom_wac = &wacom->wacom_wac; | ||
| 1496 | struct input_dev *input = wacom_wac->input; | ||
| 1497 | |||
| 1498 | /* currently, only direct devices have proper hid report descriptors */ | ||
| 1499 | __set_bit(INPUT_PROP_DIRECT, input->propbit); | ||
| 1500 | |||
| 1501 | if (WACOM_PEN_FIELD(field)) | ||
| 1502 | return wacom_wac_pen_usage_mapping(hdev, field, usage); | ||
| 1503 | |||
| 1504 | if (WACOM_FINGER_FIELD(field)) | ||
| 1505 | return wacom_wac_finger_usage_mapping(hdev, field, usage); | ||
| 1506 | } | ||
| 1507 | |||
| 1508 | int wacom_wac_event(struct hid_device *hdev, struct hid_field *field, | ||
| 1509 | struct hid_usage *usage, __s32 value) | ||
| 1510 | { | ||
| 1511 | struct wacom *wacom = hid_get_drvdata(hdev); | ||
| 1512 | |||
| 1513 | if (wacom->wacom_wac.features.type != HID_GENERIC) | ||
| 1514 | return 0; | ||
| 1515 | |||
| 1516 | if (WACOM_PEN_FIELD(field)) | ||
| 1517 | return wacom_wac_pen_event(hdev, field, usage, value); | ||
| 1518 | |||
| 1519 | if (WACOM_FINGER_FIELD(field)) | ||
| 1520 | return wacom_wac_finger_event(hdev, field, usage, value); | ||
| 1521 | |||
| 1522 | return 0; | ||
| 1523 | } | ||
| 1524 | |||
| 1525 | void wacom_wac_report(struct hid_device *hdev, struct hid_report *report) | ||
| 1526 | { | ||
| 1527 | struct wacom *wacom = hid_get_drvdata(hdev); | ||
| 1528 | struct wacom_wac *wacom_wac = &wacom->wacom_wac; | ||
| 1529 | struct hid_field *field = report->field[0]; | ||
| 1530 | |||
| 1531 | if (wacom_wac->features.type != HID_GENERIC) | ||
| 1532 | return; | ||
| 1533 | |||
| 1534 | if (WACOM_PEN_FIELD(field)) | ||
| 1535 | return wacom_wac_pen_report(hdev, report); | ||
| 1536 | |||
| 1537 | if (WACOM_FINGER_FIELD(field)) | ||
| 1538 | return wacom_wac_finger_report(hdev, report); | ||
| 1539 | } | ||
| 1540 | |||
| 1251 | static int wacom_bpt_touch(struct wacom_wac *wacom) | 1541 | static int wacom_bpt_touch(struct wacom_wac *wacom) |
| 1252 | { | 1542 | { |
| 1253 | struct wacom_features *features = &wacom->features; | 1543 | struct wacom_features *features = &wacom->features; |
| @@ -1746,6 +2036,10 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, | |||
| 1746 | 2036 | ||
| 1747 | input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); | 2037 | input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); |
| 1748 | 2038 | ||
| 2039 | if (features->type == HID_GENERIC) | ||
| 2040 | /* setup has already been done */ | ||
| 2041 | return 0; | ||
| 2042 | |||
| 1749 | __set_bit(BTN_TOUCH, input_dev->keybit); | 2043 | __set_bit(BTN_TOUCH, input_dev->keybit); |
| 1750 | __set_bit(ABS_MISC, input_dev->absbit); | 2044 | __set_bit(ABS_MISC, input_dev->absbit); |
| 1751 | 2045 | ||
| @@ -1990,6 +2284,9 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev, | |||
| 1990 | input_set_abs_params(input_dev, ABS_X, 0, 1, 0, 0); | 2284 | input_set_abs_params(input_dev, ABS_X, 0, 1, 0, 0); |
| 1991 | input_set_abs_params(input_dev, ABS_Y, 0, 1, 0, 0); | 2285 | input_set_abs_params(input_dev, ABS_Y, 0, 1, 0, 0); |
| 1992 | 2286 | ||
| 2287 | /* kept for making udev and libwacom accepting the pad */ | ||
| 2288 | __set_bit(BTN_STYLUS, input_dev->keybit); | ||
| 2289 | |||
| 1993 | switch (features->type) { | 2290 | switch (features->type) { |
| 1994 | case GRAPHIRE_BT: | 2291 | case GRAPHIRE_BT: |
| 1995 | __set_bit(BTN_0, input_dev->keybit); | 2292 | __set_bit(BTN_0, input_dev->keybit); |
| @@ -2573,6 +2870,17 @@ static const struct wacom_features wacom_features_0x309 = | |||
| 2573 | { "Wacom ISDv5 309", .type = WACOM_24HDT, /* Touch */ | 2870 | { "Wacom ISDv5 309", .type = WACOM_24HDT, /* Touch */ |
| 2574 | .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x0307, .touch_max = 10, | 2871 | .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x0307, .touch_max = 10, |
| 2575 | .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; | 2872 | .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; |
| 2873 | static const struct wacom_features wacom_features_0x30A = | ||
| 2874 | { "Wacom ISDv5 30A", 59352, 33648, 2047, 63, | ||
| 2875 | CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200, | ||
| 2876 | .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x30C }; | ||
| 2877 | static const struct wacom_features wacom_features_0x30C = | ||
| 2878 | { "Wacom ISDv5 30C", .type = WACOM_24HDT, /* Touch */ | ||
| 2879 | .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x30A, .touch_max = 10, | ||
| 2880 | .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; | ||
| 2881 | |||
| 2882 | static const struct wacom_features wacom_features_HID_ANY_ID = | ||
| 2883 | { "Wacom HID", .type = HID_GENERIC }; | ||
| 2576 | 2884 | ||
| 2577 | #define USB_DEVICE_WACOM(prod) \ | 2885 | #define USB_DEVICE_WACOM(prod) \ |
| 2578 | HID_DEVICE(BUS_USB, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\ | 2886 | HID_DEVICE(BUS_USB, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\ |
| @@ -2708,6 +3016,8 @@ const struct hid_device_id wacom_ids[] = { | |||
| 2708 | { USB_DEVICE_WACOM(0x304) }, | 3016 | { USB_DEVICE_WACOM(0x304) }, |
| 2709 | { USB_DEVICE_WACOM(0x307) }, | 3017 | { USB_DEVICE_WACOM(0x307) }, |
| 2710 | { USB_DEVICE_WACOM(0x309) }, | 3018 | { USB_DEVICE_WACOM(0x309) }, |
| 3019 | { USB_DEVICE_WACOM(0x30A) }, | ||
| 3020 | { USB_DEVICE_WACOM(0x30C) }, | ||
| 2711 | { USB_DEVICE_WACOM(0x30E) }, | 3021 | { USB_DEVICE_WACOM(0x30E) }, |
| 2712 | { USB_DEVICE_WACOM(0x314) }, | 3022 | { USB_DEVICE_WACOM(0x314) }, |
| 2713 | { USB_DEVICE_WACOM(0x315) }, | 3023 | { USB_DEVICE_WACOM(0x315) }, |
| @@ -2716,6 +3026,8 @@ const struct hid_device_id wacom_ids[] = { | |||
| 2716 | { USB_DEVICE_WACOM(0x4004) }, | 3026 | { USB_DEVICE_WACOM(0x4004) }, |
| 2717 | { USB_DEVICE_WACOM(0x5000) }, | 3027 | { USB_DEVICE_WACOM(0x5000) }, |
| 2718 | { USB_DEVICE_WACOM(0x5002) }, | 3028 | { USB_DEVICE_WACOM(0x5002) }, |
| 3029 | |||
| 3030 | { USB_DEVICE_WACOM(HID_ANY_ID) }, | ||
| 2719 | { } | 3031 | { } |
| 2720 | }; | 3032 | }; |
| 2721 | MODULE_DEVICE_TABLE(hid, wacom_ids); | 3033 | MODULE_DEVICE_TABLE(hid, wacom_ids); |
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h index 339ab5d81a2d..0f0b85ec1322 100644 --- a/drivers/hid/wacom_wac.h +++ b/drivers/hid/wacom_wac.h | |||
| @@ -113,6 +113,7 @@ enum { | |||
| 113 | MTSCREEN, | 113 | MTSCREEN, |
| 114 | MTTPC, | 114 | MTTPC, |
| 115 | MTTPC_B, | 115 | MTTPC_B, |
| 116 | HID_GENERIC, | ||
| 116 | MAX_TYPE | 117 | MAX_TYPE |
| 117 | }; | 118 | }; |
| 118 | 119 | ||
| @@ -154,6 +155,20 @@ struct wacom_shared { | |||
| 154 | struct input_dev *touch_input; | 155 | struct input_dev *touch_input; |
| 155 | }; | 156 | }; |
| 156 | 157 | ||
| 158 | struct hid_data { | ||
| 159 | __s16 inputmode; /* InputMode HID feature, -1 if non-existent */ | ||
| 160 | __s16 inputmode_index; /* InputMode HID feature index in the report */ | ||
| 161 | bool inrange_state; | ||
| 162 | bool invert_state; | ||
| 163 | bool tipswitch; | ||
| 164 | int x; | ||
| 165 | int y; | ||
| 166 | int pressure; | ||
| 167 | int width; | ||
| 168 | int height; | ||
| 169 | int id; | ||
| 170 | }; | ||
| 171 | |||
| 157 | struct wacom_wac { | 172 | struct wacom_wac { |
| 158 | char name[WACOM_NAME_MAX]; | 173 | char name[WACOM_NAME_MAX]; |
| 159 | char pad_name[WACOM_NAME_MAX]; | 174 | char pad_name[WACOM_NAME_MAX]; |
| @@ -167,6 +182,7 @@ struct wacom_wac { | |||
| 167 | struct wacom_shared *shared; | 182 | struct wacom_shared *shared; |
| 168 | struct input_dev *input; | 183 | struct input_dev *input; |
| 169 | struct input_dev *pad_input; | 184 | struct input_dev *pad_input; |
| 185 | bool input_registered; | ||
| 170 | int pid; | 186 | int pid; |
| 171 | int battery_capacity; | 187 | int battery_capacity; |
| 172 | int num_contacts_left; | 188 | int num_contacts_left; |
| @@ -174,6 +190,7 @@ struct wacom_wac { | |||
| 174 | int ps_connected; | 190 | int ps_connected; |
| 175 | u8 bt_features; | 191 | u8 bt_features; |
| 176 | u8 bt_high_speed; | 192 | u8 bt_high_speed; |
| 193 | struct hid_data hid_data; | ||
| 177 | }; | 194 | }; |
| 178 | 195 | ||
| 179 | #endif | 196 | #endif |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 18405314168b..ecb0109a5360 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
| @@ -3149,14 +3149,16 @@ free_domains: | |||
| 3149 | 3149 | ||
| 3150 | static void cleanup_domain(struct protection_domain *domain) | 3150 | static void cleanup_domain(struct protection_domain *domain) |
| 3151 | { | 3151 | { |
| 3152 | struct iommu_dev_data *dev_data, *next; | 3152 | struct iommu_dev_data *entry; |
| 3153 | unsigned long flags; | 3153 | unsigned long flags; |
| 3154 | 3154 | ||
| 3155 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); | 3155 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); |
| 3156 | 3156 | ||
| 3157 | list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) { | 3157 | while (!list_empty(&domain->dev_list)) { |
| 3158 | __detach_device(dev_data); | 3158 | entry = list_first_entry(&domain->dev_list, |
| 3159 | atomic_set(&dev_data->bind, 0); | 3159 | struct iommu_dev_data, list); |
| 3160 | __detach_device(entry); | ||
| 3161 | atomic_set(&entry->bind, 0); | ||
| 3160 | } | 3162 | } |
| 3161 | 3163 | ||
| 3162 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | 3164 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index d1f5caad04f9..5619f264862d 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
| @@ -3869,6 +3869,14 @@ static int device_notifier(struct notifier_block *nb, | |||
| 3869 | action != BUS_NOTIFY_DEL_DEVICE) | 3869 | action != BUS_NOTIFY_DEL_DEVICE) |
| 3870 | return 0; | 3870 | return 0; |
| 3871 | 3871 | ||
| 3872 | /* | ||
| 3873 | * If the device is still attached to a device driver we can't | ||
| 3874 | * tear down the domain yet as DMA mappings may still be in use. | ||
| 3875 | * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that. | ||
| 3876 | */ | ||
| 3877 | if (action == BUS_NOTIFY_DEL_DEVICE && dev->driver != NULL) | ||
| 3878 | return 0; | ||
| 3879 | |||
| 3872 | domain = find_domain(dev); | 3880 | domain = find_domain(dev); |
| 3873 | if (!domain) | 3881 | if (!domain) |
| 3874 | return 0; | 3882 | return 0; |
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 169836020208..ac4adb337038 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c | |||
| @@ -995,7 +995,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, | |||
| 995 | size_t orig_size = size; | 995 | size_t orig_size = size; |
| 996 | int ret = 0; | 996 | int ret = 0; |
| 997 | 997 | ||
| 998 | if (unlikely(domain->ops->unmap == NULL || | 998 | if (unlikely(domain->ops->map == NULL || |
| 999 | domain->ops->pgsize_bitmap == 0UL)) | 999 | domain->ops->pgsize_bitmap == 0UL)) |
| 1000 | return -ENODEV; | 1000 | return -ENODEV; |
| 1001 | 1001 | ||
diff --git a/drivers/isdn/hardware/eicon/xdi_msg.h b/drivers/isdn/hardware/eicon/xdi_msg.h index 58368f7b5cba..2498c349a32e 100644 --- a/drivers/isdn/hardware/eicon/xdi_msg.h +++ b/drivers/isdn/hardware/eicon/xdi_msg.h | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* $Id: xdi_msg.h,v 1.1.2.2 2001/02/16 08:40:36 armin Exp $ */ | 1 | /* $Id: xdi_msg.h,v 1.1.2.2 2001/02/16 08:40:36 armin Exp $ */ |
| 2 | 2 | ||
| 3 | #ifndef __DIVA_XDI_UM_CFG_MESSSGE_H__ | 3 | #ifndef __DIVA_XDI_UM_CFG_MESSAGE_H__ |
| 4 | #define __DIVA_XDI_UM_CFG_MESSAGE_H__ | 4 | #define __DIVA_XDI_UM_CFG_MESSAGE_H__ |
| 5 | 5 | ||
| 6 | /* | 6 | /* |
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index 5dede6e64376..109cb44291f5 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c | |||
| @@ -280,7 +280,7 @@ static int c_can_plat_probe(struct platform_device *pdev) | |||
| 280 | 280 | ||
| 281 | priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start, | 281 | priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start, |
| 282 | resource_size(res)); | 282 | resource_size(res)); |
| 283 | if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0) | 283 | if (!priv->raminit_ctrlreg || priv->instance < 0) |
| 284 | dev_info(&pdev->dev, "control memory is not used for raminit\n"); | 284 | dev_info(&pdev->dev, "control memory is not used for raminit\n"); |
| 285 | else | 285 | else |
| 286 | priv->raminit = c_can_hw_raminit_ti; | 286 | priv->raminit = c_can_hw_raminit_ti; |
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index f425ec2c7839..944aa5d3af6e 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c | |||
| @@ -549,6 +549,13 @@ static void do_state(struct net_device *dev, | |||
| 549 | 549 | ||
| 550 | /* process state changes depending on the new state */ | 550 | /* process state changes depending on the new state */ |
| 551 | switch (new_state) { | 551 | switch (new_state) { |
| 552 | case CAN_STATE_ERROR_WARNING: | ||
| 553 | netdev_dbg(dev, "Error Warning\n"); | ||
| 554 | cf->can_id |= CAN_ERR_CRTL; | ||
| 555 | cf->data[1] = (bec.txerr > bec.rxerr) ? | ||
| 556 | CAN_ERR_CRTL_TX_WARNING : | ||
| 557 | CAN_ERR_CRTL_RX_WARNING; | ||
| 558 | break; | ||
| 552 | case CAN_STATE_ERROR_ACTIVE: | 559 | case CAN_STATE_ERROR_ACTIVE: |
| 553 | netdev_dbg(dev, "Error Active\n"); | 560 | netdev_dbg(dev, "Error Active\n"); |
| 554 | cf->can_id |= CAN_ERR_PROT; | 561 | cf->can_id |= CAN_ERR_PROT; |
| @@ -852,6 +859,8 @@ static int flexcan_chip_start(struct net_device *dev) | |||
| 852 | if (priv->devtype_data->features & FLEXCAN_HAS_BROKEN_ERR_STATE || | 859 | if (priv->devtype_data->features & FLEXCAN_HAS_BROKEN_ERR_STATE || |
| 853 | priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) | 860 | priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) |
| 854 | reg_ctrl |= FLEXCAN_CTRL_ERR_MSK; | 861 | reg_ctrl |= FLEXCAN_CTRL_ERR_MSK; |
| 862 | else | ||
| 863 | reg_ctrl &= ~FLEXCAN_CTRL_ERR_MSK; | ||
| 855 | 864 | ||
| 856 | /* save for later use */ | 865 | /* save for later use */ |
| 857 | priv->reg_ctrl_default = reg_ctrl; | 866 | priv->reg_ctrl_default = reg_ctrl; |
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index d1692154ed1b..b27ac6074afb 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c | |||
| @@ -172,6 +172,35 @@ static void set_normal_mode(struct net_device *dev) | |||
| 172 | netdev_err(dev, "setting SJA1000 into normal mode failed!\n"); | 172 | netdev_err(dev, "setting SJA1000 into normal mode failed!\n"); |
| 173 | } | 173 | } |
| 174 | 174 | ||
| 175 | /* | ||
| 176 | * initialize SJA1000 chip: | ||
| 177 | * - reset chip | ||
| 178 | * - set output mode | ||
| 179 | * - set baudrate | ||
| 180 | * - enable interrupts | ||
| 181 | * - start operating mode | ||
| 182 | */ | ||
| 183 | static void chipset_init(struct net_device *dev) | ||
| 184 | { | ||
| 185 | struct sja1000_priv *priv = netdev_priv(dev); | ||
| 186 | |||
| 187 | /* set clock divider and output control register */ | ||
| 188 | priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN); | ||
| 189 | |||
| 190 | /* set acceptance filter (accept all) */ | ||
| 191 | priv->write_reg(priv, SJA1000_ACCC0, 0x00); | ||
| 192 | priv->write_reg(priv, SJA1000_ACCC1, 0x00); | ||
| 193 | priv->write_reg(priv, SJA1000_ACCC2, 0x00); | ||
| 194 | priv->write_reg(priv, SJA1000_ACCC3, 0x00); | ||
| 195 | |||
| 196 | priv->write_reg(priv, SJA1000_ACCM0, 0xFF); | ||
| 197 | priv->write_reg(priv, SJA1000_ACCM1, 0xFF); | ||
| 198 | priv->write_reg(priv, SJA1000_ACCM2, 0xFF); | ||
| 199 | priv->write_reg(priv, SJA1000_ACCM3, 0xFF); | ||
| 200 | |||
| 201 | priv->write_reg(priv, SJA1000_OCR, priv->ocr | OCR_MODE_NORMAL); | ||
| 202 | } | ||
| 203 | |||
| 175 | static void sja1000_start(struct net_device *dev) | 204 | static void sja1000_start(struct net_device *dev) |
| 176 | { | 205 | { |
| 177 | struct sja1000_priv *priv = netdev_priv(dev); | 206 | struct sja1000_priv *priv = netdev_priv(dev); |
| @@ -180,6 +209,10 @@ static void sja1000_start(struct net_device *dev) | |||
| 180 | if (priv->can.state != CAN_STATE_STOPPED) | 209 | if (priv->can.state != CAN_STATE_STOPPED) |
| 181 | set_reset_mode(dev); | 210 | set_reset_mode(dev); |
| 182 | 211 | ||
| 212 | /* Initialize chip if uninitialized at this stage */ | ||
| 213 | if (!(priv->read_reg(priv, SJA1000_CDR) & CDR_PELICAN)) | ||
| 214 | chipset_init(dev); | ||
| 215 | |||
| 183 | /* Clear error counters and error code capture */ | 216 | /* Clear error counters and error code capture */ |
| 184 | priv->write_reg(priv, SJA1000_TXERR, 0x0); | 217 | priv->write_reg(priv, SJA1000_TXERR, 0x0); |
| 185 | priv->write_reg(priv, SJA1000_RXERR, 0x0); | 218 | priv->write_reg(priv, SJA1000_RXERR, 0x0); |
| @@ -237,35 +270,6 @@ static int sja1000_get_berr_counter(const struct net_device *dev, | |||
| 237 | } | 270 | } |
| 238 | 271 | ||
| 239 | /* | 272 | /* |
| 240 | * initialize SJA1000 chip: | ||
| 241 | * - reset chip | ||
| 242 | * - set output mode | ||
| 243 | * - set baudrate | ||
| 244 | * - enable interrupts | ||
| 245 | * - start operating mode | ||
| 246 | */ | ||
| 247 | static void chipset_init(struct net_device *dev) | ||
| 248 | { | ||
| 249 | struct sja1000_priv *priv = netdev_priv(dev); | ||
| 250 | |||
| 251 | /* set clock divider and output control register */ | ||
| 252 | priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN); | ||
| 253 | |||
| 254 | /* set acceptance filter (accept all) */ | ||
| 255 | priv->write_reg(priv, SJA1000_ACCC0, 0x00); | ||
| 256 | priv->write_reg(priv, SJA1000_ACCC1, 0x00); | ||
| 257 | priv->write_reg(priv, SJA1000_ACCC2, 0x00); | ||
| 258 | priv->write_reg(priv, SJA1000_ACCC3, 0x00); | ||
| 259 | |||
| 260 | priv->write_reg(priv, SJA1000_ACCM0, 0xFF); | ||
| 261 | priv->write_reg(priv, SJA1000_ACCM1, 0xFF); | ||
| 262 | priv->write_reg(priv, SJA1000_ACCM2, 0xFF); | ||
| 263 | priv->write_reg(priv, SJA1000_ACCM3, 0xFF); | ||
| 264 | |||
| 265 | priv->write_reg(priv, SJA1000_OCR, priv->ocr | OCR_MODE_NORMAL); | ||
| 266 | } | ||
| 267 | |||
| 268 | /* | ||
| 269 | * transmit a CAN message | 273 | * transmit a CAN message |
| 270 | * message layout in the sk_buff should be like this: | 274 | * message layout in the sk_buff should be like this: |
| 271 | * xx xx xx xx ff ll 00 11 22 33 44 55 66 77 | 275 | * xx xx xx xx ff ll 00 11 22 33 44 55 66 77 |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index e1a8f4e19983..e4222af2baa6 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c | |||
| @@ -563,15 +563,21 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata) | |||
| 563 | struct xgene_enet_desc_ring *ring; | 563 | struct xgene_enet_desc_ring *ring; |
| 564 | 564 | ||
| 565 | ring = pdata->tx_ring; | 565 | ring = pdata->tx_ring; |
| 566 | if (ring && ring->cp_ring && ring->cp_ring->cp_skb) | 566 | if (ring) { |
| 567 | devm_kfree(dev, ring->cp_ring->cp_skb); | 567 | if (ring->cp_ring && ring->cp_ring->cp_skb) |
| 568 | xgene_enet_free_desc_ring(ring); | 568 | devm_kfree(dev, ring->cp_ring->cp_skb); |
| 569 | xgene_enet_free_desc_ring(ring); | ||
| 570 | } | ||
| 569 | 571 | ||
| 570 | ring = pdata->rx_ring; | 572 | ring = pdata->rx_ring; |
| 571 | if (ring && ring->buf_pool && ring->buf_pool->rx_skb) | 573 | if (ring) { |
| 572 | devm_kfree(dev, ring->buf_pool->rx_skb); | 574 | if (ring->buf_pool) { |
| 573 | xgene_enet_free_desc_ring(ring->buf_pool); | 575 | if (ring->buf_pool->rx_skb) |
| 574 | xgene_enet_free_desc_ring(ring); | 576 | devm_kfree(dev, ring->buf_pool->rx_skb); |
| 577 | xgene_enet_free_desc_ring(ring->buf_pool); | ||
| 578 | } | ||
| 579 | xgene_enet_free_desc_ring(ring); | ||
| 580 | } | ||
| 575 | } | 581 | } |
| 576 | 582 | ||
| 577 | static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring( | 583 | static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring( |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 4e6c82e20224..4ccc806b1150 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
| @@ -483,11 +483,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, | |||
| 483 | 483 | ||
| 484 | #ifdef BNX2X_STOP_ON_ERROR | 484 | #ifdef BNX2X_STOP_ON_ERROR |
| 485 | fp->tpa_queue_used |= (1 << queue); | 485 | fp->tpa_queue_used |= (1 << queue); |
| 486 | #ifdef _ASM_GENERIC_INT_L64_H | ||
| 487 | DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n", | ||
| 488 | #else | ||
| 489 | DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n", | 486 | DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n", |
| 490 | #endif | ||
| 491 | fp->tpa_queue_used); | 487 | fp->tpa_queue_used); |
| 492 | #endif | 488 | #endif |
| 493 | } | 489 | } |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index c13364b6cc19..900cab420810 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
| @@ -10052,6 +10052,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, | |||
| 10052 | } | 10052 | } |
| 10053 | 10053 | ||
| 10054 | #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) | 10054 | #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) |
| 10055 | #define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \ | ||
| 10056 | 0x1848 + ((f) << 4)) | ||
| 10055 | #define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff) | 10057 | #define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff) |
| 10056 | #define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) | 10058 | #define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) |
| 10057 | #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) | 10059 | #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) |
| @@ -10059,8 +10061,6 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, | |||
| 10059 | #define BCM_5710_UNDI_FW_MF_MAJOR (0x07) | 10061 | #define BCM_5710_UNDI_FW_MF_MAJOR (0x07) |
| 10060 | #define BCM_5710_UNDI_FW_MF_MINOR (0x08) | 10062 | #define BCM_5710_UNDI_FW_MF_MINOR (0x08) |
| 10061 | #define BCM_5710_UNDI_FW_MF_VERS (0x05) | 10063 | #define BCM_5710_UNDI_FW_MF_VERS (0x05) |
| 10062 | #define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4)) | ||
| 10063 | #define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4)) | ||
| 10064 | 10064 | ||
| 10065 | static bool bnx2x_prev_is_after_undi(struct bnx2x *bp) | 10065 | static bool bnx2x_prev_is_after_undi(struct bnx2x *bp) |
| 10066 | { | 10066 | { |
| @@ -10079,72 +10079,25 @@ static bool bnx2x_prev_is_after_undi(struct bnx2x *bp) | |||
| 10079 | return false; | 10079 | return false; |
| 10080 | } | 10080 | } |
| 10081 | 10081 | ||
| 10082 | static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp) | 10082 | static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc) |
| 10083 | { | ||
| 10084 | u8 major, minor, version; | ||
| 10085 | u32 fw; | ||
| 10086 | |||
| 10087 | /* Must check that FW is loaded */ | ||
| 10088 | if (!(REG_RD(bp, MISC_REG_RESET_REG_1) & | ||
| 10089 | MISC_REGISTERS_RESET_REG_1_RST_XSEM)) { | ||
| 10090 | BNX2X_DEV_INFO("XSEM is reset - UNDI MF FW is not loaded\n"); | ||
| 10091 | return false; | ||
| 10092 | } | ||
| 10093 | |||
| 10094 | /* Read Currently loaded FW version */ | ||
| 10095 | fw = REG_RD(bp, XSEM_REG_PRAM); | ||
| 10096 | major = fw & 0xff; | ||
| 10097 | minor = (fw >> 0x8) & 0xff; | ||
| 10098 | version = (fw >> 0x10) & 0xff; | ||
| 10099 | BNX2X_DEV_INFO("Loaded FW: 0x%08x: Major 0x%02x Minor 0x%02x Version 0x%02x\n", | ||
| 10100 | fw, major, minor, version); | ||
| 10101 | |||
| 10102 | if (major > BCM_5710_UNDI_FW_MF_MAJOR) | ||
| 10103 | return true; | ||
| 10104 | |||
| 10105 | if ((major == BCM_5710_UNDI_FW_MF_MAJOR) && | ||
| 10106 | (minor > BCM_5710_UNDI_FW_MF_MINOR)) | ||
| 10107 | return true; | ||
| 10108 | |||
| 10109 | if ((major == BCM_5710_UNDI_FW_MF_MAJOR) && | ||
| 10110 | (minor == BCM_5710_UNDI_FW_MF_MINOR) && | ||
| 10111 | (version >= BCM_5710_UNDI_FW_MF_VERS)) | ||
| 10112 | return true; | ||
| 10113 | |||
| 10114 | return false; | ||
| 10115 | } | ||
| 10116 | |||
| 10117 | static void bnx2x_prev_unload_undi_mf(struct bnx2x *bp) | ||
| 10118 | { | ||
| 10119 | int i; | ||
| 10120 | |||
| 10121 | /* Due to legacy (FW) code, the first function on each engine has a | ||
| 10122 | * different offset macro from the rest of the functions. | ||
| 10123 | * Setting this for all 8 functions is harmless regardless of whether | ||
| 10124 | * this is actually a multi-function device. | ||
| 10125 | */ | ||
| 10126 | for (i = 0; i < 2; i++) | ||
| 10127 | REG_WR(bp, BNX2X_PREV_UNDI_MF_PORT(i), 1); | ||
| 10128 | |||
| 10129 | for (i = 2; i < 8; i++) | ||
| 10130 | REG_WR(bp, BNX2X_PREV_UNDI_MF_FUNC(i - 2), 1); | ||
| 10131 | |||
| 10132 | BNX2X_DEV_INFO("UNDI FW (MF) set to discard\n"); | ||
| 10133 | } | ||
| 10134 | |||
| 10135 | static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc) | ||
| 10136 | { | 10083 | { |
| 10137 | u16 rcq, bd; | 10084 | u16 rcq, bd; |
| 10138 | u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port)); | 10085 | u32 addr, tmp_reg; |
| 10139 | 10086 | ||
| 10087 | if (BP_FUNC(bp) < 2) | ||
| 10088 | addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp)); | ||
| 10089 | else | ||
| 10090 | addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2); | ||
| 10091 | |||
| 10092 | tmp_reg = REG_RD(bp, addr); | ||
| 10140 | rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc; | 10093 | rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc; |
| 10141 | bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc; | 10094 | bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc; |
| 10142 | 10095 | ||
| 10143 | tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd); | 10096 | tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd); |
| 10144 | REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg); | 10097 | REG_WR(bp, addr, tmp_reg); |
| 10145 | 10098 | ||
| 10146 | BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n", | 10099 | BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n", |
| 10147 | port, bd, rcq); | 10100 | BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq); |
| 10148 | } | 10101 | } |
| 10149 | 10102 | ||
| 10150 | static int bnx2x_prev_mcp_done(struct bnx2x *bp) | 10103 | static int bnx2x_prev_mcp_done(struct bnx2x *bp) |
| @@ -10383,7 +10336,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) | |||
| 10383 | /* Reset should be performed after BRB is emptied */ | 10336 | /* Reset should be performed after BRB is emptied */ |
| 10384 | if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { | 10337 | if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { |
| 10385 | u32 timer_count = 1000; | 10338 | u32 timer_count = 1000; |
| 10386 | bool need_write = true; | ||
| 10387 | 10339 | ||
| 10388 | /* Close the MAC Rx to prevent BRB from filling up */ | 10340 | /* Close the MAC Rx to prevent BRB from filling up */ |
| 10389 | bnx2x_prev_unload_close_mac(bp, &mac_vals); | 10341 | bnx2x_prev_unload_close_mac(bp, &mac_vals); |
| @@ -10420,20 +10372,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) | |||
| 10420 | else | 10372 | else |
| 10421 | timer_count--; | 10373 | timer_count--; |
| 10422 | 10374 | ||
| 10423 | /* New UNDI FW supports MF and contains better | 10375 | /* If UNDI resides in memory, manually increment it */ |
| 10424 | * cleaning methods - might be redundant but harmless. | 10376 | if (prev_undi) |
| 10425 | */ | 10377 | bnx2x_prev_unload_undi_inc(bp, 1); |
| 10426 | if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) { | 10378 | |
| 10427 | if (need_write) { | ||
| 10428 | bnx2x_prev_unload_undi_mf(bp); | ||
| 10429 | need_write = false; | ||
| 10430 | } | ||
| 10431 | } else if (prev_undi) { | ||
| 10432 | /* If UNDI resides in memory, | ||
| 10433 | * manually increment it | ||
| 10434 | */ | ||
| 10435 | bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1); | ||
| 10436 | } | ||
| 10437 | udelay(10); | 10379 | udelay(10); |
| 10438 | } | 10380 | } |
| 10439 | 10381 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index d57282172ea5..c067b7888ac4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | |||
| @@ -652,6 +652,7 @@ struct adapter { | |||
| 652 | struct tid_info tids; | 652 | struct tid_info tids; |
| 653 | void **tid_release_head; | 653 | void **tid_release_head; |
| 654 | spinlock_t tid_release_lock; | 654 | spinlock_t tid_release_lock; |
| 655 | struct workqueue_struct *workq; | ||
| 655 | struct work_struct tid_release_task; | 656 | struct work_struct tid_release_task; |
| 656 | struct work_struct db_full_task; | 657 | struct work_struct db_full_task; |
| 657 | struct work_struct db_drop_task; | 658 | struct work_struct db_drop_task; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 1afee70ce856..18fb9c61d7ba 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
| @@ -643,8 +643,6 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) | |||
| 643 | return ret; | 643 | return ret; |
| 644 | } | 644 | } |
| 645 | 645 | ||
| 646 | static struct workqueue_struct *workq; | ||
| 647 | |||
| 648 | /** | 646 | /** |
| 649 | * link_start - enable a port | 647 | * link_start - enable a port |
| 650 | * @dev: the port to enable | 648 | * @dev: the port to enable |
| @@ -3340,7 +3338,7 @@ static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan, | |||
| 3340 | adap->tid_release_head = (void **)((uintptr_t)p | chan); | 3338 | adap->tid_release_head = (void **)((uintptr_t)p | chan); |
| 3341 | if (!adap->tid_release_task_busy) { | 3339 | if (!adap->tid_release_task_busy) { |
| 3342 | adap->tid_release_task_busy = true; | 3340 | adap->tid_release_task_busy = true; |
| 3343 | queue_work(workq, &adap->tid_release_task); | 3341 | queue_work(adap->workq, &adap->tid_release_task); |
| 3344 | } | 3342 | } |
| 3345 | spin_unlock_bh(&adap->tid_release_lock); | 3343 | spin_unlock_bh(&adap->tid_release_lock); |
| 3346 | } | 3344 | } |
| @@ -4140,7 +4138,7 @@ void t4_db_full(struct adapter *adap) | |||
| 4140 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); | 4138 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); |
| 4141 | t4_set_reg_field(adap, SGE_INT_ENABLE3, | 4139 | t4_set_reg_field(adap, SGE_INT_ENABLE3, |
| 4142 | DBFIFO_HP_INT | DBFIFO_LP_INT, 0); | 4140 | DBFIFO_HP_INT | DBFIFO_LP_INT, 0); |
| 4143 | queue_work(workq, &adap->db_full_task); | 4141 | queue_work(adap->workq, &adap->db_full_task); |
| 4144 | } | 4142 | } |
| 4145 | } | 4143 | } |
| 4146 | 4144 | ||
| @@ -4150,7 +4148,7 @@ void t4_db_dropped(struct adapter *adap) | |||
| 4150 | disable_dbs(adap); | 4148 | disable_dbs(adap); |
| 4151 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); | 4149 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); |
| 4152 | } | 4150 | } |
| 4153 | queue_work(workq, &adap->db_drop_task); | 4151 | queue_work(adap->workq, &adap->db_drop_task); |
| 4154 | } | 4152 | } |
| 4155 | 4153 | ||
| 4156 | static void uld_attach(struct adapter *adap, unsigned int uld) | 4154 | static void uld_attach(struct adapter *adap, unsigned int uld) |
| @@ -6517,6 +6515,12 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 6517 | goto out_disable_device; | 6515 | goto out_disable_device; |
| 6518 | } | 6516 | } |
| 6519 | 6517 | ||
| 6518 | adapter->workq = create_singlethread_workqueue("cxgb4"); | ||
| 6519 | if (!adapter->workq) { | ||
| 6520 | err = -ENOMEM; | ||
| 6521 | goto out_free_adapter; | ||
| 6522 | } | ||
| 6523 | |||
| 6520 | /* PCI device has been enabled */ | 6524 | /* PCI device has been enabled */ |
| 6521 | adapter->flags |= DEV_ENABLED; | 6525 | adapter->flags |= DEV_ENABLED; |
| 6522 | 6526 | ||
| @@ -6715,6 +6719,9 @@ sriov: | |||
| 6715 | out_unmap_bar0: | 6719 | out_unmap_bar0: |
| 6716 | iounmap(adapter->regs); | 6720 | iounmap(adapter->regs); |
| 6717 | out_free_adapter: | 6721 | out_free_adapter: |
| 6722 | if (adapter->workq) | ||
| 6723 | destroy_workqueue(adapter->workq); | ||
| 6724 | |||
| 6718 | kfree(adapter); | 6725 | kfree(adapter); |
| 6719 | out_disable_device: | 6726 | out_disable_device: |
| 6720 | pci_disable_pcie_error_reporting(pdev); | 6727 | pci_disable_pcie_error_reporting(pdev); |
| @@ -6736,6 +6743,11 @@ static void remove_one(struct pci_dev *pdev) | |||
| 6736 | if (adapter) { | 6743 | if (adapter) { |
| 6737 | int i; | 6744 | int i; |
| 6738 | 6745 | ||
| 6746 | /* Tear down per-adapter Work Queue first since it can contain | ||
| 6747 | * references to our adapter data structure. | ||
| 6748 | */ | ||
| 6749 | destroy_workqueue(adapter->workq); | ||
| 6750 | |||
| 6739 | if (is_offload(adapter)) | 6751 | if (is_offload(adapter)) |
| 6740 | detach_ulds(adapter); | 6752 | detach_ulds(adapter); |
| 6741 | 6753 | ||
| @@ -6788,20 +6800,14 @@ static int __init cxgb4_init_module(void) | |||
| 6788 | { | 6800 | { |
| 6789 | int ret; | 6801 | int ret; |
| 6790 | 6802 | ||
| 6791 | workq = create_singlethread_workqueue("cxgb4"); | ||
| 6792 | if (!workq) | ||
| 6793 | return -ENOMEM; | ||
| 6794 | |||
| 6795 | /* Debugfs support is optional, just warn if this fails */ | 6803 | /* Debugfs support is optional, just warn if this fails */ |
| 6796 | cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); | 6804 | cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); |
| 6797 | if (!cxgb4_debugfs_root) | 6805 | if (!cxgb4_debugfs_root) |
| 6798 | pr_warn("could not create debugfs entry, continuing\n"); | 6806 | pr_warn("could not create debugfs entry, continuing\n"); |
| 6799 | 6807 | ||
| 6800 | ret = pci_register_driver(&cxgb4_driver); | 6808 | ret = pci_register_driver(&cxgb4_driver); |
| 6801 | if (ret < 0) { | 6809 | if (ret < 0) |
| 6802 | debugfs_remove(cxgb4_debugfs_root); | 6810 | debugfs_remove(cxgb4_debugfs_root); |
| 6803 | destroy_workqueue(workq); | ||
| 6804 | } | ||
| 6805 | 6811 | ||
| 6806 | register_inet6addr_notifier(&cxgb4_inet6addr_notifier); | 6812 | register_inet6addr_notifier(&cxgb4_inet6addr_notifier); |
| 6807 | 6813 | ||
| @@ -6813,8 +6819,6 @@ static void __exit cxgb4_cleanup_module(void) | |||
| 6813 | unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier); | 6819 | unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier); |
| 6814 | pci_unregister_driver(&cxgb4_driver); | 6820 | pci_unregister_driver(&cxgb4_driver); |
| 6815 | debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ | 6821 | debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ |
| 6816 | flush_workqueue(workq); | ||
| 6817 | destroy_workqueue(workq); | ||
| 6818 | } | 6822 | } |
| 6819 | 6823 | ||
| 6820 | module_init(cxgb4_init_module); | 6824 | module_init(cxgb4_init_module); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index b0bba32d69d5..d22d728d4e5c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c | |||
| @@ -2303,7 +2303,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, | |||
| 2303 | FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0)); | 2303 | FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0)); |
| 2304 | c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC | | 2304 | c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC | |
| 2305 | FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); | 2305 | FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); |
| 2306 | c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid)); | 2306 | c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE | |
| 2307 | FW_EQ_ETH_CMD_VIID(pi->viid)); | ||
| 2307 | c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) | | 2308 | c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) | |
| 2308 | FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | | 2309 | FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | |
| 2309 | FW_EQ_ETH_CMD_FETCHRO(1) | | 2310 | FW_EQ_ETH_CMD_FETCHRO(1) | |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 0549170d7e2e..5f2729ebadbe 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | |||
| @@ -1227,6 +1227,7 @@ struct fw_eq_eth_cmd { | |||
| 1227 | #define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16) | 1227 | #define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16) |
| 1228 | #define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0) | 1228 | #define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0) |
| 1229 | 1229 | ||
| 1230 | #define FW_EQ_ETH_CMD_AUTOEQUEQE (1U << 30) | ||
| 1230 | #define FW_EQ_ETH_CMD_VIID(x) ((x) << 16) | 1231 | #define FW_EQ_ETH_CMD_VIID(x) ((x) << 16) |
| 1231 | 1232 | ||
| 1232 | struct fw_eq_ctrl_cmd { | 1233 | struct fw_eq_ctrl_cmd { |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index bdfa80ca5e31..a5fb9493dee8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c | |||
| @@ -2250,7 +2250,8 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, | |||
| 2250 | cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC | | 2250 | cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC | |
| 2251 | FW_EQ_ETH_CMD_EQSTART | | 2251 | FW_EQ_ETH_CMD_EQSTART | |
| 2252 | FW_LEN16(cmd)); | 2252 | FW_LEN16(cmd)); |
| 2253 | cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_VIID(pi->viid)); | 2253 | cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE | |
| 2254 | FW_EQ_ETH_CMD_VIID(pi->viid)); | ||
| 2254 | cmd.fetchszm_to_iqid = | 2255 | cmd.fetchszm_to_iqid = |
| 2255 | cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) | | 2256 | cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) | |
| 2256 | FW_EQ_ETH_CMD_PCIECHN(pi->port_id) | | 2257 | FW_EQ_ETH_CMD_PCIECHN(pi->port_id) | |
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 9f7fa644a397..ee41d98b44b6 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h | |||
| @@ -275,6 +275,9 @@ struct fec_enet_private { | |||
| 275 | struct clk *clk_enet_out; | 275 | struct clk *clk_enet_out; |
| 276 | struct clk *clk_ptp; | 276 | struct clk *clk_ptp; |
| 277 | 277 | ||
| 278 | bool ptp_clk_on; | ||
| 279 | struct mutex ptp_clk_mutex; | ||
| 280 | |||
| 278 | /* The saved address of a sent-in-place packet/buffer, for skfree(). */ | 281 | /* The saved address of a sent-in-place packet/buffer, for skfree(). */ |
| 279 | unsigned char *tx_bounce[TX_RING_SIZE]; | 282 | unsigned char *tx_bounce[TX_RING_SIZE]; |
| 280 | struct sk_buff *tx_skbuff[TX_RING_SIZE]; | 283 | struct sk_buff *tx_skbuff[TX_RING_SIZE]; |
| @@ -335,7 +338,7 @@ struct fec_enet_private { | |||
| 335 | u32 cycle_speed; | 338 | u32 cycle_speed; |
| 336 | int hwts_rx_en; | 339 | int hwts_rx_en; |
| 337 | int hwts_tx_en; | 340 | int hwts_tx_en; |
| 338 | struct timer_list time_keep; | 341 | struct delayed_work time_keep; |
| 339 | struct regulator *reg_phy; | 342 | struct regulator *reg_phy; |
| 340 | }; | 343 | }; |
| 341 | 344 | ||
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 4f87dffcb9b2..89355a719625 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
| @@ -1611,17 +1611,27 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) | |||
| 1611 | goto failed_clk_enet_out; | 1611 | goto failed_clk_enet_out; |
| 1612 | } | 1612 | } |
| 1613 | if (fep->clk_ptp) { | 1613 | if (fep->clk_ptp) { |
| 1614 | mutex_lock(&fep->ptp_clk_mutex); | ||
| 1614 | ret = clk_prepare_enable(fep->clk_ptp); | 1615 | ret = clk_prepare_enable(fep->clk_ptp); |
| 1615 | if (ret) | 1616 | if (ret) { |
| 1617 | mutex_unlock(&fep->ptp_clk_mutex); | ||
| 1616 | goto failed_clk_ptp; | 1618 | goto failed_clk_ptp; |
| 1619 | } else { | ||
| 1620 | fep->ptp_clk_on = true; | ||
| 1621 | } | ||
| 1622 | mutex_unlock(&fep->ptp_clk_mutex); | ||
| 1617 | } | 1623 | } |
| 1618 | } else { | 1624 | } else { |
| 1619 | clk_disable_unprepare(fep->clk_ahb); | 1625 | clk_disable_unprepare(fep->clk_ahb); |
| 1620 | clk_disable_unprepare(fep->clk_ipg); | 1626 | clk_disable_unprepare(fep->clk_ipg); |
| 1621 | if (fep->clk_enet_out) | 1627 | if (fep->clk_enet_out) |
| 1622 | clk_disable_unprepare(fep->clk_enet_out); | 1628 | clk_disable_unprepare(fep->clk_enet_out); |
| 1623 | if (fep->clk_ptp) | 1629 | if (fep->clk_ptp) { |
| 1630 | mutex_lock(&fep->ptp_clk_mutex); | ||
| 1624 | clk_disable_unprepare(fep->clk_ptp); | 1631 | clk_disable_unprepare(fep->clk_ptp); |
| 1632 | fep->ptp_clk_on = false; | ||
| 1633 | mutex_unlock(&fep->ptp_clk_mutex); | ||
| 1634 | } | ||
| 1625 | } | 1635 | } |
| 1626 | 1636 | ||
| 1627 | return 0; | 1637 | return 0; |
| @@ -2625,6 +2635,8 @@ fec_probe(struct platform_device *pdev) | |||
| 2625 | if (IS_ERR(fep->clk_enet_out)) | 2635 | if (IS_ERR(fep->clk_enet_out)) |
| 2626 | fep->clk_enet_out = NULL; | 2636 | fep->clk_enet_out = NULL; |
| 2627 | 2637 | ||
| 2638 | fep->ptp_clk_on = false; | ||
| 2639 | mutex_init(&fep->ptp_clk_mutex); | ||
| 2628 | fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); | 2640 | fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); |
| 2629 | fep->bufdesc_ex = | 2641 | fep->bufdesc_ex = |
| 2630 | pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX; | 2642 | pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX; |
| @@ -2715,10 +2727,10 @@ fec_drv_remove(struct platform_device *pdev) | |||
| 2715 | struct net_device *ndev = platform_get_drvdata(pdev); | 2727 | struct net_device *ndev = platform_get_drvdata(pdev); |
| 2716 | struct fec_enet_private *fep = netdev_priv(ndev); | 2728 | struct fec_enet_private *fep = netdev_priv(ndev); |
| 2717 | 2729 | ||
| 2730 | cancel_delayed_work_sync(&fep->time_keep); | ||
| 2718 | cancel_work_sync(&fep->tx_timeout_work); | 2731 | cancel_work_sync(&fep->tx_timeout_work); |
| 2719 | unregister_netdev(ndev); | 2732 | unregister_netdev(ndev); |
| 2720 | fec_enet_mii_remove(fep); | 2733 | fec_enet_mii_remove(fep); |
| 2721 | del_timer_sync(&fep->time_keep); | ||
| 2722 | if (fep->reg_phy) | 2734 | if (fep->reg_phy) |
| 2723 | regulator_disable(fep->reg_phy); | 2735 | regulator_disable(fep->reg_phy); |
| 2724 | if (fep->ptp_clock) | 2736 | if (fep->ptp_clock) |
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 82386b29914a..cca3617a2321 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c | |||
| @@ -245,12 +245,20 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp, | |||
| 245 | u64 ns; | 245 | u64 ns; |
| 246 | unsigned long flags; | 246 | unsigned long flags; |
| 247 | 247 | ||
| 248 | mutex_lock(&fep->ptp_clk_mutex); | ||
| 249 | /* Check the ptp clock */ | ||
| 250 | if (!fep->ptp_clk_on) { | ||
| 251 | mutex_unlock(&fep->ptp_clk_mutex); | ||
| 252 | return -EINVAL; | ||
| 253 | } | ||
| 254 | |||
| 248 | ns = ts->tv_sec * 1000000000ULL; | 255 | ns = ts->tv_sec * 1000000000ULL; |
| 249 | ns += ts->tv_nsec; | 256 | ns += ts->tv_nsec; |
| 250 | 257 | ||
| 251 | spin_lock_irqsave(&fep->tmreg_lock, flags); | 258 | spin_lock_irqsave(&fep->tmreg_lock, flags); |
| 252 | timecounter_init(&fep->tc, &fep->cc, ns); | 259 | timecounter_init(&fep->tc, &fep->cc, ns); |
| 253 | spin_unlock_irqrestore(&fep->tmreg_lock, flags); | 260 | spin_unlock_irqrestore(&fep->tmreg_lock, flags); |
| 261 | mutex_unlock(&fep->ptp_clk_mutex); | ||
| 254 | return 0; | 262 | return 0; |
| 255 | } | 263 | } |
| 256 | 264 | ||
| @@ -338,17 +346,22 @@ int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr) | |||
| 338 | * fec_time_keep - call timecounter_read every second to avoid timer overrun | 346 | * fec_time_keep - call timecounter_read every second to avoid timer overrun |
| 339 | * because ENET just support 32bit counter, will timeout in 4s | 347 | * because ENET just support 32bit counter, will timeout in 4s |
| 340 | */ | 348 | */ |
| 341 | static void fec_time_keep(unsigned long _data) | 349 | static void fec_time_keep(struct work_struct *work) |
| 342 | { | 350 | { |
| 343 | struct fec_enet_private *fep = (struct fec_enet_private *)_data; | 351 | struct delayed_work *dwork = to_delayed_work(work); |
| 352 | struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep); | ||
| 344 | u64 ns; | 353 | u64 ns; |
| 345 | unsigned long flags; | 354 | unsigned long flags; |
| 346 | 355 | ||
| 347 | spin_lock_irqsave(&fep->tmreg_lock, flags); | 356 | mutex_lock(&fep->ptp_clk_mutex); |
| 348 | ns = timecounter_read(&fep->tc); | 357 | if (fep->ptp_clk_on) { |
| 349 | spin_unlock_irqrestore(&fep->tmreg_lock, flags); | 358 | spin_lock_irqsave(&fep->tmreg_lock, flags); |
| 359 | ns = timecounter_read(&fep->tc); | ||
| 360 | spin_unlock_irqrestore(&fep->tmreg_lock, flags); | ||
| 361 | } | ||
| 362 | mutex_unlock(&fep->ptp_clk_mutex); | ||
| 350 | 363 | ||
| 351 | mod_timer(&fep->time_keep, jiffies + HZ); | 364 | schedule_delayed_work(&fep->time_keep, HZ); |
| 352 | } | 365 | } |
| 353 | 366 | ||
| 354 | /** | 367 | /** |
| @@ -386,15 +399,13 @@ void fec_ptp_init(struct platform_device *pdev) | |||
| 386 | 399 | ||
| 387 | fec_ptp_start_cyclecounter(ndev); | 400 | fec_ptp_start_cyclecounter(ndev); |
| 388 | 401 | ||
| 389 | init_timer(&fep->time_keep); | 402 | INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep); |
| 390 | fep->time_keep.data = (unsigned long)fep; | ||
| 391 | fep->time_keep.function = fec_time_keep; | ||
| 392 | fep->time_keep.expires = jiffies + HZ; | ||
| 393 | add_timer(&fep->time_keep); | ||
| 394 | 403 | ||
| 395 | fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev); | 404 | fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev); |
| 396 | if (IS_ERR(fep->ptp_clock)) { | 405 | if (IS_ERR(fep->ptp_clock)) { |
| 397 | fep->ptp_clock = NULL; | 406 | fep->ptp_clock = NULL; |
| 398 | pr_err("ptp_clock_register failed\n"); | 407 | pr_err("ptp_clock_register failed\n"); |
| 399 | } | 408 | } |
| 409 | |||
| 410 | schedule_delayed_work(&fep->time_keep, HZ); | ||
| 400 | } | 411 | } |
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index c9127562bd22..21978cc019e7 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c | |||
| @@ -292,6 +292,18 @@ failure: | |||
| 292 | atomic_add(buffers_added, &(pool->available)); | 292 | atomic_add(buffers_added, &(pool->available)); |
| 293 | } | 293 | } |
| 294 | 294 | ||
| 295 | /* | ||
| 296 | * The final 8 bytes of the buffer list is a counter of frames dropped | ||
| 297 | * because there was not a buffer in the buffer list capable of holding | ||
| 298 | * the frame. | ||
| 299 | */ | ||
| 300 | static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter) | ||
| 301 | { | ||
| 302 | __be64 *p = adapter->buffer_list_addr + 4096 - 8; | ||
| 303 | |||
| 304 | adapter->rx_no_buffer = be64_to_cpup(p); | ||
| 305 | } | ||
| 306 | |||
| 295 | /* replenish routine */ | 307 | /* replenish routine */ |
| 296 | static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) | 308 | static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) |
| 297 | { | 309 | { |
| @@ -307,8 +319,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) | |||
| 307 | ibmveth_replenish_buffer_pool(adapter, pool); | 319 | ibmveth_replenish_buffer_pool(adapter, pool); |
| 308 | } | 320 | } |
| 309 | 321 | ||
| 310 | adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) + | 322 | ibmveth_update_rx_no_buffer(adapter); |
| 311 | 4096 - 8); | ||
| 312 | } | 323 | } |
| 313 | 324 | ||
| 314 | /* empty and free ana buffer pool - also used to do cleanup in error paths */ | 325 | /* empty and free ana buffer pool - also used to do cleanup in error paths */ |
| @@ -698,8 +709,7 @@ static int ibmveth_close(struct net_device *netdev) | |||
| 698 | 709 | ||
| 699 | free_irq(netdev->irq, netdev); | 710 | free_irq(netdev->irq, netdev); |
| 700 | 711 | ||
| 701 | adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) + | 712 | ibmveth_update_rx_no_buffer(adapter); |
| 702 | 4096 - 8); | ||
| 703 | 713 | ||
| 704 | ibmveth_cleanup(adapter); | 714 | ibmveth_cleanup(adapter); |
| 705 | 715 | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index bb7fe98b3a6c..537b6216971d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c | |||
| @@ -247,7 +247,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi) | |||
| 247 | u32 prttsyn_stat; | 247 | u32 prttsyn_stat; |
| 248 | int n; | 248 | int n; |
| 249 | 249 | ||
| 250 | if (pf->flags & I40E_FLAG_PTP) | 250 | if (!(pf->flags & I40E_FLAG_PTP)) |
| 251 | return; | 251 | return; |
| 252 | 252 | ||
| 253 | prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); | 253 | prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 89672551dce9..3ac6a0d2f143 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | |||
| @@ -1003,11 +1003,19 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) | |||
| 1003 | static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, | 1003 | static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, |
| 1004 | u32 v_retval, u8 *msg, u16 msglen) | 1004 | u32 v_retval, u8 *msg, u16 msglen) |
| 1005 | { | 1005 | { |
| 1006 | struct i40e_pf *pf = vf->pf; | 1006 | struct i40e_pf *pf; |
| 1007 | struct i40e_hw *hw = &pf->hw; | 1007 | struct i40e_hw *hw; |
| 1008 | int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; | 1008 | int abs_vf_id; |
| 1009 | i40e_status aq_ret; | 1009 | i40e_status aq_ret; |
| 1010 | 1010 | ||
| 1011 | /* validate the request */ | ||
| 1012 | if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) | ||
| 1013 | return -EINVAL; | ||
| 1014 | |||
| 1015 | pf = vf->pf; | ||
| 1016 | hw = &pf->hw; | ||
| 1017 | abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; | ||
| 1018 | |||
| 1011 | /* single place to detect unsuccessful return values */ | 1019 | /* single place to detect unsuccessful return values */ |
| 1012 | if (v_retval) { | 1020 | if (v_retval) { |
| 1013 | vf->num_invalid_msgs++; | 1021 | vf->num_invalid_msgs++; |
| @@ -1928,17 +1936,20 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf, | |||
| 1928 | { | 1936 | { |
| 1929 | struct i40e_hw *hw = &pf->hw; | 1937 | struct i40e_hw *hw = &pf->hw; |
| 1930 | struct i40e_vf *vf = pf->vf; | 1938 | struct i40e_vf *vf = pf->vf; |
| 1931 | int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; | ||
| 1932 | int i; | 1939 | int i; |
| 1933 | 1940 | ||
| 1934 | for (i = 0; i < pf->num_alloc_vfs; i++) { | 1941 | for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { |
| 1942 | int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; | ||
| 1943 | /* Not all vfs are enabled so skip the ones that are not */ | ||
| 1944 | if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && | ||
| 1945 | !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) | ||
| 1946 | continue; | ||
| 1947 | |||
| 1935 | /* Ignore return value on purpose - a given VF may fail, but | 1948 | /* Ignore return value on purpose - a given VF may fail, but |
| 1936 | * we need to keep going and send to all of them | 1949 | * we need to keep going and send to all of them |
| 1937 | */ | 1950 | */ |
| 1938 | i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, | 1951 | i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, |
| 1939 | msg, msglen, NULL); | 1952 | msg, msglen, NULL); |
| 1940 | vf++; | ||
| 1941 | abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; | ||
| 1942 | } | 1953 | } |
| 1943 | } | 1954 | } |
| 1944 | 1955 | ||
| @@ -1954,12 +1965,12 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf) | |||
| 1954 | struct i40e_hw *hw = &pf->hw; | 1965 | struct i40e_hw *hw = &pf->hw; |
| 1955 | struct i40e_vf *vf = pf->vf; | 1966 | struct i40e_vf *vf = pf->vf; |
| 1956 | struct i40e_link_status *ls = &pf->hw.phy.link_info; | 1967 | struct i40e_link_status *ls = &pf->hw.phy.link_info; |
| 1957 | int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; | ||
| 1958 | int i; | 1968 | int i; |
| 1959 | 1969 | ||
| 1960 | pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; | 1970 | pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; |
| 1961 | pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; | 1971 | pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; |
| 1962 | for (i = 0; i < pf->num_alloc_vfs; i++) { | 1972 | for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { |
| 1973 | int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; | ||
| 1963 | if (vf->link_forced) { | 1974 | if (vf->link_forced) { |
| 1964 | pfe.event_data.link_event.link_status = vf->link_up; | 1975 | pfe.event_data.link_event.link_status = vf->link_up; |
| 1965 | pfe.event_data.link_event.link_speed = | 1976 | pfe.event_data.link_event.link_speed = |
| @@ -1972,8 +1983,6 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf) | |||
| 1972 | i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, | 1983 | i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, |
| 1973 | 0, (u8 *)&pfe, sizeof(pfe), | 1984 | 0, (u8 *)&pfe, sizeof(pfe), |
| 1974 | NULL); | 1985 | NULL); |
| 1975 | vf++; | ||
| 1976 | abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; | ||
| 1977 | } | 1986 | } |
| 1978 | } | 1987 | } |
| 1979 | 1988 | ||
| @@ -2002,7 +2011,18 @@ void i40e_vc_notify_reset(struct i40e_pf *pf) | |||
| 2002 | void i40e_vc_notify_vf_reset(struct i40e_vf *vf) | 2011 | void i40e_vc_notify_vf_reset(struct i40e_vf *vf) |
| 2003 | { | 2012 | { |
| 2004 | struct i40e_virtchnl_pf_event pfe; | 2013 | struct i40e_virtchnl_pf_event pfe; |
| 2005 | int abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id; | 2014 | int abs_vf_id; |
| 2015 | |||
| 2016 | /* validate the request */ | ||
| 2017 | if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) | ||
| 2018 | return; | ||
| 2019 | |||
| 2020 | /* verify if the VF is in either init or active before proceeding */ | ||
| 2021 | if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && | ||
| 2022 | !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) | ||
| 2023 | return; | ||
| 2024 | |||
| 2025 | abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id; | ||
| 2006 | 2026 | ||
| 2007 | pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; | 2027 | pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; |
| 2008 | pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; | 2028 | pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 16039d1497b8..b84f5ea3d659 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | |||
| @@ -268,7 +268,7 @@ struct qlcnic_fdt { | |||
| 268 | u16 cksum; | 268 | u16 cksum; |
| 269 | u16 unused; | 269 | u16 unused; |
| 270 | u8 model[16]; | 270 | u8 model[16]; |
| 271 | u16 mfg_id; | 271 | u8 mfg_id; |
| 272 | u16 id; | 272 | u16 id; |
| 273 | u8 flag; | 273 | u8 flag; |
| 274 | u8 erase_cmd; | 274 | u8 erase_cmd; |
| @@ -2362,6 +2362,19 @@ static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter) | |||
| 2362 | return QLC_DEFAULT_VNIC_COUNT; | 2362 | return QLC_DEFAULT_VNIC_COUNT; |
| 2363 | } | 2363 | } |
| 2364 | 2364 | ||
| 2365 | static inline void qlcnic_swap32_buffer(u32 *buffer, int count) | ||
| 2366 | { | ||
| 2367 | #if defined(__BIG_ENDIAN) | ||
| 2368 | u32 *tmp = buffer; | ||
| 2369 | int i; | ||
| 2370 | |||
| 2371 | for (i = 0; i < count; i++) { | ||
| 2372 | *tmp = swab32(*tmp); | ||
| 2373 | tmp++; | ||
| 2374 | } | ||
| 2375 | #endif | ||
| 2376 | } | ||
| 2377 | |||
| 2365 | #ifdef CONFIG_QLCNIC_HWMON | 2378 | #ifdef CONFIG_QLCNIC_HWMON |
| 2366 | void qlcnic_register_hwmon_dev(struct qlcnic_adapter *); | 2379 | void qlcnic_register_hwmon_dev(struct qlcnic_adapter *); |
| 2367 | void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *); | 2380 | void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index a4a4ec0b68f8..476e4998ef99 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | |||
| @@ -2603,7 +2603,7 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter, | |||
| 2603 | } | 2603 | } |
| 2604 | 2604 | ||
| 2605 | qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_DIRECT_WINDOW, | 2605 | qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_DIRECT_WINDOW, |
| 2606 | (addr)); | 2606 | (addr & 0xFFFF0000)); |
| 2607 | 2607 | ||
| 2608 | range = flash_offset + (count * sizeof(u32)); | 2608 | range = flash_offset + (count * sizeof(u32)); |
| 2609 | /* Check if data is spread across multiple sectors */ | 2609 | /* Check if data is spread across multiple sectors */ |
| @@ -2753,7 +2753,7 @@ int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *adapter) | |||
| 2753 | ret = qlcnic_83xx_lockless_flash_read32(adapter, QLCNIC_FDT_LOCATION, | 2753 | ret = qlcnic_83xx_lockless_flash_read32(adapter, QLCNIC_FDT_LOCATION, |
| 2754 | (u8 *)&adapter->ahw->fdt, | 2754 | (u8 *)&adapter->ahw->fdt, |
| 2755 | count); | 2755 | count); |
| 2756 | 2756 | qlcnic_swap32_buffer((u32 *)&adapter->ahw->fdt, count); | |
| 2757 | qlcnic_83xx_unlock_flash(adapter); | 2757 | qlcnic_83xx_unlock_flash(adapter); |
| 2758 | return ret; | 2758 | return ret; |
| 2759 | } | 2759 | } |
| @@ -2788,7 +2788,7 @@ int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter, | |||
| 2788 | 2788 | ||
| 2789 | addr1 = (sector_start_addr & 0xFF) << 16; | 2789 | addr1 = (sector_start_addr & 0xFF) << 16; |
| 2790 | addr2 = (sector_start_addr & 0xFF0000) >> 16; | 2790 | addr2 = (sector_start_addr & 0xFF0000) >> 16; |
| 2791 | reversed_addr = addr1 | addr2; | 2791 | reversed_addr = addr1 | addr2 | (sector_start_addr & 0xFF00); |
| 2792 | 2792 | ||
| 2793 | qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, | 2793 | qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, |
| 2794 | reversed_addr); | 2794 | reversed_addr); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index f33559b72528..86783e1afcf7 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | |||
| @@ -1378,31 +1378,45 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) | |||
| 1378 | { | 1378 | { |
| 1379 | struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info; | 1379 | struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info; |
| 1380 | const struct firmware *fw = fw_info->fw; | 1380 | const struct firmware *fw = fw_info->fw; |
| 1381 | u32 dest, *p_cache; | 1381 | u32 dest, *p_cache, *temp; |
| 1382 | int i, ret = -EIO; | 1382 | int i, ret = -EIO; |
| 1383 | __le32 *temp_le; | ||
| 1383 | u8 data[16]; | 1384 | u8 data[16]; |
| 1384 | size_t size; | 1385 | size_t size; |
| 1385 | u64 addr; | 1386 | u64 addr; |
| 1386 | 1387 | ||
| 1388 | temp = kzalloc(fw->size, GFP_KERNEL); | ||
| 1389 | if (!temp) { | ||
| 1390 | release_firmware(fw); | ||
| 1391 | fw_info->fw = NULL; | ||
| 1392 | return -ENOMEM; | ||
| 1393 | } | ||
| 1394 | |||
| 1395 | temp_le = (__le32 *)fw->data; | ||
| 1396 | |||
| 1397 | /* FW image in file is in little endian, swap the data to nullify | ||
| 1398 | * the effect of writel() operation on big endian platform. | ||
| 1399 | */ | ||
| 1400 | for (i = 0; i < fw->size / sizeof(u32); i++) | ||
| 1401 | temp[i] = __le32_to_cpu(temp_le[i]); | ||
| 1402 | |||
| 1387 | dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR); | 1403 | dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR); |
| 1388 | size = (fw->size & ~0xF); | 1404 | size = (fw->size & ~0xF); |
| 1389 | p_cache = (u32 *)fw->data; | 1405 | p_cache = temp; |
| 1390 | addr = (u64)dest; | 1406 | addr = (u64)dest; |
| 1391 | 1407 | ||
| 1392 | ret = qlcnic_ms_mem_write128(adapter, addr, | 1408 | ret = qlcnic_ms_mem_write128(adapter, addr, |
| 1393 | p_cache, size / 16); | 1409 | p_cache, size / 16); |
| 1394 | if (ret) { | 1410 | if (ret) { |
| 1395 | dev_err(&adapter->pdev->dev, "MS memory write failed\n"); | 1411 | dev_err(&adapter->pdev->dev, "MS memory write failed\n"); |
| 1396 | release_firmware(fw); | 1412 | goto exit; |
| 1397 | fw_info->fw = NULL; | ||
| 1398 | return -EIO; | ||
| 1399 | } | 1413 | } |
| 1400 | 1414 | ||
| 1401 | /* alignment check */ | 1415 | /* alignment check */ |
| 1402 | if (fw->size & 0xF) { | 1416 | if (fw->size & 0xF) { |
| 1403 | addr = dest + size; | 1417 | addr = dest + size; |
| 1404 | for (i = 0; i < (fw->size & 0xF); i++) | 1418 | for (i = 0; i < (fw->size & 0xF); i++) |
| 1405 | data[i] = fw->data[size + i]; | 1419 | data[i] = temp[size + i]; |
| 1406 | for (; i < 16; i++) | 1420 | for (; i < 16; i++) |
| 1407 | data[i] = 0; | 1421 | data[i] = 0; |
| 1408 | ret = qlcnic_ms_mem_write128(adapter, addr, | 1422 | ret = qlcnic_ms_mem_write128(adapter, addr, |
| @@ -1410,15 +1424,16 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) | |||
| 1410 | if (ret) { | 1424 | if (ret) { |
| 1411 | dev_err(&adapter->pdev->dev, | 1425 | dev_err(&adapter->pdev->dev, |
| 1412 | "MS memory write failed\n"); | 1426 | "MS memory write failed\n"); |
| 1413 | release_firmware(fw); | 1427 | goto exit; |
| 1414 | fw_info->fw = NULL; | ||
| 1415 | return -EIO; | ||
| 1416 | } | 1428 | } |
| 1417 | } | 1429 | } |
| 1430 | |||
| 1431 | exit: | ||
| 1418 | release_firmware(fw); | 1432 | release_firmware(fw); |
| 1419 | fw_info->fw = NULL; | 1433 | fw_info->fw = NULL; |
| 1434 | kfree(temp); | ||
| 1420 | 1435 | ||
| 1421 | return 0; | 1436 | return ret; |
| 1422 | } | 1437 | } |
| 1423 | 1438 | ||
| 1424 | static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter) | 1439 | static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter) |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index e46fc39d425d..c9f57fb84b9e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | |||
| @@ -47,15 +47,26 @@ struct qlcnic_common_entry_hdr { | |||
| 47 | u32 type; | 47 | u32 type; |
| 48 | u32 offset; | 48 | u32 offset; |
| 49 | u32 cap_size; | 49 | u32 cap_size; |
| 50 | #if defined(__LITTLE_ENDIAN) | ||
| 50 | u8 mask; | 51 | u8 mask; |
| 51 | u8 rsvd[2]; | 52 | u8 rsvd[2]; |
| 52 | u8 flags; | 53 | u8 flags; |
| 54 | #else | ||
| 55 | u8 flags; | ||
| 56 | u8 rsvd[2]; | ||
| 57 | u8 mask; | ||
| 58 | #endif | ||
| 53 | } __packed; | 59 | } __packed; |
| 54 | 60 | ||
| 55 | struct __crb { | 61 | struct __crb { |
| 56 | u32 addr; | 62 | u32 addr; |
| 63 | #if defined(__LITTLE_ENDIAN) | ||
| 57 | u8 stride; | 64 | u8 stride; |
| 58 | u8 rsvd1[3]; | 65 | u8 rsvd1[3]; |
| 66 | #else | ||
| 67 | u8 rsvd1[3]; | ||
| 68 | u8 stride; | ||
| 69 | #endif | ||
| 59 | u32 data_size; | 70 | u32 data_size; |
| 60 | u32 no_ops; | 71 | u32 no_ops; |
| 61 | u32 rsvd2[4]; | 72 | u32 rsvd2[4]; |
| @@ -63,15 +74,28 @@ struct __crb { | |||
| 63 | 74 | ||
| 64 | struct __ctrl { | 75 | struct __ctrl { |
| 65 | u32 addr; | 76 | u32 addr; |
| 77 | #if defined(__LITTLE_ENDIAN) | ||
| 66 | u8 stride; | 78 | u8 stride; |
| 67 | u8 index_a; | 79 | u8 index_a; |
| 68 | u16 timeout; | 80 | u16 timeout; |
| 81 | #else | ||
| 82 | u16 timeout; | ||
| 83 | u8 index_a; | ||
| 84 | u8 stride; | ||
| 85 | #endif | ||
| 69 | u32 data_size; | 86 | u32 data_size; |
| 70 | u32 no_ops; | 87 | u32 no_ops; |
| 88 | #if defined(__LITTLE_ENDIAN) | ||
| 71 | u8 opcode; | 89 | u8 opcode; |
| 72 | u8 index_v; | 90 | u8 index_v; |
| 73 | u8 shl_val; | 91 | u8 shl_val; |
| 74 | u8 shr_val; | 92 | u8 shr_val; |
| 93 | #else | ||
| 94 | u8 shr_val; | ||
| 95 | u8 shl_val; | ||
| 96 | u8 index_v; | ||
| 97 | u8 opcode; | ||
| 98 | #endif | ||
| 75 | u32 val1; | 99 | u32 val1; |
| 76 | u32 val2; | 100 | u32 val2; |
| 77 | u32 val3; | 101 | u32 val3; |
| @@ -79,16 +103,27 @@ struct __ctrl { | |||
| 79 | 103 | ||
| 80 | struct __cache { | 104 | struct __cache { |
| 81 | u32 addr; | 105 | u32 addr; |
| 106 | #if defined(__LITTLE_ENDIAN) | ||
| 82 | u16 stride; | 107 | u16 stride; |
| 83 | u16 init_tag_val; | 108 | u16 init_tag_val; |
| 109 | #else | ||
| 110 | u16 init_tag_val; | ||
| 111 | u16 stride; | ||
| 112 | #endif | ||
| 84 | u32 size; | 113 | u32 size; |
| 85 | u32 no_ops; | 114 | u32 no_ops; |
| 86 | u32 ctrl_addr; | 115 | u32 ctrl_addr; |
| 87 | u32 ctrl_val; | 116 | u32 ctrl_val; |
| 88 | u32 read_addr; | 117 | u32 read_addr; |
| 118 | #if defined(__LITTLE_ENDIAN) | ||
| 89 | u8 read_addr_stride; | 119 | u8 read_addr_stride; |
| 90 | u8 read_addr_num; | 120 | u8 read_addr_num; |
| 91 | u8 rsvd1[2]; | 121 | u8 rsvd1[2]; |
| 122 | #else | ||
| 123 | u8 rsvd1[2]; | ||
| 124 | u8 read_addr_num; | ||
| 125 | u8 read_addr_stride; | ||
| 126 | #endif | ||
| 92 | } __packed; | 127 | } __packed; |
| 93 | 128 | ||
| 94 | struct __ocm { | 129 | struct __ocm { |
| @@ -122,23 +157,39 @@ struct __mux { | |||
| 122 | 157 | ||
| 123 | struct __queue { | 158 | struct __queue { |
| 124 | u32 sel_addr; | 159 | u32 sel_addr; |
| 160 | #if defined(__LITTLE_ENDIAN) | ||
| 125 | u16 stride; | 161 | u16 stride; |
| 126 | u8 rsvd[2]; | 162 | u8 rsvd[2]; |
| 163 | #else | ||
| 164 | u8 rsvd[2]; | ||
| 165 | u16 stride; | ||
| 166 | #endif | ||
| 127 | u32 size; | 167 | u32 size; |
| 128 | u32 no_ops; | 168 | u32 no_ops; |
| 129 | u8 rsvd2[8]; | 169 | u8 rsvd2[8]; |
| 130 | u32 read_addr; | 170 | u32 read_addr; |
| 171 | #if defined(__LITTLE_ENDIAN) | ||
| 131 | u8 read_addr_stride; | 172 | u8 read_addr_stride; |
| 132 | u8 read_addr_cnt; | 173 | u8 read_addr_cnt; |
| 133 | u8 rsvd3[2]; | 174 | u8 rsvd3[2]; |
| 175 | #else | ||
| 176 | u8 rsvd3[2]; | ||
| 177 | u8 read_addr_cnt; | ||
| 178 | u8 read_addr_stride; | ||
| 179 | #endif | ||
| 134 | } __packed; | 180 | } __packed; |
| 135 | 181 | ||
| 136 | struct __pollrd { | 182 | struct __pollrd { |
| 137 | u32 sel_addr; | 183 | u32 sel_addr; |
| 138 | u32 read_addr; | 184 | u32 read_addr; |
| 139 | u32 sel_val; | 185 | u32 sel_val; |
| 186 | #if defined(__LITTLE_ENDIAN) | ||
| 140 | u16 sel_val_stride; | 187 | u16 sel_val_stride; |
| 141 | u16 no_ops; | 188 | u16 no_ops; |
| 189 | #else | ||
| 190 | u16 no_ops; | ||
| 191 | u16 sel_val_stride; | ||
| 192 | #endif | ||
| 142 | u32 poll_wait; | 193 | u32 poll_wait; |
| 143 | u32 poll_mask; | 194 | u32 poll_mask; |
| 144 | u32 data_size; | 195 | u32 data_size; |
| @@ -153,9 +204,15 @@ struct __mux2 { | |||
| 153 | u32 no_ops; | 204 | u32 no_ops; |
| 154 | u32 sel_val_mask; | 205 | u32 sel_val_mask; |
| 155 | u32 read_addr; | 206 | u32 read_addr; |
| 207 | #if defined(__LITTLE_ENDIAN) | ||
| 156 | u8 sel_val_stride; | 208 | u8 sel_val_stride; |
| 157 | u8 data_size; | 209 | u8 data_size; |
| 158 | u8 rsvd[2]; | 210 | u8 rsvd[2]; |
| 211 | #else | ||
| 212 | u8 rsvd[2]; | ||
| 213 | u8 data_size; | ||
| 214 | u8 sel_val_stride; | ||
| 215 | #endif | ||
| 159 | } __packed; | 216 | } __packed; |
| 160 | 217 | ||
| 161 | struct __pollrdmwr { | 218 | struct __pollrdmwr { |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index f5786d5792df..59a721fba018 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | |||
| @@ -280,6 +280,7 @@ static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj, | |||
| 280 | if (ret != 0) | 280 | if (ret != 0) |
| 281 | return ret; | 281 | return ret; |
| 282 | qlcnic_read_crb(adapter, buf, offset, size); | 282 | qlcnic_read_crb(adapter, buf, offset, size); |
| 283 | qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); | ||
| 283 | 284 | ||
| 284 | return size; | 285 | return size; |
| 285 | } | 286 | } |
| @@ -296,6 +297,7 @@ static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj, | |||
| 296 | if (ret != 0) | 297 | if (ret != 0) |
| 297 | return ret; | 298 | return ret; |
| 298 | 299 | ||
| 300 | qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); | ||
| 299 | qlcnic_write_crb(adapter, buf, offset, size); | 301 | qlcnic_write_crb(adapter, buf, offset, size); |
| 300 | return size; | 302 | return size; |
| 301 | } | 303 | } |
| @@ -329,6 +331,7 @@ static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj, | |||
| 329 | return -EIO; | 331 | return -EIO; |
| 330 | 332 | ||
| 331 | memcpy(buf, &data, size); | 333 | memcpy(buf, &data, size); |
| 334 | qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); | ||
| 332 | 335 | ||
| 333 | return size; | 336 | return size; |
| 334 | } | 337 | } |
| @@ -346,6 +349,7 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj, | |||
| 346 | if (ret != 0) | 349 | if (ret != 0) |
| 347 | return ret; | 350 | return ret; |
| 348 | 351 | ||
| 352 | qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); | ||
| 349 | memcpy(&data, buf, size); | 353 | memcpy(&data, buf, size); |
| 350 | 354 | ||
| 351 | if (qlcnic_pci_mem_write_2M(adapter, offset, data)) | 355 | if (qlcnic_pci_mem_write_2M(adapter, offset, data)) |
| @@ -412,6 +416,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp, | |||
| 412 | if (rem) | 416 | if (rem) |
| 413 | return QL_STATUS_INVALID_PARAM; | 417 | return QL_STATUS_INVALID_PARAM; |
| 414 | 418 | ||
| 419 | qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); | ||
| 415 | pm_cfg = (struct qlcnic_pm_func_cfg *)buf; | 420 | pm_cfg = (struct qlcnic_pm_func_cfg *)buf; |
| 416 | ret = validate_pm_config(adapter, pm_cfg, count); | 421 | ret = validate_pm_config(adapter, pm_cfg, count); |
| 417 | 422 | ||
| @@ -474,6 +479,7 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp, | |||
| 474 | pm_cfg[pci_func].dest_npar = 0; | 479 | pm_cfg[pci_func].dest_npar = 0; |
| 475 | pm_cfg[pci_func].pci_func = i; | 480 | pm_cfg[pci_func].pci_func = i; |
| 476 | } | 481 | } |
| 482 | qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); | ||
| 477 | return size; | 483 | return size; |
| 478 | } | 484 | } |
| 479 | 485 | ||
| @@ -555,6 +561,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file, | |||
| 555 | if (rem) | 561 | if (rem) |
| 556 | return QL_STATUS_INVALID_PARAM; | 562 | return QL_STATUS_INVALID_PARAM; |
| 557 | 563 | ||
| 564 | qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); | ||
| 558 | esw_cfg = (struct qlcnic_esw_func_cfg *)buf; | 565 | esw_cfg = (struct qlcnic_esw_func_cfg *)buf; |
| 559 | ret = validate_esw_config(adapter, esw_cfg, count); | 566 | ret = validate_esw_config(adapter, esw_cfg, count); |
| 560 | if (ret) | 567 | if (ret) |
| @@ -649,6 +656,7 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file, | |||
| 649 | if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func])) | 656 | if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func])) |
| 650 | return QL_STATUS_INVALID_PARAM; | 657 | return QL_STATUS_INVALID_PARAM; |
| 651 | } | 658 | } |
| 659 | qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); | ||
| 652 | return size; | 660 | return size; |
| 653 | } | 661 | } |
| 654 | 662 | ||
| @@ -688,6 +696,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file, | |||
| 688 | if (rem) | 696 | if (rem) |
| 689 | return QL_STATUS_INVALID_PARAM; | 697 | return QL_STATUS_INVALID_PARAM; |
| 690 | 698 | ||
| 699 | qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); | ||
| 691 | np_cfg = (struct qlcnic_npar_func_cfg *)buf; | 700 | np_cfg = (struct qlcnic_npar_func_cfg *)buf; |
| 692 | ret = validate_npar_config(adapter, np_cfg, count); | 701 | ret = validate_npar_config(adapter, np_cfg, count); |
| 693 | if (ret) | 702 | if (ret) |
| @@ -759,6 +768,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file, | |||
| 759 | np_cfg[pci_func].max_tx_queues = nic_info.max_tx_ques; | 768 | np_cfg[pci_func].max_tx_queues = nic_info.max_tx_ques; |
| 760 | np_cfg[pci_func].max_rx_queues = nic_info.max_rx_ques; | 769 | np_cfg[pci_func].max_rx_queues = nic_info.max_rx_ques; |
| 761 | } | 770 | } |
| 771 | qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); | ||
| 762 | return size; | 772 | return size; |
| 763 | } | 773 | } |
| 764 | 774 | ||
| @@ -916,6 +926,7 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file, | |||
| 916 | 926 | ||
| 917 | pci_cfg = (struct qlcnic_pci_func_cfg *)buf; | 927 | pci_cfg = (struct qlcnic_pci_func_cfg *)buf; |
| 918 | count = size / sizeof(struct qlcnic_pci_func_cfg); | 928 | count = size / sizeof(struct qlcnic_pci_func_cfg); |
| 929 | qlcnic_swap32_buffer((u32 *)pci_info, size / sizeof(u32)); | ||
| 919 | for (i = 0; i < count; i++) { | 930 | for (i = 0; i < count; i++) { |
| 920 | pci_cfg[i].pci_func = pci_info[i].id; | 931 | pci_cfg[i].pci_func = pci_info[i].id; |
| 921 | pci_cfg[i].func_type = pci_info[i].type; | 932 | pci_cfg[i].func_type = pci_info[i].type; |
| @@ -969,6 +980,7 @@ static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp, | |||
| 969 | } | 980 | } |
| 970 | 981 | ||
| 971 | qlcnic_83xx_unlock_flash(adapter); | 982 | qlcnic_83xx_unlock_flash(adapter); |
| 983 | qlcnic_swap32_buffer((u32 *)p_read_buf, count); | ||
| 972 | memcpy(buf, p_read_buf, size); | 984 | memcpy(buf, p_read_buf, size); |
| 973 | kfree(p_read_buf); | 985 | kfree(p_read_buf); |
| 974 | 986 | ||
| @@ -986,9 +998,10 @@ static int qlcnic_83xx_sysfs_flash_bulk_write(struct qlcnic_adapter *adapter, | |||
| 986 | if (!p_cache) | 998 | if (!p_cache) |
| 987 | return -ENOMEM; | 999 | return -ENOMEM; |
| 988 | 1000 | ||
| 1001 | count = size / sizeof(u32); | ||
| 1002 | qlcnic_swap32_buffer((u32 *)buf, count); | ||
| 989 | memcpy(p_cache, buf, size); | 1003 | memcpy(p_cache, buf, size); |
| 990 | p_src = p_cache; | 1004 | p_src = p_cache; |
| 991 | count = size / sizeof(u32); | ||
| 992 | 1005 | ||
| 993 | if (qlcnic_83xx_lock_flash(adapter) != 0) { | 1006 | if (qlcnic_83xx_lock_flash(adapter) != 0) { |
| 994 | kfree(p_cache); | 1007 | kfree(p_cache); |
| @@ -1053,6 +1066,7 @@ static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter, | |||
| 1053 | if (!p_cache) | 1066 | if (!p_cache) |
| 1054 | return -ENOMEM; | 1067 | return -ENOMEM; |
| 1055 | 1068 | ||
| 1069 | qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); | ||
| 1056 | memcpy(p_cache, buf, size); | 1070 | memcpy(p_cache, buf, size); |
| 1057 | p_src = p_cache; | 1071 | p_src = p_cache; |
| 1058 | count = size / sizeof(u32); | 1072 | count = size / sizeof(u32); |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 60e4ca01ccbb..a96955597755 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
| @@ -739,7 +739,10 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], | |||
| 739 | struct macvlan_dev *vlan = netdev_priv(dev); | 739 | struct macvlan_dev *vlan = netdev_priv(dev); |
| 740 | int err = -EINVAL; | 740 | int err = -EINVAL; |
| 741 | 741 | ||
| 742 | if (!vlan->port->passthru) | 742 | /* Support unicast filter only on passthru devices. |
| 743 | * Multicast filter should be allowed on all devices. | ||
| 744 | */ | ||
| 745 | if (!vlan->port->passthru && is_unicast_ether_addr(addr)) | ||
| 743 | return -EOPNOTSUPP; | 746 | return -EOPNOTSUPP; |
| 744 | 747 | ||
| 745 | if (flags & NLM_F_REPLACE) | 748 | if (flags & NLM_F_REPLACE) |
| @@ -760,7 +763,10 @@ static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], | |||
| 760 | struct macvlan_dev *vlan = netdev_priv(dev); | 763 | struct macvlan_dev *vlan = netdev_priv(dev); |
| 761 | int err = -EINVAL; | 764 | int err = -EINVAL; |
| 762 | 765 | ||
| 763 | if (!vlan->port->passthru) | 766 | /* Support unicast filter only on passthru devices. |
| 767 | * Multicast filter should be allowed on all devices. | ||
| 768 | */ | ||
| 769 | if (!vlan->port->passthru && is_unicast_ether_addr(addr)) | ||
| 764 | return -EOPNOTSUPP; | 770 | return -EOPNOTSUPP; |
| 765 | 771 | ||
| 766 | if (is_unicast_ether_addr(addr)) | 772 | if (is_unicast_ether_addr(addr)) |
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 526b94cea569..fdce1ea28790 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c | |||
| @@ -157,6 +157,23 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev) | |||
| 157 | return bcm7xxx_28nm_afe_config_init(phydev); | 157 | return bcm7xxx_28nm_afe_config_init(phydev); |
| 158 | } | 158 | } |
| 159 | 159 | ||
| 160 | static int bcm7xxx_28nm_resume(struct phy_device *phydev) | ||
| 161 | { | ||
| 162 | int ret; | ||
| 163 | |||
| 164 | /* Re-apply workarounds coming out suspend/resume */ | ||
| 165 | ret = bcm7xxx_28nm_config_init(phydev); | ||
| 166 | if (ret) | ||
| 167 | return ret; | ||
| 168 | |||
| 169 | /* 28nm Gigabit PHYs come out of reset without any half-duplex | ||
| 170 | * or "hub" compliant advertised mode, fix that. This does not | ||
| 171 | * cause any problems with the PHY library since genphy_config_aneg() | ||
| 172 | * gracefully handles auto-negotiated and forced modes. | ||
| 173 | */ | ||
| 174 | return genphy_config_aneg(phydev); | ||
| 175 | } | ||
| 176 | |||
| 160 | static int phy_set_clr_bits(struct phy_device *dev, int location, | 177 | static int phy_set_clr_bits(struct phy_device *dev, int location, |
| 161 | int set_mask, int clr_mask) | 178 | int set_mask, int clr_mask) |
| 162 | { | 179 | { |
| @@ -212,7 +229,7 @@ static int bcm7xxx_config_init(struct phy_device *phydev) | |||
| 212 | } | 229 | } |
| 213 | 230 | ||
| 214 | /* Workaround for putting the PHY in IDDQ mode, required | 231 | /* Workaround for putting the PHY in IDDQ mode, required |
| 215 | * for all BCM7XXX PHYs | 232 | * for all BCM7XXX 40nm and 65nm PHYs |
| 216 | */ | 233 | */ |
| 217 | static int bcm7xxx_suspend(struct phy_device *phydev) | 234 | static int bcm7xxx_suspend(struct phy_device *phydev) |
| 218 | { | 235 | { |
| @@ -257,8 +274,7 @@ static struct phy_driver bcm7xxx_driver[] = { | |||
| 257 | .config_init = bcm7xxx_28nm_afe_config_init, | 274 | .config_init = bcm7xxx_28nm_afe_config_init, |
| 258 | .config_aneg = genphy_config_aneg, | 275 | .config_aneg = genphy_config_aneg, |
| 259 | .read_status = genphy_read_status, | 276 | .read_status = genphy_read_status, |
| 260 | .suspend = bcm7xxx_suspend, | 277 | .resume = bcm7xxx_28nm_resume, |
| 261 | .resume = bcm7xxx_28nm_afe_config_init, | ||
| 262 | .driver = { .owner = THIS_MODULE }, | 278 | .driver = { .owner = THIS_MODULE }, |
| 263 | }, { | 279 | }, { |
| 264 | .phy_id = PHY_ID_BCM7439, | 280 | .phy_id = PHY_ID_BCM7439, |
| @@ -270,8 +286,7 @@ static struct phy_driver bcm7xxx_driver[] = { | |||
| 270 | .config_init = bcm7xxx_28nm_afe_config_init, | 286 | .config_init = bcm7xxx_28nm_afe_config_init, |
| 271 | .config_aneg = genphy_config_aneg, | 287 | .config_aneg = genphy_config_aneg, |
| 272 | .read_status = genphy_read_status, | 288 | .read_status = genphy_read_status, |
| 273 | .suspend = bcm7xxx_suspend, | 289 | .resume = bcm7xxx_28nm_resume, |
| 274 | .resume = bcm7xxx_28nm_afe_config_init, | ||
| 275 | .driver = { .owner = THIS_MODULE }, | 290 | .driver = { .owner = THIS_MODULE }, |
| 276 | }, { | 291 | }, { |
| 277 | .phy_id = PHY_ID_BCM7445, | 292 | .phy_id = PHY_ID_BCM7445, |
| @@ -283,21 +298,7 @@ static struct phy_driver bcm7xxx_driver[] = { | |||
| 283 | .config_init = bcm7xxx_28nm_config_init, | 298 | .config_init = bcm7xxx_28nm_config_init, |
| 284 | .config_aneg = genphy_config_aneg, | 299 | .config_aneg = genphy_config_aneg, |
| 285 | .read_status = genphy_read_status, | 300 | .read_status = genphy_read_status, |
| 286 | .suspend = bcm7xxx_suspend, | 301 | .resume = bcm7xxx_28nm_afe_config_init, |
| 287 | .resume = bcm7xxx_28nm_config_init, | ||
| 288 | .driver = { .owner = THIS_MODULE }, | ||
| 289 | }, { | ||
| 290 | .name = "Broadcom BCM7XXX 28nm", | ||
| 291 | .phy_id = PHY_ID_BCM7XXX_28, | ||
| 292 | .phy_id_mask = PHY_BCM_OUI_MASK, | ||
| 293 | .features = PHY_GBIT_FEATURES | | ||
| 294 | SUPPORTED_Pause | SUPPORTED_Asym_Pause, | ||
| 295 | .flags = PHY_IS_INTERNAL, | ||
| 296 | .config_init = bcm7xxx_28nm_config_init, | ||
| 297 | .config_aneg = genphy_config_aneg, | ||
| 298 | .read_status = genphy_read_status, | ||
| 299 | .suspend = bcm7xxx_suspend, | ||
| 300 | .resume = bcm7xxx_28nm_config_init, | ||
| 301 | .driver = { .owner = THIS_MODULE }, | 302 | .driver = { .owner = THIS_MODULE }, |
| 302 | }, { | 303 | }, { |
| 303 | .phy_id = PHY_BCM_OUI_4, | 304 | .phy_id = PHY_BCM_OUI_4, |
| @@ -331,7 +332,6 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { | |||
| 331 | { PHY_ID_BCM7366, 0xfffffff0, }, | 332 | { PHY_ID_BCM7366, 0xfffffff0, }, |
| 332 | { PHY_ID_BCM7439, 0xfffffff0, }, | 333 | { PHY_ID_BCM7439, 0xfffffff0, }, |
| 333 | { PHY_ID_BCM7445, 0xfffffff0, }, | 334 | { PHY_ID_BCM7445, 0xfffffff0, }, |
| 334 | { PHY_ID_BCM7XXX_28, 0xfffffc00 }, | ||
| 335 | { PHY_BCM_OUI_4, 0xffff0000 }, | 335 | { PHY_BCM_OUI_4, 0xffff0000 }, |
| 336 | { PHY_BCM_OUI_5, 0xffffff00 }, | 336 | { PHY_BCM_OUI_5, 0xffffff00 }, |
| 337 | { } | 337 | { } |
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index 180c49479c42..a4b08198fb9f 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c | |||
| @@ -43,6 +43,22 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev) | |||
| 43 | 43 | ||
| 44 | static int smsc_phy_config_init(struct phy_device *phydev) | 44 | static int smsc_phy_config_init(struct phy_device *phydev) |
| 45 | { | 45 | { |
| 46 | int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); | ||
| 47 | |||
| 48 | if (rc < 0) | ||
| 49 | return rc; | ||
| 50 | |||
| 51 | /* Enable energy detect mode for this SMSC Transceivers */ | ||
| 52 | rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, | ||
| 53 | rc | MII_LAN83C185_EDPWRDOWN); | ||
| 54 | if (rc < 0) | ||
| 55 | return rc; | ||
| 56 | |||
| 57 | return smsc_phy_ack_interrupt(phydev); | ||
| 58 | } | ||
| 59 | |||
| 60 | static int smsc_phy_reset(struct phy_device *phydev) | ||
| 61 | { | ||
| 46 | int rc = phy_read(phydev, MII_LAN83C185_SPECIAL_MODES); | 62 | int rc = phy_read(phydev, MII_LAN83C185_SPECIAL_MODES); |
| 47 | if (rc < 0) | 63 | if (rc < 0) |
| 48 | return rc; | 64 | return rc; |
| @@ -66,18 +82,7 @@ static int smsc_phy_config_init(struct phy_device *phydev) | |||
| 66 | rc = phy_read(phydev, MII_BMCR); | 82 | rc = phy_read(phydev, MII_BMCR); |
| 67 | } while (rc & BMCR_RESET); | 83 | } while (rc & BMCR_RESET); |
| 68 | } | 84 | } |
| 69 | 85 | return 0; | |
| 70 | rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); | ||
| 71 | if (rc < 0) | ||
| 72 | return rc; | ||
| 73 | |||
| 74 | /* Enable energy detect mode for this SMSC Transceivers */ | ||
| 75 | rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, | ||
| 76 | rc | MII_LAN83C185_EDPWRDOWN); | ||
| 77 | if (rc < 0) | ||
| 78 | return rc; | ||
| 79 | |||
| 80 | return smsc_phy_ack_interrupt (phydev); | ||
| 81 | } | 86 | } |
| 82 | 87 | ||
| 83 | static int lan911x_config_init(struct phy_device *phydev) | 88 | static int lan911x_config_init(struct phy_device *phydev) |
| @@ -142,6 +147,7 @@ static struct phy_driver smsc_phy_driver[] = { | |||
| 142 | .config_aneg = genphy_config_aneg, | 147 | .config_aneg = genphy_config_aneg, |
| 143 | .read_status = genphy_read_status, | 148 | .read_status = genphy_read_status, |
| 144 | .config_init = smsc_phy_config_init, | 149 | .config_init = smsc_phy_config_init, |
| 150 | .soft_reset = smsc_phy_reset, | ||
| 145 | 151 | ||
| 146 | /* IRQ related */ | 152 | /* IRQ related */ |
| 147 | .ack_interrupt = smsc_phy_ack_interrupt, | 153 | .ack_interrupt = smsc_phy_ack_interrupt, |
| @@ -164,6 +170,7 @@ static struct phy_driver smsc_phy_driver[] = { | |||
| 164 | .config_aneg = genphy_config_aneg, | 170 | .config_aneg = genphy_config_aneg, |
| 165 | .read_status = genphy_read_status, | 171 | .read_status = genphy_read_status, |
| 166 | .config_init = smsc_phy_config_init, | 172 | .config_init = smsc_phy_config_init, |
| 173 | .soft_reset = smsc_phy_reset, | ||
| 167 | 174 | ||
| 168 | /* IRQ related */ | 175 | /* IRQ related */ |
| 169 | .ack_interrupt = smsc_phy_ack_interrupt, | 176 | .ack_interrupt = smsc_phy_ack_interrupt, |
| @@ -186,6 +193,7 @@ static struct phy_driver smsc_phy_driver[] = { | |||
| 186 | .config_aneg = genphy_config_aneg, | 193 | .config_aneg = genphy_config_aneg, |
| 187 | .read_status = genphy_read_status, | 194 | .read_status = genphy_read_status, |
| 188 | .config_init = smsc_phy_config_init, | 195 | .config_init = smsc_phy_config_init, |
| 196 | .soft_reset = smsc_phy_reset, | ||
| 189 | 197 | ||
| 190 | /* IRQ related */ | 198 | /* IRQ related */ |
| 191 | .ack_interrupt = smsc_phy_ack_interrupt, | 199 | .ack_interrupt = smsc_phy_ack_interrupt, |
| @@ -230,6 +238,7 @@ static struct phy_driver smsc_phy_driver[] = { | |||
| 230 | .config_aneg = genphy_config_aneg, | 238 | .config_aneg = genphy_config_aneg, |
| 231 | .read_status = lan87xx_read_status, | 239 | .read_status = lan87xx_read_status, |
| 232 | .config_init = smsc_phy_config_init, | 240 | .config_init = smsc_phy_config_init, |
| 241 | .soft_reset = smsc_phy_reset, | ||
| 233 | 242 | ||
| 234 | /* IRQ related */ | 243 | /* IRQ related */ |
| 235 | .ack_interrupt = smsc_phy_ack_interrupt, | 244 | .ack_interrupt = smsc_phy_ack_interrupt, |
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 4b66bf09ee55..d2c35920ff08 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c | |||
| @@ -606,6 +606,8 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id) | |||
| 606 | unsigned int best = 0; | 606 | unsigned int best = 0; |
| 607 | struct pwm_lookup *p; | 607 | struct pwm_lookup *p; |
| 608 | unsigned int match; | 608 | unsigned int match; |
| 609 | unsigned int period; | ||
| 610 | enum pwm_polarity polarity; | ||
| 609 | 611 | ||
| 610 | /* look up via DT first */ | 612 | /* look up via DT first */ |
| 611 | if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node) | 613 | if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node) |
| @@ -653,6 +655,8 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id) | |||
| 653 | if (match > best) { | 655 | if (match > best) { |
| 654 | chip = pwmchip_find_by_name(p->provider); | 656 | chip = pwmchip_find_by_name(p->provider); |
| 655 | index = p->index; | 657 | index = p->index; |
| 658 | period = p->period; | ||
| 659 | polarity = p->polarity; | ||
| 656 | 660 | ||
| 657 | if (match != 3) | 661 | if (match != 3) |
| 658 | best = match; | 662 | best = match; |
| @@ -668,8 +672,8 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id) | |||
| 668 | if (IS_ERR(pwm)) | 672 | if (IS_ERR(pwm)) |
| 669 | return pwm; | 673 | return pwm; |
| 670 | 674 | ||
| 671 | pwm_set_period(pwm, p->period); | 675 | pwm_set_period(pwm, period); |
| 672 | pwm_set_polarity(pwm, p->polarity); | 676 | pwm_set_polarity(pwm, polarity); |
| 673 | 677 | ||
| 674 | 678 | ||
| 675 | return pwm; | 679 | return pwm; |
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index a6d47e5eee9e..c43aca69fb30 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c | |||
| @@ -1035,12 +1035,26 @@ static int tty3215_write(struct tty_struct * tty, | |||
| 1035 | const unsigned char *buf, int count) | 1035 | const unsigned char *buf, int count) |
| 1036 | { | 1036 | { |
| 1037 | struct raw3215_info *raw; | 1037 | struct raw3215_info *raw; |
| 1038 | int i, written; | ||
| 1038 | 1039 | ||
| 1039 | if (!tty) | 1040 | if (!tty) |
| 1040 | return 0; | 1041 | return 0; |
| 1041 | raw = (struct raw3215_info *) tty->driver_data; | 1042 | raw = (struct raw3215_info *) tty->driver_data; |
| 1042 | raw3215_write(raw, buf, count); | 1043 | written = count; |
| 1043 | return count; | 1044 | while (count > 0) { |
| 1045 | for (i = 0; i < count; i++) | ||
| 1046 | if (buf[i] == '\t' || buf[i] == '\n') | ||
| 1047 | break; | ||
| 1048 | raw3215_write(raw, buf, i); | ||
| 1049 | count -= i; | ||
| 1050 | buf += i; | ||
| 1051 | if (count > 0) { | ||
| 1052 | raw3215_putchar(raw, *buf); | ||
| 1053 | count--; | ||
| 1054 | buf++; | ||
| 1055 | } | ||
| 1056 | } | ||
| 1057 | return written; | ||
| 1044 | } | 1058 | } |
| 1045 | 1059 | ||
| 1046 | /* | 1060 | /* |
| @@ -1188,7 +1202,7 @@ static int __init tty3215_init(void) | |||
| 1188 | driver->subtype = SYSTEM_TYPE_TTY; | 1202 | driver->subtype = SYSTEM_TYPE_TTY; |
| 1189 | driver->init_termios = tty_std_termios; | 1203 | driver->init_termios = tty_std_termios; |
| 1190 | driver->init_termios.c_iflag = IGNBRK | IGNPAR; | 1204 | driver->init_termios.c_iflag = IGNBRK | IGNPAR; |
| 1191 | driver->init_termios.c_oflag = ONLCR | XTABS; | 1205 | driver->init_termios.c_oflag = ONLCR; |
| 1192 | driver->init_termios.c_lflag = ISIG; | 1206 | driver->init_termios.c_lflag = ISIG; |
| 1193 | driver->flags = TTY_DRIVER_REAL_RAW; | 1207 | driver->flags = TTY_DRIVER_REAL_RAW; |
| 1194 | tty_set_operations(driver, &tty3215_ops); | 1208 | tty_set_operations(driver, &tty3215_ops); |
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c index 7ed7a5987816..003663288e29 100644 --- a/drivers/s390/char/sclp_tty.c +++ b/drivers/s390/char/sclp_tty.c | |||
| @@ -559,7 +559,7 @@ sclp_tty_init(void) | |||
| 559 | driver->subtype = SYSTEM_TYPE_TTY; | 559 | driver->subtype = SYSTEM_TYPE_TTY; |
| 560 | driver->init_termios = tty_std_termios; | 560 | driver->init_termios = tty_std_termios; |
| 561 | driver->init_termios.c_iflag = IGNBRK | IGNPAR; | 561 | driver->init_termios.c_iflag = IGNBRK | IGNPAR; |
| 562 | driver->init_termios.c_oflag = ONLCR | XTABS; | 562 | driver->init_termios.c_oflag = ONLCR; |
| 563 | driver->init_termios.c_lflag = ISIG | ECHO; | 563 | driver->init_termios.c_lflag = ISIG | ECHO; |
| 564 | driver->flags = TTY_DRIVER_REAL_RAW; | 564 | driver->flags = TTY_DRIVER_REAL_RAW; |
| 565 | tty_set_operations(driver, &sclp_ops); | 565 | tty_set_operations(driver, &sclp_ops); |
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile index 788ed9b59b4e..114203f32843 100644 --- a/drivers/sh/Makefile +++ b/drivers/sh/Makefile | |||
| @@ -1,8 +1,7 @@ | |||
| 1 | # | 1 | # |
| 2 | # Makefile for the SuperH specific drivers. | 2 | # Makefile for the SuperH specific drivers. |
| 3 | # | 3 | # |
| 4 | obj-$(CONFIG_SUPERH) += intc/ | 4 | obj-$(CONFIG_SH_INTC) += intc/ |
| 5 | obj-$(CONFIG_ARCH_SHMOBILE_LEGACY) += intc/ | ||
| 6 | ifneq ($(CONFIG_COMMON_CLK),y) | 5 | ifneq ($(CONFIG_COMMON_CLK),y) |
| 7 | obj-$(CONFIG_HAVE_CLK) += clk/ | 6 | obj-$(CONFIG_HAVE_CLK) += clk/ |
| 8 | endif | 7 | endif |
diff --git a/drivers/sh/intc/Kconfig b/drivers/sh/intc/Kconfig index 60228fae943f..6a1b05ddc8c9 100644 --- a/drivers/sh/intc/Kconfig +++ b/drivers/sh/intc/Kconfig | |||
| @@ -1,7 +1,9 @@ | |||
| 1 | config SH_INTC | 1 | config SH_INTC |
| 2 | def_bool y | 2 | bool |
| 3 | select IRQ_DOMAIN | 3 | select IRQ_DOMAIN |
| 4 | 4 | ||
| 5 | if SH_INTC | ||
| 6 | |||
| 5 | comment "Interrupt controller options" | 7 | comment "Interrupt controller options" |
| 6 | 8 | ||
| 7 | config INTC_USERIMASK | 9 | config INTC_USERIMASK |
| @@ -37,3 +39,5 @@ config INTC_MAPPING_DEBUG | |||
| 37 | between system IRQs and the per-controller id tables. | 39 | between system IRQs and the per-controller id tables. |
| 38 | 40 | ||
| 39 | If in doubt, say N. | 41 | If in doubt, say N. |
| 42 | |||
| 43 | endif | ||
| @@ -141,6 +141,7 @@ struct kioctx { | |||
| 141 | 141 | ||
| 142 | struct { | 142 | struct { |
| 143 | unsigned tail; | 143 | unsigned tail; |
| 144 | unsigned completed_events; | ||
| 144 | spinlock_t completion_lock; | 145 | spinlock_t completion_lock; |
| 145 | } ____cacheline_aligned_in_smp; | 146 | } ____cacheline_aligned_in_smp; |
| 146 | 147 | ||
| @@ -857,6 +858,68 @@ out: | |||
| 857 | return ret; | 858 | return ret; |
| 858 | } | 859 | } |
| 859 | 860 | ||
| 861 | /* refill_reqs_available | ||
| 862 | * Updates the reqs_available reference counts used for tracking the | ||
| 863 | * number of free slots in the completion ring. This can be called | ||
| 864 | * from aio_complete() (to optimistically update reqs_available) or | ||
| 865 | * from aio_get_req() (the we're out of events case). It must be | ||
| 866 | * called holding ctx->completion_lock. | ||
| 867 | */ | ||
| 868 | static void refill_reqs_available(struct kioctx *ctx, unsigned head, | ||
| 869 | unsigned tail) | ||
| 870 | { | ||
| 871 | unsigned events_in_ring, completed; | ||
| 872 | |||
| 873 | /* Clamp head since userland can write to it. */ | ||
| 874 | head %= ctx->nr_events; | ||
| 875 | if (head <= tail) | ||
| 876 | events_in_ring = tail - head; | ||
| 877 | else | ||
| 878 | events_in_ring = ctx->nr_events - (head - tail); | ||
| 879 | |||
| 880 | completed = ctx->completed_events; | ||
| 881 | if (events_in_ring < completed) | ||
| 882 | completed -= events_in_ring; | ||
| 883 | else | ||
| 884 | completed = 0; | ||
| 885 | |||
| 886 | if (!completed) | ||
| 887 | return; | ||
| 888 | |||
| 889 | ctx->completed_events -= completed; | ||
| 890 | put_reqs_available(ctx, completed); | ||
| 891 | } | ||
| 892 | |||
| 893 | /* user_refill_reqs_available | ||
| 894 | * Called to refill reqs_available when aio_get_req() encounters an | ||
| 895 | * out of space in the completion ring. | ||
| 896 | */ | ||
| 897 | static void user_refill_reqs_available(struct kioctx *ctx) | ||
| 898 | { | ||
| 899 | spin_lock_irq(&ctx->completion_lock); | ||
| 900 | if (ctx->completed_events) { | ||
| 901 | struct aio_ring *ring; | ||
| 902 | unsigned head; | ||
| 903 | |||
| 904 | /* Access of ring->head may race with aio_read_events_ring() | ||
| 905 | * here, but that's okay since whether we read the old version | ||
| 906 | * or the new version, and either will be valid. The important | ||
| 907 | * part is that head cannot pass tail since we prevent | ||
| 908 | * aio_complete() from updating tail by holding | ||
| 909 | * ctx->completion_lock. Even if head is invalid, the check | ||
| 910 | * against ctx->completed_events below will make sure we do the | ||
| 911 | * safe/right thing. | ||
| 912 | */ | ||
| 913 | ring = kmap_atomic(ctx->ring_pages[0]); | ||
| 914 | head = ring->head; | ||
| 915 | kunmap_atomic(ring); | ||
| 916 | |||
| 917 | refill_reqs_available(ctx, head, ctx->tail); | ||
| 918 | } | ||
| 919 | |||
| 920 | spin_unlock_irq(&ctx->completion_lock); | ||
| 921 | } | ||
| 922 | |||
| 860 | /* aio_get_req | 923 | /* aio_get_req |
| 861 | * Allocate a slot for an aio request. | 924 | * Allocate a slot for an aio request. |
| 862 | * Returns NULL if no requests are free. | 925 | * Returns NULL if no requests are free. |
| @@ -865,8 +928,11 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx) | |||
| 865 | { | 928 | { |
| 866 | struct kiocb *req; | 929 | struct kiocb *req; |
| 867 | 930 | ||
| 868 | if (!get_reqs_available(ctx)) | 931 | if (!get_reqs_available(ctx)) { |
| 869 | return NULL; | 932 | user_refill_reqs_available(ctx); |
| 933 | if (!get_reqs_available(ctx)) | ||
| 934 | return NULL; | ||
| 935 | } | ||
| 870 | 936 | ||
| 871 | req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO); | 937 | req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO); |
| 872 | if (unlikely(!req)) | 938 | if (unlikely(!req)) |
| @@ -925,8 +991,8 @@ void aio_complete(struct kiocb *iocb, long res, long res2) | |||
| 925 | struct kioctx *ctx = iocb->ki_ctx; | 991 | struct kioctx *ctx = iocb->ki_ctx; |
| 926 | struct aio_ring *ring; | 992 | struct aio_ring *ring; |
| 927 | struct io_event *ev_page, *event; | 993 | struct io_event *ev_page, *event; |
| 994 | unsigned tail, pos, head; | ||
| 928 | unsigned long flags; | 995 | unsigned long flags; |
| 929 | unsigned tail, pos; | ||
| 930 | 996 | ||
| 931 | /* | 997 | /* |
| 932 | * Special case handling for sync iocbs: | 998 | * Special case handling for sync iocbs: |
| @@ -987,10 +1053,14 @@ void aio_complete(struct kiocb *iocb, long res, long res2) | |||
| 987 | ctx->tail = tail; | 1053 | ctx->tail = tail; |
| 988 | 1054 | ||
| 989 | ring = kmap_atomic(ctx->ring_pages[0]); | 1055 | ring = kmap_atomic(ctx->ring_pages[0]); |
| 1056 | head = ring->head; | ||
| 990 | ring->tail = tail; | 1057 | ring->tail = tail; |
| 991 | kunmap_atomic(ring); | 1058 | kunmap_atomic(ring); |
| 992 | flush_dcache_page(ctx->ring_pages[0]); | 1059 | flush_dcache_page(ctx->ring_pages[0]); |
| 993 | 1060 | ||
| 1061 | ctx->completed_events++; | ||
| 1062 | if (ctx->completed_events > 1) | ||
| 1063 | refill_reqs_available(ctx, head, tail); | ||
| 994 | spin_unlock_irqrestore(&ctx->completion_lock, flags); | 1064 | spin_unlock_irqrestore(&ctx->completion_lock, flags); |
| 995 | 1065 | ||
| 996 | pr_debug("added to ring %p at [%u]\n", iocb, tail); | 1066 | pr_debug("added to ring %p at [%u]\n", iocb, tail); |
| @@ -1005,7 +1075,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2) | |||
| 1005 | 1075 | ||
| 1006 | /* everything turned out well, dispose of the aiocb. */ | 1076 | /* everything turned out well, dispose of the aiocb. */ |
| 1007 | kiocb_free(iocb); | 1077 | kiocb_free(iocb); |
| 1008 | put_reqs_available(ctx, 1); | ||
| 1009 | 1078 | ||
| 1010 | /* | 1079 | /* |
| 1011 | * We have to order our ring_info tail store above and test | 1080 | * We have to order our ring_info tail store above and test |
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 5a201d81049c..fbd76ded9a34 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c | |||
| @@ -22,7 +22,6 @@ | |||
| 22 | #include <linux/list.h> | 22 | #include <linux/list.h> |
| 23 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
| 24 | #include <linux/freezer.h> | 24 | #include <linux/freezer.h> |
| 25 | #include <linux/workqueue.h> | ||
| 26 | #include "async-thread.h" | 25 | #include "async-thread.h" |
| 27 | #include "ctree.h" | 26 | #include "ctree.h" |
| 28 | 27 | ||
| @@ -55,8 +54,39 @@ struct btrfs_workqueue { | |||
| 55 | struct __btrfs_workqueue *high; | 54 | struct __btrfs_workqueue *high; |
| 56 | }; | 55 | }; |
| 57 | 56 | ||
| 58 | static inline struct __btrfs_workqueue | 57 | static void normal_work_helper(struct btrfs_work *work); |
| 59 | *__btrfs_alloc_workqueue(const char *name, int flags, int max_active, | 58 | |
| 59 | #define BTRFS_WORK_HELPER(name) \ | ||
| 60 | void btrfs_##name(struct work_struct *arg) \ | ||
| 61 | { \ | ||
| 62 | struct btrfs_work *work = container_of(arg, struct btrfs_work, \ | ||
| 63 | normal_work); \ | ||
| 64 | normal_work_helper(work); \ | ||
| 65 | } | ||
| 66 | |||
| 67 | BTRFS_WORK_HELPER(worker_helper); | ||
| 68 | BTRFS_WORK_HELPER(delalloc_helper); | ||
| 69 | BTRFS_WORK_HELPER(flush_delalloc_helper); | ||
| 70 | BTRFS_WORK_HELPER(cache_helper); | ||
| 71 | BTRFS_WORK_HELPER(submit_helper); | ||
| 72 | BTRFS_WORK_HELPER(fixup_helper); | ||
| 73 | BTRFS_WORK_HELPER(endio_helper); | ||
| 74 | BTRFS_WORK_HELPER(endio_meta_helper); | ||
| 75 | BTRFS_WORK_HELPER(endio_meta_write_helper); | ||
| 76 | BTRFS_WORK_HELPER(endio_raid56_helper); | ||
| 77 | BTRFS_WORK_HELPER(rmw_helper); | ||
| 78 | BTRFS_WORK_HELPER(endio_write_helper); | ||
| 79 | BTRFS_WORK_HELPER(freespace_write_helper); | ||
| 80 | BTRFS_WORK_HELPER(delayed_meta_helper); | ||
| 81 | BTRFS_WORK_HELPER(readahead_helper); | ||
| 82 | BTRFS_WORK_HELPER(qgroup_rescan_helper); | ||
| 83 | BTRFS_WORK_HELPER(extent_refs_helper); | ||
| 84 | BTRFS_WORK_HELPER(scrub_helper); | ||
| 85 | BTRFS_WORK_HELPER(scrubwrc_helper); | ||
| 86 | BTRFS_WORK_HELPER(scrubnc_helper); | ||
| 87 | |||
| 88 | static struct __btrfs_workqueue * | ||
| 89 | __btrfs_alloc_workqueue(const char *name, int flags, int max_active, | ||
| 60 | int thresh) | 90 | int thresh) |
| 61 | { | 91 | { |
| 62 | struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS); | 92 | struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS); |
| @@ -232,13 +262,11 @@ static void run_ordered_work(struct __btrfs_workqueue *wq) | |||
| 232 | spin_unlock_irqrestore(lock, flags); | 262 | spin_unlock_irqrestore(lock, flags); |
| 233 | } | 263 | } |
| 234 | 264 | ||
| 235 | static void normal_work_helper(struct work_struct *arg) | 265 | static void normal_work_helper(struct btrfs_work *work) |
| 236 | { | 266 | { |
| 237 | struct btrfs_work *work; | ||
| 238 | struct __btrfs_workqueue *wq; | 267 | struct __btrfs_workqueue *wq; |
| 239 | int need_order = 0; | 268 | int need_order = 0; |
| 240 | 269 | ||
| 241 | work = container_of(arg, struct btrfs_work, normal_work); | ||
| 242 | /* | 270 | /* |
| 243 | * We should not touch things inside work in the following cases: | 271 | * We should not touch things inside work in the following cases: |
| 244 | * 1) after work->func() if it has no ordered_free | 272 | * 1) after work->func() if it has no ordered_free |
| @@ -262,7 +290,7 @@ static void normal_work_helper(struct work_struct *arg) | |||
| 262 | trace_btrfs_all_work_done(work); | 290 | trace_btrfs_all_work_done(work); |
| 263 | } | 291 | } |
| 264 | 292 | ||
| 265 | void btrfs_init_work(struct btrfs_work *work, | 293 | void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func, |
| 266 | btrfs_func_t func, | 294 | btrfs_func_t func, |
| 267 | btrfs_func_t ordered_func, | 295 | btrfs_func_t ordered_func, |
| 268 | btrfs_func_t ordered_free) | 296 | btrfs_func_t ordered_free) |
| @@ -270,7 +298,7 @@ void btrfs_init_work(struct btrfs_work *work, | |||
| 270 | work->func = func; | 298 | work->func = func; |
| 271 | work->ordered_func = ordered_func; | 299 | work->ordered_func = ordered_func; |
| 272 | work->ordered_free = ordered_free; | 300 | work->ordered_free = ordered_free; |
| 273 | INIT_WORK(&work->normal_work, normal_work_helper); | 301 | INIT_WORK(&work->normal_work, uniq_func); |
| 274 | INIT_LIST_HEAD(&work->ordered_list); | 302 | INIT_LIST_HEAD(&work->ordered_list); |
| 275 | work->flags = 0; | 303 | work->flags = 0; |
| 276 | } | 304 | } |
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h index 9c6b66d15fb0..e9e31c94758f 100644 --- a/fs/btrfs/async-thread.h +++ b/fs/btrfs/async-thread.h | |||
| @@ -19,12 +19,14 @@ | |||
| 19 | 19 | ||
| 20 | #ifndef __BTRFS_ASYNC_THREAD_ | 20 | #ifndef __BTRFS_ASYNC_THREAD_ |
| 21 | #define __BTRFS_ASYNC_THREAD_ | 21 | #define __BTRFS_ASYNC_THREAD_ |
| 22 | #include <linux/workqueue.h> | ||
| 22 | 23 | ||
| 23 | struct btrfs_workqueue; | 24 | struct btrfs_workqueue; |
| 24 | /* Internal use only */ | 25 | /* Internal use only */ |
| 25 | struct __btrfs_workqueue; | 26 | struct __btrfs_workqueue; |
| 26 | struct btrfs_work; | 27 | struct btrfs_work; |
| 27 | typedef void (*btrfs_func_t)(struct btrfs_work *arg); | 28 | typedef void (*btrfs_func_t)(struct btrfs_work *arg); |
| 29 | typedef void (*btrfs_work_func_t)(struct work_struct *arg); | ||
| 28 | 30 | ||
| 29 | struct btrfs_work { | 31 | struct btrfs_work { |
| 30 | btrfs_func_t func; | 32 | btrfs_func_t func; |
| @@ -38,11 +40,35 @@ struct btrfs_work { | |||
| 38 | unsigned long flags; | 40 | unsigned long flags; |
| 39 | }; | 41 | }; |
| 40 | 42 | ||
| 43 | #define BTRFS_WORK_HELPER_PROTO(name) \ | ||
| 44 | void btrfs_##name(struct work_struct *arg) | ||
| 45 | |||
| 46 | BTRFS_WORK_HELPER_PROTO(worker_helper); | ||
| 47 | BTRFS_WORK_HELPER_PROTO(delalloc_helper); | ||
| 48 | BTRFS_WORK_HELPER_PROTO(flush_delalloc_helper); | ||
| 49 | BTRFS_WORK_HELPER_PROTO(cache_helper); | ||
| 50 | BTRFS_WORK_HELPER_PROTO(submit_helper); | ||
| 51 | BTRFS_WORK_HELPER_PROTO(fixup_helper); | ||
| 52 | BTRFS_WORK_HELPER_PROTO(endio_helper); | ||
| 53 | BTRFS_WORK_HELPER_PROTO(endio_meta_helper); | ||
| 54 | BTRFS_WORK_HELPER_PROTO(endio_meta_write_helper); | ||
| 55 | BTRFS_WORK_HELPER_PROTO(endio_raid56_helper); | ||
| 56 | BTRFS_WORK_HELPER_PROTO(rmw_helper); | ||
| 57 | BTRFS_WORK_HELPER_PROTO(endio_write_helper); | ||
| 58 | BTRFS_WORK_HELPER_PROTO(freespace_write_helper); | ||
| 59 | BTRFS_WORK_HELPER_PROTO(delayed_meta_helper); | ||
| 60 | BTRFS_WORK_HELPER_PROTO(readahead_helper); | ||
| 61 | BTRFS_WORK_HELPER_PROTO(qgroup_rescan_helper); | ||
| 62 | BTRFS_WORK_HELPER_PROTO(extent_refs_helper); | ||
| 63 | BTRFS_WORK_HELPER_PROTO(scrub_helper); | ||
| 64 | BTRFS_WORK_HELPER_PROTO(scrubwrc_helper); | ||
| 65 | BTRFS_WORK_HELPER_PROTO(scrubnc_helper); | ||
| 66 | |||
| 41 | struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name, | 67 | struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name, |
| 42 | int flags, | 68 | int flags, |
| 43 | int max_active, | 69 | int max_active, |
| 44 | int thresh); | 70 | int thresh); |
| 45 | void btrfs_init_work(struct btrfs_work *work, | 71 | void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper, |
| 46 | btrfs_func_t func, | 72 | btrfs_func_t func, |
| 47 | btrfs_func_t ordered_func, | 73 | btrfs_func_t ordered_func, |
| 48 | btrfs_func_t ordered_free); | 74 | btrfs_func_t ordered_free); |
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index da775bfdebc9..a2e90f855d7d 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c | |||
| @@ -1395,8 +1395,8 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root, | |||
| 1395 | return -ENOMEM; | 1395 | return -ENOMEM; |
| 1396 | 1396 | ||
| 1397 | async_work->delayed_root = delayed_root; | 1397 | async_work->delayed_root = delayed_root; |
| 1398 | btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, | 1398 | btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper, |
| 1399 | NULL, NULL); | 1399 | btrfs_async_run_delayed_root, NULL, NULL); |
| 1400 | async_work->nr = nr; | 1400 | async_work->nr = nr; |
| 1401 | 1401 | ||
| 1402 | btrfs_queue_work(root->fs_info->delayed_workers, &async_work->work); | 1402 | btrfs_queue_work(root->fs_info->delayed_workers, &async_work->work); |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index d0ed9e664f7d..a1d36e62179c 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
| @@ -39,7 +39,6 @@ | |||
| 39 | #include "btrfs_inode.h" | 39 | #include "btrfs_inode.h" |
| 40 | #include "volumes.h" | 40 | #include "volumes.h" |
| 41 | #include "print-tree.h" | 41 | #include "print-tree.h" |
| 42 | #include "async-thread.h" | ||
| 43 | #include "locking.h" | 42 | #include "locking.h" |
| 44 | #include "tree-log.h" | 43 | #include "tree-log.h" |
| 45 | #include "free-space-cache.h" | 44 | #include "free-space-cache.h" |
| @@ -693,35 +692,41 @@ static void end_workqueue_bio(struct bio *bio, int err) | |||
| 693 | { | 692 | { |
| 694 | struct end_io_wq *end_io_wq = bio->bi_private; | 693 | struct end_io_wq *end_io_wq = bio->bi_private; |
| 695 | struct btrfs_fs_info *fs_info; | 694 | struct btrfs_fs_info *fs_info; |
| 695 | struct btrfs_workqueue *wq; | ||
| 696 | btrfs_work_func_t func; | ||
| 696 | 697 | ||
| 697 | fs_info = end_io_wq->info; | 698 | fs_info = end_io_wq->info; |
| 698 | end_io_wq->error = err; | 699 | end_io_wq->error = err; |
| 699 | btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL); | ||
| 700 | 700 | ||
| 701 | if (bio->bi_rw & REQ_WRITE) { | 701 | if (bio->bi_rw & REQ_WRITE) { |
| 702 | if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) | 702 | if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) { |
| 703 | btrfs_queue_work(fs_info->endio_meta_write_workers, | 703 | wq = fs_info->endio_meta_write_workers; |
| 704 | &end_io_wq->work); | 704 | func = btrfs_endio_meta_write_helper; |
| 705 | else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) | 705 | } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) { |
| 706 | btrfs_queue_work(fs_info->endio_freespace_worker, | 706 | wq = fs_info->endio_freespace_worker; |
| 707 | &end_io_wq->work); | 707 | func = btrfs_freespace_write_helper; |
| 708 | else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) | 708 | } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) { |
| 709 | btrfs_queue_work(fs_info->endio_raid56_workers, | 709 | wq = fs_info->endio_raid56_workers; |
| 710 | &end_io_wq->work); | 710 | func = btrfs_endio_raid56_helper; |
| 711 | else | 711 | } else { |
| 712 | btrfs_queue_work(fs_info->endio_write_workers, | 712 | wq = fs_info->endio_write_workers; |
| 713 | &end_io_wq->work); | 713 | func = btrfs_endio_write_helper; |
| 714 | } | ||
| 714 | } else { | 715 | } else { |
| 715 | if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) | 716 | if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) { |
| 716 | btrfs_queue_work(fs_info->endio_raid56_workers, | 717 | wq = fs_info->endio_raid56_workers; |
| 717 | &end_io_wq->work); | 718 | func = btrfs_endio_raid56_helper; |
| 718 | else if (end_io_wq->metadata) | 719 | } else if (end_io_wq->metadata) { |
| 719 | btrfs_queue_work(fs_info->endio_meta_workers, | 720 | wq = fs_info->endio_meta_workers; |
| 720 | &end_io_wq->work); | 721 | func = btrfs_endio_meta_helper; |
| 721 | else | 722 | } else { |
| 722 | btrfs_queue_work(fs_info->endio_workers, | 723 | wq = fs_info->endio_workers; |
| 723 | &end_io_wq->work); | 724 | func = btrfs_endio_helper; |
| 725 | } | ||
| 724 | } | 726 | } |
| 727 | |||
| 728 | btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL); | ||
| 729 | btrfs_queue_work(wq, &end_io_wq->work); | ||
| 725 | } | 730 | } |
| 726 | 731 | ||
| 727 | /* | 732 | /* |
| @@ -828,7 +833,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, | |||
| 828 | async->submit_bio_start = submit_bio_start; | 833 | async->submit_bio_start = submit_bio_start; |
| 829 | async->submit_bio_done = submit_bio_done; | 834 | async->submit_bio_done = submit_bio_done; |
| 830 | 835 | ||
| 831 | btrfs_init_work(&async->work, run_one_async_start, | 836 | btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start, |
| 832 | run_one_async_done, run_one_async_free); | 837 | run_one_async_done, run_one_async_free); |
| 833 | 838 | ||
| 834 | async->bio_flags = bio_flags; | 839 | async->bio_flags = bio_flags; |
| @@ -3450,7 +3455,8 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors) | |||
| 3450 | btrfs_set_stack_device_generation(dev_item, 0); | 3455 | btrfs_set_stack_device_generation(dev_item, 0); |
| 3451 | btrfs_set_stack_device_type(dev_item, dev->type); | 3456 | btrfs_set_stack_device_type(dev_item, dev->type); |
| 3452 | btrfs_set_stack_device_id(dev_item, dev->devid); | 3457 | btrfs_set_stack_device_id(dev_item, dev->devid); |
| 3453 | btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes); | 3458 | btrfs_set_stack_device_total_bytes(dev_item, |
| 3459 | dev->disk_total_bytes); | ||
| 3454 | btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used); | 3460 | btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used); |
| 3455 | btrfs_set_stack_device_io_align(dev_item, dev->io_align); | 3461 | btrfs_set_stack_device_io_align(dev_item, dev->io_align); |
| 3456 | btrfs_set_stack_device_io_width(dev_item, dev->io_width); | 3462 | btrfs_set_stack_device_io_width(dev_item, dev->io_width); |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 102ed3143976..3efe1c3877bf 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
| @@ -552,7 +552,8 @@ static int cache_block_group(struct btrfs_block_group_cache *cache, | |||
| 552 | caching_ctl->block_group = cache; | 552 | caching_ctl->block_group = cache; |
| 553 | caching_ctl->progress = cache->key.objectid; | 553 | caching_ctl->progress = cache->key.objectid; |
| 554 | atomic_set(&caching_ctl->count, 1); | 554 | atomic_set(&caching_ctl->count, 1); |
| 555 | btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL); | 555 | btrfs_init_work(&caching_ctl->work, btrfs_cache_helper, |
| 556 | caching_thread, NULL, NULL); | ||
| 556 | 557 | ||
| 557 | spin_lock(&cache->lock); | 558 | spin_lock(&cache->lock); |
| 558 | /* | 559 | /* |
| @@ -2749,8 +2750,8 @@ int btrfs_async_run_delayed_refs(struct btrfs_root *root, | |||
| 2749 | async->sync = 0; | 2750 | async->sync = 0; |
| 2750 | init_completion(&async->wait); | 2751 | init_completion(&async->wait); |
| 2751 | 2752 | ||
| 2752 | btrfs_init_work(&async->work, delayed_ref_async_start, | 2753 | btrfs_init_work(&async->work, btrfs_extent_refs_helper, |
| 2753 | NULL, NULL); | 2754 | delayed_ref_async_start, NULL, NULL); |
| 2754 | 2755 | ||
| 2755 | btrfs_queue_work(root->fs_info->extent_workers, &async->work); | 2756 | btrfs_queue_work(root->fs_info->extent_workers, &async->work); |
| 2756 | 2757 | ||
| @@ -3586,13 +3587,7 @@ static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags) | |||
| 3586 | */ | 3587 | */ |
| 3587 | static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags) | 3588 | static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags) |
| 3588 | { | 3589 | { |
| 3589 | /* | 3590 | u64 num_devices = root->fs_info->fs_devices->rw_devices; |
| 3590 | * we add in the count of missing devices because we want | ||
| 3591 | * to make sure that any RAID levels on a degraded FS | ||
| 3592 | * continue to be honored. | ||
| 3593 | */ | ||
| 3594 | u64 num_devices = root->fs_info->fs_devices->rw_devices + | ||
| 3595 | root->fs_info->fs_devices->missing_devices; | ||
| 3596 | u64 target; | 3591 | u64 target; |
| 3597 | u64 tmp; | 3592 | u64 tmp; |
| 3598 | 3593 | ||
| @@ -8440,13 +8435,7 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags) | |||
| 8440 | if (stripped) | 8435 | if (stripped) |
| 8441 | return extended_to_chunk(stripped); | 8436 | return extended_to_chunk(stripped); |
| 8442 | 8437 | ||
| 8443 | /* | 8438 | num_devices = root->fs_info->fs_devices->rw_devices; |
| 8444 | * we add in the count of missing devices because we want | ||
| 8445 | * to make sure that any RAID levels on a degraded FS | ||
| 8446 | * continue to be honored. | ||
| 8447 | */ | ||
| 8448 | num_devices = root->fs_info->fs_devices->rw_devices + | ||
| 8449 | root->fs_info->fs_devices->missing_devices; | ||
| 8450 | 8439 | ||
| 8451 | stripped = BTRFS_BLOCK_GROUP_RAID0 | | 8440 | stripped = BTRFS_BLOCK_GROUP_RAID0 | |
| 8452 | BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 | | 8441 | BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 | |
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 3e11aab9f391..af0359dcf337 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
| @@ -2532,6 +2532,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err) | |||
| 2532 | test_bit(BIO_UPTODATE, &bio->bi_flags); | 2532 | test_bit(BIO_UPTODATE, &bio->bi_flags); |
| 2533 | if (err) | 2533 | if (err) |
| 2534 | uptodate = 0; | 2534 | uptodate = 0; |
| 2535 | offset += len; | ||
| 2535 | continue; | 2536 | continue; |
| 2536 | } | 2537 | } |
| 2537 | } | 2538 | } |
| @@ -4207,8 +4208,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
| 4207 | return -ENOMEM; | 4208 | return -ENOMEM; |
| 4208 | path->leave_spinning = 1; | 4209 | path->leave_spinning = 1; |
| 4209 | 4210 | ||
| 4210 | start = ALIGN(start, BTRFS_I(inode)->root->sectorsize); | 4211 | start = round_down(start, BTRFS_I(inode)->root->sectorsize); |
| 4211 | len = ALIGN(len, BTRFS_I(inode)->root->sectorsize); | 4212 | len = round_up(max, BTRFS_I(inode)->root->sectorsize) - start; |
| 4212 | 4213 | ||
| 4213 | /* | 4214 | /* |
| 4214 | * lookup the last file extent. We're not using i_size here | 4215 | * lookup the last file extent. We're not using i_size here |
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index d3afac292d67..36861b7a6757 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
| @@ -1840,7 +1840,15 @@ int btrfs_release_file(struct inode *inode, struct file *filp) | |||
| 1840 | { | 1840 | { |
| 1841 | if (filp->private_data) | 1841 | if (filp->private_data) |
| 1842 | btrfs_ioctl_trans_end(filp); | 1842 | btrfs_ioctl_trans_end(filp); |
| 1843 | filemap_flush(inode->i_mapping); | 1843 | /* |
| 1844 | * ordered_data_close is set by settattr when we are about to truncate | ||
| 1845 | * a file from a non-zero size to a zero size. This tries to | ||
| 1846 | * flush down new bytes that may have been written if the | ||
| 1847 | * application were using truncate to replace a file in place. | ||
| 1848 | */ | ||
| 1849 | if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE, | ||
| 1850 | &BTRFS_I(inode)->runtime_flags)) | ||
| 1851 | filemap_flush(inode->i_mapping); | ||
| 1844 | return 0; | 1852 | return 0; |
| 1845 | } | 1853 | } |
| 1846 | 1854 | ||
| @@ -2088,10 +2096,9 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode, | |||
| 2088 | goto out; | 2096 | goto out; |
| 2089 | } | 2097 | } |
| 2090 | 2098 | ||
| 2091 | if (hole_mergeable(inode, leaf, path->slots[0]+1, offset, end)) { | 2099 | if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) { |
| 2092 | u64 num_bytes; | 2100 | u64 num_bytes; |
| 2093 | 2101 | ||
| 2094 | path->slots[0]++; | ||
| 2095 | key.offset = offset; | 2102 | key.offset = offset; |
| 2096 | btrfs_set_item_key_safe(root, path, &key); | 2103 | btrfs_set_item_key_safe(root, path, &key); |
| 2097 | fi = btrfs_item_ptr(leaf, path->slots[0], | 2104 | fi = btrfs_item_ptr(leaf, path->slots[0], |
| @@ -2216,7 +2223,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) | |||
| 2216 | goto out_only_mutex; | 2223 | goto out_only_mutex; |
| 2217 | } | 2224 | } |
| 2218 | 2225 | ||
| 2219 | lockstart = round_up(offset , BTRFS_I(inode)->root->sectorsize); | 2226 | lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize); |
| 2220 | lockend = round_down(offset + len, | 2227 | lockend = round_down(offset + len, |
| 2221 | BTRFS_I(inode)->root->sectorsize) - 1; | 2228 | BTRFS_I(inode)->root->sectorsize) - 1; |
| 2222 | same_page = ((offset >> PAGE_CACHE_SHIFT) == | 2229 | same_page = ((offset >> PAGE_CACHE_SHIFT) == |
| @@ -2277,7 +2284,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) | |||
| 2277 | tail_start + tail_len, 0, 1); | 2284 | tail_start + tail_len, 0, 1); |
| 2278 | if (ret) | 2285 | if (ret) |
| 2279 | goto out_only_mutex; | 2286 | goto out_only_mutex; |
| 2280 | } | 2287 | } |
| 2281 | } | 2288 | } |
| 2282 | } | 2289 | } |
| 2283 | 2290 | ||
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 03708ef3deef..9c194bd74d6e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
| @@ -1096,8 +1096,10 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, | |||
| 1096 | async_cow->end = cur_end; | 1096 | async_cow->end = cur_end; |
| 1097 | INIT_LIST_HEAD(&async_cow->extents); | 1097 | INIT_LIST_HEAD(&async_cow->extents); |
| 1098 | 1098 | ||
| 1099 | btrfs_init_work(&async_cow->work, async_cow_start, | 1099 | btrfs_init_work(&async_cow->work, |
| 1100 | async_cow_submit, async_cow_free); | 1100 | btrfs_delalloc_helper, |
| 1101 | async_cow_start, async_cow_submit, | ||
| 1102 | async_cow_free); | ||
| 1101 | 1103 | ||
| 1102 | nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >> | 1104 | nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >> |
| 1103 | PAGE_CACHE_SHIFT; | 1105 | PAGE_CACHE_SHIFT; |
| @@ -1881,7 +1883,8 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) | |||
| 1881 | 1883 | ||
| 1882 | SetPageChecked(page); | 1884 | SetPageChecked(page); |
| 1883 | page_cache_get(page); | 1885 | page_cache_get(page); |
| 1884 | btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL); | 1886 | btrfs_init_work(&fixup->work, btrfs_fixup_helper, |
| 1887 | btrfs_writepage_fixup_worker, NULL, NULL); | ||
| 1885 | fixup->page = page; | 1888 | fixup->page = page; |
| 1886 | btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work); | 1889 | btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work); |
| 1887 | return -EBUSY; | 1890 | return -EBUSY; |
| @@ -2822,7 +2825,8 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, | |||
| 2822 | struct inode *inode = page->mapping->host; | 2825 | struct inode *inode = page->mapping->host; |
| 2823 | struct btrfs_root *root = BTRFS_I(inode)->root; | 2826 | struct btrfs_root *root = BTRFS_I(inode)->root; |
| 2824 | struct btrfs_ordered_extent *ordered_extent = NULL; | 2827 | struct btrfs_ordered_extent *ordered_extent = NULL; |
| 2825 | struct btrfs_workqueue *workers; | 2828 | struct btrfs_workqueue *wq; |
| 2829 | btrfs_work_func_t func; | ||
| 2826 | 2830 | ||
| 2827 | trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); | 2831 | trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); |
| 2828 | 2832 | ||
| @@ -2831,13 +2835,17 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, | |||
| 2831 | end - start + 1, uptodate)) | 2835 | end - start + 1, uptodate)) |
| 2832 | return 0; | 2836 | return 0; |
| 2833 | 2837 | ||
| 2834 | btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL); | 2838 | if (btrfs_is_free_space_inode(inode)) { |
| 2839 | wq = root->fs_info->endio_freespace_worker; | ||
| 2840 | func = btrfs_freespace_write_helper; | ||
| 2841 | } else { | ||
| 2842 | wq = root->fs_info->endio_write_workers; | ||
| 2843 | func = btrfs_endio_write_helper; | ||
| 2844 | } | ||
| 2835 | 2845 | ||
| 2836 | if (btrfs_is_free_space_inode(inode)) | 2846 | btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL, |
| 2837 | workers = root->fs_info->endio_freespace_worker; | 2847 | NULL); |
| 2838 | else | 2848 | btrfs_queue_work(wq, &ordered_extent->work); |
| 2839 | workers = root->fs_info->endio_write_workers; | ||
| 2840 | btrfs_queue_work(workers, &ordered_extent->work); | ||
| 2841 | 2849 | ||
| 2842 | return 0; | 2850 | return 0; |
| 2843 | } | 2851 | } |
| @@ -4674,6 +4682,11 @@ static void evict_inode_truncate_pages(struct inode *inode) | |||
| 4674 | clear_bit(EXTENT_FLAG_LOGGING, &em->flags); | 4682 | clear_bit(EXTENT_FLAG_LOGGING, &em->flags); |
| 4675 | remove_extent_mapping(map_tree, em); | 4683 | remove_extent_mapping(map_tree, em); |
| 4676 | free_extent_map(em); | 4684 | free_extent_map(em); |
| 4685 | if (need_resched()) { | ||
| 4686 | write_unlock(&map_tree->lock); | ||
| 4687 | cond_resched(); | ||
| 4688 | write_lock(&map_tree->lock); | ||
| 4689 | } | ||
| 4677 | } | 4690 | } |
| 4678 | write_unlock(&map_tree->lock); | 4691 | write_unlock(&map_tree->lock); |
| 4679 | 4692 | ||
| @@ -4696,6 +4709,7 @@ static void evict_inode_truncate_pages(struct inode *inode) | |||
| 4696 | &cached_state, GFP_NOFS); | 4709 | &cached_state, GFP_NOFS); |
| 4697 | free_extent_state(state); | 4710 | free_extent_state(state); |
| 4698 | 4711 | ||
| 4712 | cond_resched(); | ||
| 4699 | spin_lock(&io_tree->lock); | 4713 | spin_lock(&io_tree->lock); |
| 4700 | } | 4714 | } |
| 4701 | spin_unlock(&io_tree->lock); | 4715 | spin_unlock(&io_tree->lock); |
| @@ -5181,6 +5195,42 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) | |||
| 5181 | iput(inode); | 5195 | iput(inode); |
| 5182 | inode = ERR_PTR(ret); | 5196 | inode = ERR_PTR(ret); |
| 5183 | } | 5197 | } |
| 5198 | /* | ||
| 5199 | * If orphan cleanup did remove any orphans, it means the tree | ||
| 5200 | * was modified and therefore the commit root is not the same as | ||
| 5201 | * the current root anymore. This is a problem, because send | ||
| 5202 | * uses the commit root and therefore can see inode items that | ||
| 5203 | * don't exist in the current root anymore, and for example make | ||
| 5204 | * calls to btrfs_iget, which will do tree lookups based on the | ||
| 5205 | * current root and not on the commit root. Those lookups will | ||
| 5206 | * fail, returning a -ESTALE error, and making send fail with | ||
| 5207 | * that error. So make sure a send does not see any orphans we | ||
| 5208 | * have just removed, and that it will see the same inodes | ||
| 5209 | * regardless of whether a transaction commit happened before | ||
| 5210 | * it started (meaning that the commit root will be the same as | ||
| 5211 | * the current root) or not. | ||
| 5212 | */ | ||
| 5213 | if (sub_root->node != sub_root->commit_root) { | ||
| 5214 | u64 sub_flags = btrfs_root_flags(&sub_root->root_item); | ||
| 5215 | |||
| 5216 | if (sub_flags & BTRFS_ROOT_SUBVOL_RDONLY) { | ||
| 5217 | struct extent_buffer *eb; | ||
| 5218 | |||
| 5219 | /* | ||
| 5220 | * Assert we can't have races between dentry | ||
| 5221 | * lookup called through the snapshot creation | ||
| 5222 | * ioctl and the VFS. | ||
| 5223 | */ | ||
| 5224 | ASSERT(mutex_is_locked(&dir->i_mutex)); | ||
| 5225 | |||
| 5226 | down_write(&root->fs_info->commit_root_sem); | ||
| 5227 | eb = sub_root->commit_root; | ||
| 5228 | sub_root->commit_root = | ||
| 5229 | btrfs_root_node(sub_root); | ||
| 5230 | up_write(&root->fs_info->commit_root_sem); | ||
| 5231 | free_extent_buffer(eb); | ||
| 5232 | } | ||
| 5233 | } | ||
| 5184 | } | 5234 | } |
| 5185 | 5235 | ||
| 5186 | return inode; | 5236 | return inode; |
| @@ -5606,6 +5656,13 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, | |||
| 5606 | } | 5656 | } |
| 5607 | 5657 | ||
| 5608 | /* | 5658 | /* |
| 5659 | * O_TMPFILE, set link count to 0, so that after this point, | ||
| 5660 | * we fill in an inode item with the correct link count. | ||
| 5661 | */ | ||
| 5662 | if (!name) | ||
| 5663 | set_nlink(inode, 0); | ||
| 5664 | |||
| 5665 | /* | ||
| 5609 | * we have to initialize this early, so we can reclaim the inode | 5666 | * we have to initialize this early, so we can reclaim the inode |
| 5610 | * number if we fail afterwards in this function. | 5667 | * number if we fail afterwards in this function. |
| 5611 | */ | 5668 | */ |
| @@ -6097,14 +6154,14 @@ out_fail: | |||
| 6097 | static int merge_extent_mapping(struct extent_map_tree *em_tree, | 6154 | static int merge_extent_mapping(struct extent_map_tree *em_tree, |
| 6098 | struct extent_map *existing, | 6155 | struct extent_map *existing, |
| 6099 | struct extent_map *em, | 6156 | struct extent_map *em, |
| 6100 | u64 map_start, u64 map_len) | 6157 | u64 map_start) |
| 6101 | { | 6158 | { |
| 6102 | u64 start_diff; | 6159 | u64 start_diff; |
| 6103 | 6160 | ||
| 6104 | BUG_ON(map_start < em->start || map_start >= extent_map_end(em)); | 6161 | BUG_ON(map_start < em->start || map_start >= extent_map_end(em)); |
| 6105 | start_diff = map_start - em->start; | 6162 | start_diff = map_start - em->start; |
| 6106 | em->start = map_start; | 6163 | em->start = map_start; |
| 6107 | em->len = map_len; | 6164 | em->len = existing->start - em->start; |
| 6108 | if (em->block_start < EXTENT_MAP_LAST_BYTE && | 6165 | if (em->block_start < EXTENT_MAP_LAST_BYTE && |
| 6109 | !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { | 6166 | !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { |
| 6110 | em->block_start += start_diff; | 6167 | em->block_start += start_diff; |
| @@ -6275,6 +6332,8 @@ next: | |||
| 6275 | goto not_found; | 6332 | goto not_found; |
| 6276 | if (start + len <= found_key.offset) | 6333 | if (start + len <= found_key.offset) |
| 6277 | goto not_found; | 6334 | goto not_found; |
| 6335 | if (start > found_key.offset) | ||
| 6336 | goto next; | ||
| 6278 | em->start = start; | 6337 | em->start = start; |
| 6279 | em->orig_start = start; | 6338 | em->orig_start = start; |
| 6280 | em->len = found_key.offset - start; | 6339 | em->len = found_key.offset - start; |
| @@ -6390,8 +6449,7 @@ insert: | |||
| 6390 | em->len); | 6449 | em->len); |
| 6391 | if (existing) { | 6450 | if (existing) { |
| 6392 | err = merge_extent_mapping(em_tree, existing, | 6451 | err = merge_extent_mapping(em_tree, existing, |
| 6393 | em, start, | 6452 | em, start); |
| 6394 | root->sectorsize); | ||
| 6395 | free_extent_map(existing); | 6453 | free_extent_map(existing); |
| 6396 | if (err) { | 6454 | if (err) { |
| 6397 | free_extent_map(em); | 6455 | free_extent_map(em); |
| @@ -7158,7 +7216,8 @@ again: | |||
| 7158 | if (!ret) | 7216 | if (!ret) |
| 7159 | goto out_test; | 7217 | goto out_test; |
| 7160 | 7218 | ||
| 7161 | btrfs_init_work(&ordered->work, finish_ordered_fn, NULL, NULL); | 7219 | btrfs_init_work(&ordered->work, btrfs_endio_write_helper, |
| 7220 | finish_ordered_fn, NULL, NULL); | ||
| 7162 | btrfs_queue_work(root->fs_info->endio_write_workers, | 7221 | btrfs_queue_work(root->fs_info->endio_write_workers, |
| 7163 | &ordered->work); | 7222 | &ordered->work); |
| 7164 | out_test: | 7223 | out_test: |
| @@ -7306,10 +7365,8 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, | |||
| 7306 | map_length = orig_bio->bi_iter.bi_size; | 7365 | map_length = orig_bio->bi_iter.bi_size; |
| 7307 | ret = btrfs_map_block(root->fs_info, rw, start_sector << 9, | 7366 | ret = btrfs_map_block(root->fs_info, rw, start_sector << 9, |
| 7308 | &map_length, NULL, 0); | 7367 | &map_length, NULL, 0); |
| 7309 | if (ret) { | 7368 | if (ret) |
| 7310 | bio_put(orig_bio); | ||
| 7311 | return -EIO; | 7369 | return -EIO; |
| 7312 | } | ||
| 7313 | 7370 | ||
| 7314 | if (map_length >= orig_bio->bi_iter.bi_size) { | 7371 | if (map_length >= orig_bio->bi_iter.bi_size) { |
| 7315 | bio = orig_bio; | 7372 | bio = orig_bio; |
| @@ -7326,6 +7383,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, | |||
| 7326 | bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); | 7383 | bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); |
| 7327 | if (!bio) | 7384 | if (!bio) |
| 7328 | return -ENOMEM; | 7385 | return -ENOMEM; |
| 7386 | |||
| 7329 | bio->bi_private = dip; | 7387 | bio->bi_private = dip; |
| 7330 | bio->bi_end_io = btrfs_end_dio_bio; | 7388 | bio->bi_end_io = btrfs_end_dio_bio; |
| 7331 | atomic_inc(&dip->pending_bios); | 7389 | atomic_inc(&dip->pending_bios); |
| @@ -7534,7 +7592,8 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, | |||
| 7534 | count = iov_iter_count(iter); | 7592 | count = iov_iter_count(iter); |
| 7535 | if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, | 7593 | if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, |
| 7536 | &BTRFS_I(inode)->runtime_flags)) | 7594 | &BTRFS_I(inode)->runtime_flags)) |
| 7537 | filemap_fdatawrite_range(inode->i_mapping, offset, count); | 7595 | filemap_fdatawrite_range(inode->i_mapping, offset, |
| 7596 | offset + count - 1); | ||
| 7538 | 7597 | ||
| 7539 | if (rw & WRITE) { | 7598 | if (rw & WRITE) { |
| 7540 | /* | 7599 | /* |
| @@ -8495,7 +8554,9 @@ struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode, | |||
| 8495 | work->inode = inode; | 8554 | work->inode = inode; |
| 8496 | work->wait = wait; | 8555 | work->wait = wait; |
| 8497 | work->delay_iput = delay_iput; | 8556 | work->delay_iput = delay_iput; |
| 8498 | btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL); | 8557 | WARN_ON_ONCE(!inode); |
| 8558 | btrfs_init_work(&work->work, btrfs_flush_delalloc_helper, | ||
| 8559 | btrfs_run_delalloc_work, NULL, NULL); | ||
| 8499 | 8560 | ||
| 8500 | return work; | 8561 | return work; |
| 8501 | } | 8562 | } |
| @@ -8979,6 +9040,14 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) | |||
| 8979 | if (ret) | 9040 | if (ret) |
| 8980 | goto out; | 9041 | goto out; |
| 8981 | 9042 | ||
| 9043 | /* | ||
| 9044 | * We set number of links to 0 in btrfs_new_inode(), and here we set | ||
| 9045 | * it to 1 because d_tmpfile() will issue a warning if the count is 0, | ||
| 9046 | * through: | ||
| 9047 | * | ||
| 9048 | * d_tmpfile() -> inode_dec_link_count() -> drop_nlink() | ||
| 9049 | */ | ||
| 9050 | set_nlink(inode, 1); | ||
| 8982 | d_tmpfile(dentry, inode); | 9051 | d_tmpfile(dentry, inode); |
| 8983 | mark_inode_dirty(inode); | 9052 | mark_inode_dirty(inode); |
| 8984 | 9053 | ||
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 47aceb494d1d..fce6fd0e3f50 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
| @@ -711,39 +711,6 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, | |||
| 711 | if (ret) | 711 | if (ret) |
| 712 | goto fail; | 712 | goto fail; |
| 713 | 713 | ||
| 714 | ret = btrfs_orphan_cleanup(pending_snapshot->snap); | ||
| 715 | if (ret) | ||
| 716 | goto fail; | ||
| 717 | |||
| 718 | /* | ||
| 719 | * If orphan cleanup did remove any orphans, it means the tree was | ||
| 720 | * modified and therefore the commit root is not the same as the | ||
| 721 | * current root anymore. This is a problem, because send uses the | ||
| 722 | * commit root and therefore can see inode items that don't exist | ||
| 723 | * in the current root anymore, and for example make calls to | ||
| 724 | * btrfs_iget, which will do tree lookups based on the current root | ||
| 725 | * and not on the commit root. Those lookups will fail, returning a | ||
| 726 | * -ESTALE error, and making send fail with that error. So make sure | ||
| 727 | * a send does not see any orphans we have just removed, and that it | ||
| 728 | * will see the same inodes regardless of whether a transaction | ||
| 729 | * commit happened before it started (meaning that the commit root | ||
| 730 | * will be the same as the current root) or not. | ||
| 731 | */ | ||
| 732 | if (readonly && pending_snapshot->snap->node != | ||
| 733 | pending_snapshot->snap->commit_root) { | ||
| 734 | trans = btrfs_join_transaction(pending_snapshot->snap); | ||
| 735 | if (IS_ERR(trans) && PTR_ERR(trans) != -ENOENT) { | ||
| 736 | ret = PTR_ERR(trans); | ||
| 737 | goto fail; | ||
| 738 | } | ||
| 739 | if (!IS_ERR(trans)) { | ||
| 740 | ret = btrfs_commit_transaction(trans, | ||
| 741 | pending_snapshot->snap); | ||
| 742 | if (ret) | ||
| 743 | goto fail; | ||
| 744 | } | ||
| 745 | } | ||
| 746 | |||
| 747 | inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry); | 714 | inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry); |
| 748 | if (IS_ERR(inode)) { | 715 | if (IS_ERR(inode)) { |
| 749 | ret = PTR_ERR(inode); | 716 | ret = PTR_ERR(inode); |
| @@ -3527,7 +3494,8 @@ process_slot: | |||
| 3527 | btrfs_mark_buffer_dirty(leaf); | 3494 | btrfs_mark_buffer_dirty(leaf); |
| 3528 | btrfs_release_path(path); | 3495 | btrfs_release_path(path); |
| 3529 | 3496 | ||
| 3530 | last_dest_end = new_key.offset + datal; | 3497 | last_dest_end = ALIGN(new_key.offset + datal, |
| 3498 | root->sectorsize); | ||
| 3531 | ret = clone_finish_inode_update(trans, inode, | 3499 | ret = clone_finish_inode_update(trans, inode, |
| 3532 | last_dest_end, | 3500 | last_dest_end, |
| 3533 | destoff, olen); | 3501 | destoff, olen); |
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 963895c1f801..ac734ec4cc20 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c | |||
| @@ -615,6 +615,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr) | |||
| 615 | spin_unlock(&root->ordered_extent_lock); | 615 | spin_unlock(&root->ordered_extent_lock); |
| 616 | 616 | ||
| 617 | btrfs_init_work(&ordered->flush_work, | 617 | btrfs_init_work(&ordered->flush_work, |
| 618 | btrfs_flush_delalloc_helper, | ||
| 618 | btrfs_run_ordered_extent_work, NULL, NULL); | 619 | btrfs_run_ordered_extent_work, NULL, NULL); |
| 619 | list_add_tail(&ordered->work_list, &works); | 620 | list_add_tail(&ordered->work_list, &works); |
| 620 | btrfs_queue_work(root->fs_info->flush_workers, | 621 | btrfs_queue_work(root->fs_info->flush_workers, |
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index b497498484be..ded5c601d916 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c | |||
| @@ -1973,7 +1973,7 @@ static int qgroup_subtree_accounting(struct btrfs_trans_handle *trans, | |||
| 1973 | elem.seq, &roots); | 1973 | elem.seq, &roots); |
| 1974 | btrfs_put_tree_mod_seq(fs_info, &elem); | 1974 | btrfs_put_tree_mod_seq(fs_info, &elem); |
| 1975 | if (ret < 0) | 1975 | if (ret < 0) |
| 1976 | return ret; | 1976 | goto out; |
| 1977 | 1977 | ||
| 1978 | if (roots->nnodes != 1) | 1978 | if (roots->nnodes != 1) |
| 1979 | goto out; | 1979 | goto out; |
| @@ -2720,6 +2720,7 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, | |||
| 2720 | memset(&fs_info->qgroup_rescan_work, 0, | 2720 | memset(&fs_info->qgroup_rescan_work, 0, |
| 2721 | sizeof(fs_info->qgroup_rescan_work)); | 2721 | sizeof(fs_info->qgroup_rescan_work)); |
| 2722 | btrfs_init_work(&fs_info->qgroup_rescan_work, | 2722 | btrfs_init_work(&fs_info->qgroup_rescan_work, |
| 2723 | btrfs_qgroup_rescan_helper, | ||
| 2723 | btrfs_qgroup_rescan_worker, NULL, NULL); | 2724 | btrfs_qgroup_rescan_worker, NULL, NULL); |
| 2724 | 2725 | ||
| 2725 | if (ret) { | 2726 | if (ret) { |
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 4a88f073fdd7..0a6b6e4bcbb9 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c | |||
| @@ -1416,7 +1416,8 @@ cleanup: | |||
| 1416 | 1416 | ||
| 1417 | static void async_rmw_stripe(struct btrfs_raid_bio *rbio) | 1417 | static void async_rmw_stripe(struct btrfs_raid_bio *rbio) |
| 1418 | { | 1418 | { |
| 1419 | btrfs_init_work(&rbio->work, rmw_work, NULL, NULL); | 1419 | btrfs_init_work(&rbio->work, btrfs_rmw_helper, |
| 1420 | rmw_work, NULL, NULL); | ||
| 1420 | 1421 | ||
| 1421 | btrfs_queue_work(rbio->fs_info->rmw_workers, | 1422 | btrfs_queue_work(rbio->fs_info->rmw_workers, |
| 1422 | &rbio->work); | 1423 | &rbio->work); |
| @@ -1424,7 +1425,8 @@ static void async_rmw_stripe(struct btrfs_raid_bio *rbio) | |||
| 1424 | 1425 | ||
| 1425 | static void async_read_rebuild(struct btrfs_raid_bio *rbio) | 1426 | static void async_read_rebuild(struct btrfs_raid_bio *rbio) |
| 1426 | { | 1427 | { |
| 1427 | btrfs_init_work(&rbio->work, read_rebuild_work, NULL, NULL); | 1428 | btrfs_init_work(&rbio->work, btrfs_rmw_helper, |
| 1429 | read_rebuild_work, NULL, NULL); | ||
| 1428 | 1430 | ||
| 1429 | btrfs_queue_work(rbio->fs_info->rmw_workers, | 1431 | btrfs_queue_work(rbio->fs_info->rmw_workers, |
| 1430 | &rbio->work); | 1432 | &rbio->work); |
| @@ -1665,7 +1667,8 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule) | |||
| 1665 | plug = container_of(cb, struct btrfs_plug_cb, cb); | 1667 | plug = container_of(cb, struct btrfs_plug_cb, cb); |
| 1666 | 1668 | ||
| 1667 | if (from_schedule) { | 1669 | if (from_schedule) { |
| 1668 | btrfs_init_work(&plug->work, unplug_work, NULL, NULL); | 1670 | btrfs_init_work(&plug->work, btrfs_rmw_helper, |
| 1671 | unplug_work, NULL, NULL); | ||
| 1669 | btrfs_queue_work(plug->info->rmw_workers, | 1672 | btrfs_queue_work(plug->info->rmw_workers, |
| 1670 | &plug->work); | 1673 | &plug->work); |
| 1671 | return; | 1674 | return; |
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index 09230cf3a244..20408c6b665a 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c | |||
| @@ -798,7 +798,8 @@ static void reada_start_machine(struct btrfs_fs_info *fs_info) | |||
| 798 | /* FIXME we cannot handle this properly right now */ | 798 | /* FIXME we cannot handle this properly right now */ |
| 799 | BUG(); | 799 | BUG(); |
| 800 | } | 800 | } |
| 801 | btrfs_init_work(&rmw->work, reada_start_machine_worker, NULL, NULL); | 801 | btrfs_init_work(&rmw->work, btrfs_readahead_helper, |
| 802 | reada_start_machine_worker, NULL, NULL); | ||
| 802 | rmw->fs_info = fs_info; | 803 | rmw->fs_info = fs_info; |
| 803 | 804 | ||
| 804 | btrfs_queue_work(fs_info->readahead_workers, &rmw->work); | 805 | btrfs_queue_work(fs_info->readahead_workers, &rmw->work); |
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index b6d198f5181e..f4a41f37be22 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c | |||
| @@ -428,8 +428,8 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace) | |||
| 428 | sbio->index = i; | 428 | sbio->index = i; |
| 429 | sbio->sctx = sctx; | 429 | sbio->sctx = sctx; |
| 430 | sbio->page_count = 0; | 430 | sbio->page_count = 0; |
| 431 | btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, | 431 | btrfs_init_work(&sbio->work, btrfs_scrub_helper, |
| 432 | NULL, NULL); | 432 | scrub_bio_end_io_worker, NULL, NULL); |
| 433 | 433 | ||
| 434 | if (i != SCRUB_BIOS_PER_SCTX - 1) | 434 | if (i != SCRUB_BIOS_PER_SCTX - 1) |
| 435 | sctx->bios[i]->next_free = i + 1; | 435 | sctx->bios[i]->next_free = i + 1; |
| @@ -999,8 +999,8 @@ nodatasum_case: | |||
| 999 | fixup_nodatasum->root = fs_info->extent_root; | 999 | fixup_nodatasum->root = fs_info->extent_root; |
| 1000 | fixup_nodatasum->mirror_num = failed_mirror_index + 1; | 1000 | fixup_nodatasum->mirror_num = failed_mirror_index + 1; |
| 1001 | scrub_pending_trans_workers_inc(sctx); | 1001 | scrub_pending_trans_workers_inc(sctx); |
| 1002 | btrfs_init_work(&fixup_nodatasum->work, scrub_fixup_nodatasum, | 1002 | btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper, |
| 1003 | NULL, NULL); | 1003 | scrub_fixup_nodatasum, NULL, NULL); |
| 1004 | btrfs_queue_work(fs_info->scrub_workers, | 1004 | btrfs_queue_work(fs_info->scrub_workers, |
| 1005 | &fixup_nodatasum->work); | 1005 | &fixup_nodatasum->work); |
| 1006 | goto out; | 1006 | goto out; |
| @@ -1616,7 +1616,8 @@ static void scrub_wr_bio_end_io(struct bio *bio, int err) | |||
| 1616 | sbio->err = err; | 1616 | sbio->err = err; |
| 1617 | sbio->bio = bio; | 1617 | sbio->bio = bio; |
| 1618 | 1618 | ||
| 1619 | btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL); | 1619 | btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper, |
| 1620 | scrub_wr_bio_end_io_worker, NULL, NULL); | ||
| 1620 | btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work); | 1621 | btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work); |
| 1621 | } | 1622 | } |
| 1622 | 1623 | ||
| @@ -2904,6 +2905,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, | |||
| 2904 | struct scrub_ctx *sctx; | 2905 | struct scrub_ctx *sctx; |
| 2905 | int ret; | 2906 | int ret; |
| 2906 | struct btrfs_device *dev; | 2907 | struct btrfs_device *dev; |
| 2908 | struct rcu_string *name; | ||
| 2907 | 2909 | ||
| 2908 | if (btrfs_fs_closing(fs_info)) | 2910 | if (btrfs_fs_closing(fs_info)) |
| 2909 | return -EINVAL; | 2911 | return -EINVAL; |
| @@ -2965,6 +2967,16 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, | |||
| 2965 | return -ENODEV; | 2967 | return -ENODEV; |
| 2966 | } | 2968 | } |
| 2967 | 2969 | ||
| 2970 | if (!is_dev_replace && !readonly && !dev->writeable) { | ||
| 2971 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); | ||
| 2972 | rcu_read_lock(); | ||
| 2973 | name = rcu_dereference(dev->name); | ||
| 2974 | btrfs_err(fs_info, "scrub: device %s is not writable", | ||
| 2975 | name->str); | ||
| 2976 | rcu_read_unlock(); | ||
| 2977 | return -EROFS; | ||
| 2978 | } | ||
| 2979 | |||
| 2968 | mutex_lock(&fs_info->scrub_lock); | 2980 | mutex_lock(&fs_info->scrub_lock); |
| 2969 | if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) { | 2981 | if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) { |
| 2970 | mutex_unlock(&fs_info->scrub_lock); | 2982 | mutex_unlock(&fs_info->scrub_lock); |
| @@ -3203,7 +3215,8 @@ static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len, | |||
| 3203 | nocow_ctx->len = len; | 3215 | nocow_ctx->len = len; |
| 3204 | nocow_ctx->mirror_num = mirror_num; | 3216 | nocow_ctx->mirror_num = mirror_num; |
| 3205 | nocow_ctx->physical_for_dev_replace = physical_for_dev_replace; | 3217 | nocow_ctx->physical_for_dev_replace = physical_for_dev_replace; |
| 3206 | btrfs_init_work(&nocow_ctx->work, copy_nocow_pages_worker, NULL, NULL); | 3218 | btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper, |
| 3219 | copy_nocow_pages_worker, NULL, NULL); | ||
| 3207 | INIT_LIST_HEAD(&nocow_ctx->inodes); | 3220 | INIT_LIST_HEAD(&nocow_ctx->inodes); |
| 3208 | btrfs_queue_work(fs_info->scrub_nocow_workers, | 3221 | btrfs_queue_work(fs_info->scrub_nocow_workers, |
| 3209 | &nocow_ctx->work); | 3222 | &nocow_ctx->work); |
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 78699364f537..12e53556e214 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c | |||
| @@ -614,7 +614,7 @@ int btrfs_kobj_rm_device(struct btrfs_fs_info *fs_info, | |||
| 614 | if (!fs_info->device_dir_kobj) | 614 | if (!fs_info->device_dir_kobj) |
| 615 | return -EINVAL; | 615 | return -EINVAL; |
| 616 | 616 | ||
| 617 | if (one_device) { | 617 | if (one_device && one_device->bdev) { |
| 618 | disk = one_device->bdev->bd_part; | 618 | disk = one_device->bdev->bd_part; |
| 619 | disk_kobj = &part_to_dev(disk)->kobj; | 619 | disk_kobj = &part_to_dev(disk)->kobj; |
| 620 | 620 | ||
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 9e1f2cd5e67a..7e0e6e3029dd 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
| @@ -3298,7 +3298,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, | |||
| 3298 | struct list_head ordered_sums; | 3298 | struct list_head ordered_sums; |
| 3299 | int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; | 3299 | int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; |
| 3300 | bool has_extents = false; | 3300 | bool has_extents = false; |
| 3301 | bool need_find_last_extent = (*last_extent == 0); | 3301 | bool need_find_last_extent = true; |
| 3302 | bool done = false; | 3302 | bool done = false; |
| 3303 | 3303 | ||
| 3304 | INIT_LIST_HEAD(&ordered_sums); | 3304 | INIT_LIST_HEAD(&ordered_sums); |
| @@ -3352,8 +3352,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, | |||
| 3352 | */ | 3352 | */ |
| 3353 | if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) { | 3353 | if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) { |
| 3354 | has_extents = true; | 3354 | has_extents = true; |
| 3355 | if (need_find_last_extent && | 3355 | if (first_key.objectid == (u64)-1) |
| 3356 | first_key.objectid == (u64)-1) | ||
| 3357 | first_key = ins_keys[i]; | 3356 | first_key = ins_keys[i]; |
| 3358 | } else { | 3357 | } else { |
| 3359 | need_find_last_extent = false; | 3358 | need_find_last_extent = false; |
| @@ -3427,6 +3426,16 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, | |||
| 3427 | if (!has_extents) | 3426 | if (!has_extents) |
| 3428 | return ret; | 3427 | return ret; |
| 3429 | 3428 | ||
| 3429 | if (need_find_last_extent && *last_extent == first_key.offset) { | ||
| 3430 | /* | ||
| 3431 | * We don't have any leafs between our current one and the one | ||
| 3432 | * we processed before that can have file extent items for our | ||
| 3433 | * inode (and have a generation number smaller than our current | ||
| 3434 | * transaction id). | ||
| 3435 | */ | ||
| 3436 | need_find_last_extent = false; | ||
| 3437 | } | ||
| 3438 | |||
| 3430 | /* | 3439 | /* |
| 3431 | * Because we use btrfs_search_forward we could skip leaves that were | 3440 | * Because we use btrfs_search_forward we could skip leaves that were |
| 3432 | * not modified and then assume *last_extent is valid when it really | 3441 | * not modified and then assume *last_extent is valid when it really |
| @@ -3537,7 +3546,7 @@ fill_holes: | |||
| 3537 | 0, 0); | 3546 | 0, 0); |
| 3538 | if (ret) | 3547 | if (ret) |
| 3539 | break; | 3548 | break; |
| 3540 | *last_extent = offset + len; | 3549 | *last_extent = extent_end; |
| 3541 | } | 3550 | } |
| 3542 | /* | 3551 | /* |
| 3543 | * Need to let the callers know we dropped the path so they should | 3552 | * Need to let the callers know we dropped the path so they should |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 6cb82f62cb7c..340a92d08e84 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
| @@ -508,6 +508,44 @@ static noinline int device_list_add(const char *path, | |||
| 508 | ret = 1; | 508 | ret = 1; |
| 509 | device->fs_devices = fs_devices; | 509 | device->fs_devices = fs_devices; |
| 510 | } else if (!device->name || strcmp(device->name->str, path)) { | 510 | } else if (!device->name || strcmp(device->name->str, path)) { |
| 511 | /* | ||
| 512 | * When FS is already mounted. | ||
| 513 | * 1. If you are here and if the device->name is NULL that | ||
| 514 | * means this device was missing at time of FS mount. | ||
| 515 | * 2. If you are here and if the device->name is different | ||
| 516 | * from 'path' that means either | ||
| 517 | * a. The same device disappeared and reappeared with | ||
| 518 | * different name. or | ||
| 519 | * b. The missing-disk-which-was-replaced, has | ||
| 520 | * reappeared now. | ||
| 521 | * | ||
| 522 | * We must allow 1 and 2a above. But 2b would be a spurious | ||
| 523 | * and unintentional. | ||
| 524 | * | ||
| 525 | * Further in case of 1 and 2a above, the disk at 'path' | ||
| 526 | * would have missed some transaction when it was away and | ||
| 527 | * in case of 2a the stale bdev has to be updated as well. | ||
| 528 | * 2b must not be allowed at all time. | ||
| 529 | */ | ||
| 530 | |||
| 531 | /* | ||
| 532 | * As of now don't allow update to btrfs_fs_device through | ||
| 533 | * the btrfs dev scan cli, after FS has been mounted. | ||
| 534 | */ | ||
| 535 | if (fs_devices->opened) { | ||
| 536 | return -EBUSY; | ||
| 537 | } else { | ||
| 538 | /* | ||
| 539 | * That is if the FS is _not_ mounted and if you | ||
| 540 | * are here, that means there is more than one | ||
| 541 | * disk with same uuid and devid.We keep the one | ||
| 542 | * with larger generation number or the last-in if | ||
| 543 | * generation are equal. | ||
| 544 | */ | ||
| 545 | if (found_transid < device->generation) | ||
| 546 | return -EEXIST; | ||
| 547 | } | ||
| 548 | |||
| 511 | name = rcu_string_strdup(path, GFP_NOFS); | 549 | name = rcu_string_strdup(path, GFP_NOFS); |
| 512 | if (!name) | 550 | if (!name) |
| 513 | return -ENOMEM; | 551 | return -ENOMEM; |
| @@ -519,6 +557,15 @@ static noinline int device_list_add(const char *path, | |||
| 519 | } | 557 | } |
| 520 | } | 558 | } |
| 521 | 559 | ||
| 560 | /* | ||
| 561 | * Unmount does not free the btrfs_device struct but would zero | ||
| 562 | * generation along with most of the other members. So just update | ||
| 563 | * it back. We need it to pick the disk with largest generation | ||
| 564 | * (as above). | ||
| 565 | */ | ||
| 566 | if (!fs_devices->opened) | ||
| 567 | device->generation = found_transid; | ||
| 568 | |||
| 522 | if (found_transid > fs_devices->latest_trans) { | 569 | if (found_transid > fs_devices->latest_trans) { |
| 523 | fs_devices->latest_devid = devid; | 570 | fs_devices->latest_devid = devid; |
| 524 | fs_devices->latest_trans = found_transid; | 571 | fs_devices->latest_trans = found_transid; |
| @@ -1436,7 +1483,7 @@ static int btrfs_add_device(struct btrfs_trans_handle *trans, | |||
| 1436 | btrfs_set_device_io_align(leaf, dev_item, device->io_align); | 1483 | btrfs_set_device_io_align(leaf, dev_item, device->io_align); |
| 1437 | btrfs_set_device_io_width(leaf, dev_item, device->io_width); | 1484 | btrfs_set_device_io_width(leaf, dev_item, device->io_width); |
| 1438 | btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); | 1485 | btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); |
| 1439 | btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes); | 1486 | btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes); |
| 1440 | btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used); | 1487 | btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used); |
| 1441 | btrfs_set_device_group(leaf, dev_item, 0); | 1488 | btrfs_set_device_group(leaf, dev_item, 0); |
| 1442 | btrfs_set_device_seek_speed(leaf, dev_item, 0); | 1489 | btrfs_set_device_seek_speed(leaf, dev_item, 0); |
| @@ -1671,7 +1718,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) | |||
| 1671 | device->fs_devices->total_devices--; | 1718 | device->fs_devices->total_devices--; |
| 1672 | 1719 | ||
| 1673 | if (device->missing) | 1720 | if (device->missing) |
| 1674 | root->fs_info->fs_devices->missing_devices--; | 1721 | device->fs_devices->missing_devices--; |
| 1675 | 1722 | ||
| 1676 | next_device = list_entry(root->fs_info->fs_devices->devices.next, | 1723 | next_device = list_entry(root->fs_info->fs_devices->devices.next, |
| 1677 | struct btrfs_device, dev_list); | 1724 | struct btrfs_device, dev_list); |
| @@ -1801,8 +1848,12 @@ void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info, | |||
| 1801 | if (srcdev->bdev) { | 1848 | if (srcdev->bdev) { |
| 1802 | fs_info->fs_devices->open_devices--; | 1849 | fs_info->fs_devices->open_devices--; |
| 1803 | 1850 | ||
| 1804 | /* zero out the old super */ | 1851 | /* |
| 1805 | btrfs_scratch_superblock(srcdev); | 1852 | * zero out the old super if it is not writable |
| 1853 | * (e.g. seed device) | ||
| 1854 | */ | ||
| 1855 | if (srcdev->writeable) | ||
| 1856 | btrfs_scratch_superblock(srcdev); | ||
| 1806 | } | 1857 | } |
| 1807 | 1858 | ||
| 1808 | call_rcu(&srcdev->rcu, free_device); | 1859 | call_rcu(&srcdev->rcu, free_device); |
| @@ -1941,6 +1992,9 @@ static int btrfs_prepare_sprout(struct btrfs_root *root) | |||
| 1941 | fs_devices->seeding = 0; | 1992 | fs_devices->seeding = 0; |
| 1942 | fs_devices->num_devices = 0; | 1993 | fs_devices->num_devices = 0; |
| 1943 | fs_devices->open_devices = 0; | 1994 | fs_devices->open_devices = 0; |
| 1995 | fs_devices->missing_devices = 0; | ||
| 1996 | fs_devices->num_can_discard = 0; | ||
| 1997 | fs_devices->rotating = 0; | ||
| 1944 | fs_devices->seed = seed_devices; | 1998 | fs_devices->seed = seed_devices; |
| 1945 | 1999 | ||
| 1946 | generate_random_uuid(fs_devices->fsid); | 2000 | generate_random_uuid(fs_devices->fsid); |
| @@ -5800,7 +5854,8 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, | |||
| 5800 | else | 5854 | else |
| 5801 | generate_random_uuid(dev->uuid); | 5855 | generate_random_uuid(dev->uuid); |
| 5802 | 5856 | ||
| 5803 | btrfs_init_work(&dev->work, pending_bios_fn, NULL, NULL); | 5857 | btrfs_init_work(&dev->work, btrfs_submit_helper, |
| 5858 | pending_bios_fn, NULL, NULL); | ||
| 5804 | 5859 | ||
| 5805 | return dev; | 5860 | return dev; |
| 5806 | } | 5861 | } |
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index ba491926df5f..be7cbce6e4c7 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c | |||
| @@ -116,7 +116,7 @@ __nfs_iocounter_wait(struct nfs_io_counter *c) | |||
| 116 | if (atomic_read(&c->io_count) == 0) | 116 | if (atomic_read(&c->io_count) == 0) |
| 117 | break; | 117 | break; |
| 118 | ret = nfs_wait_bit_killable(&q.key); | 118 | ret = nfs_wait_bit_killable(&q.key); |
| 119 | } while (atomic_read(&c->io_count) != 0); | 119 | } while (atomic_read(&c->io_count) != 0 && !ret); |
| 120 | finish_wait(wq, &q.wait); | 120 | finish_wait(wq, &q.wait); |
| 121 | return ret; | 121 | return ret; |
| 122 | } | 122 | } |
| @@ -139,26 +139,49 @@ nfs_iocounter_wait(struct nfs_io_counter *c) | |||
| 139 | /* | 139 | /* |
| 140 | * nfs_page_group_lock - lock the head of the page group | 140 | * nfs_page_group_lock - lock the head of the page group |
| 141 | * @req - request in group that is to be locked | 141 | * @req - request in group that is to be locked |
| 142 | * @nonblock - if true don't block waiting for lock | ||
| 142 | * | 143 | * |
| 143 | * this lock must be held if modifying the page group list | 144 | * this lock must be held if modifying the page group list |
| 144 | * | 145 | * |
| 145 | * returns result from wait_on_bit_lock: 0 on success, < 0 on error | 146 | * return 0 on success, < 0 on error: -EDELAY if nonblocking or the |
| 147 | * result from wait_on_bit_lock | ||
| 148 | * | ||
| 149 | * NOTE: calling with nonblock=false should always have set the | ||
| 150 | * lock bit (see fs/buffer.c and other uses of wait_on_bit_lock | ||
| 151 | * with TASK_UNINTERRUPTIBLE), so there is no need to check the result. | ||
| 146 | */ | 152 | */ |
| 147 | int | 153 | int |
| 148 | nfs_page_group_lock(struct nfs_page *req, bool wait) | 154 | nfs_page_group_lock(struct nfs_page *req, bool nonblock) |
| 149 | { | 155 | { |
| 150 | struct nfs_page *head = req->wb_head; | 156 | struct nfs_page *head = req->wb_head; |
| 151 | int ret; | ||
| 152 | 157 | ||
| 153 | WARN_ON_ONCE(head != head->wb_head); | 158 | WARN_ON_ONCE(head != head->wb_head); |
| 154 | 159 | ||
| 155 | do { | 160 | if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags)) |
| 156 | ret = wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK, | 161 | return 0; |
| 157 | TASK_UNINTERRUPTIBLE); | ||
| 158 | } while (wait && ret != 0); | ||
| 159 | 162 | ||
| 160 | WARN_ON_ONCE(ret > 0); | 163 | if (!nonblock) |
| 161 | return ret; | 164 | return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK, |
| 165 | TASK_UNINTERRUPTIBLE); | ||
| 166 | |||
| 167 | return -EAGAIN; | ||
| 168 | } | ||
| 169 | |||
| 170 | /* | ||
| 171 | * nfs_page_group_lock_wait - wait for the lock to clear, but don't grab it | ||
| 172 | * @req - a request in the group | ||
| 173 | * | ||
| 174 | * This is a blocking call to wait for the group lock to be cleared. | ||
| 175 | */ | ||
| 176 | void | ||
| 177 | nfs_page_group_lock_wait(struct nfs_page *req) | ||
| 178 | { | ||
| 179 | struct nfs_page *head = req->wb_head; | ||
| 180 | |||
| 181 | WARN_ON_ONCE(head != head->wb_head); | ||
| 182 | |||
| 183 | wait_on_bit(&head->wb_flags, PG_HEADLOCK, | ||
| 184 | TASK_UNINTERRUPTIBLE); | ||
| 162 | } | 185 | } |
| 163 | 186 | ||
| 164 | /* | 187 | /* |
| @@ -219,7 +242,7 @@ bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit) | |||
| 219 | { | 242 | { |
| 220 | bool ret; | 243 | bool ret; |
| 221 | 244 | ||
| 222 | nfs_page_group_lock(req, true); | 245 | nfs_page_group_lock(req, false); |
| 223 | ret = nfs_page_group_sync_on_bit_locked(req, bit); | 246 | ret = nfs_page_group_sync_on_bit_locked(req, bit); |
| 224 | nfs_page_group_unlock(req); | 247 | nfs_page_group_unlock(req); |
| 225 | 248 | ||
| @@ -701,10 +724,11 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc, | |||
| 701 | struct nfs_pgio_header *hdr) | 724 | struct nfs_pgio_header *hdr) |
| 702 | { | 725 | { |
| 703 | struct nfs_page *req; | 726 | struct nfs_page *req; |
| 704 | struct page **pages; | 727 | struct page **pages, |
| 728 | *last_page; | ||
| 705 | struct list_head *head = &desc->pg_list; | 729 | struct list_head *head = &desc->pg_list; |
| 706 | struct nfs_commit_info cinfo; | 730 | struct nfs_commit_info cinfo; |
| 707 | unsigned int pagecount; | 731 | unsigned int pagecount, pageused; |
| 708 | 732 | ||
| 709 | pagecount = nfs_page_array_len(desc->pg_base, desc->pg_count); | 733 | pagecount = nfs_page_array_len(desc->pg_base, desc->pg_count); |
| 710 | if (!nfs_pgarray_set(&hdr->page_array, pagecount)) | 734 | if (!nfs_pgarray_set(&hdr->page_array, pagecount)) |
| @@ -712,12 +736,23 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc, | |||
| 712 | 736 | ||
| 713 | nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq); | 737 | nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq); |
| 714 | pages = hdr->page_array.pagevec; | 738 | pages = hdr->page_array.pagevec; |
| 739 | last_page = NULL; | ||
| 740 | pageused = 0; | ||
| 715 | while (!list_empty(head)) { | 741 | while (!list_empty(head)) { |
| 716 | req = nfs_list_entry(head->next); | 742 | req = nfs_list_entry(head->next); |
| 717 | nfs_list_remove_request(req); | 743 | nfs_list_remove_request(req); |
| 718 | nfs_list_add_request(req, &hdr->pages); | 744 | nfs_list_add_request(req, &hdr->pages); |
| 719 | *pages++ = req->wb_page; | 745 | |
| 746 | if (WARN_ON_ONCE(pageused >= pagecount)) | ||
| 747 | return nfs_pgio_error(desc, hdr); | ||
| 748 | |||
| 749 | if (!last_page || last_page != req->wb_page) { | ||
| 750 | *pages++ = last_page = req->wb_page; | ||
| 751 | pageused++; | ||
| 752 | } | ||
| 720 | } | 753 | } |
| 754 | if (WARN_ON_ONCE(pageused != pagecount)) | ||
| 755 | return nfs_pgio_error(desc, hdr); | ||
| 721 | 756 | ||
| 722 | if ((desc->pg_ioflags & FLUSH_COND_STABLE) && | 757 | if ((desc->pg_ioflags & FLUSH_COND_STABLE) && |
| 723 | (desc->pg_moreio || nfs_reqs_to_commit(&cinfo))) | 758 | (desc->pg_moreio || nfs_reqs_to_commit(&cinfo))) |
| @@ -788,6 +823,14 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev, | |||
| 788 | return false; | 823 | return false; |
| 789 | if (req_offset(req) != req_offset(prev) + prev->wb_bytes) | 824 | if (req_offset(req) != req_offset(prev) + prev->wb_bytes) |
| 790 | return false; | 825 | return false; |
| 826 | if (req->wb_page == prev->wb_page) { | ||
| 827 | if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes) | ||
| 828 | return false; | ||
| 829 | } else { | ||
| 830 | if (req->wb_pgbase != 0 || | ||
| 831 | prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE) | ||
| 832 | return false; | ||
| 833 | } | ||
| 791 | } | 834 | } |
| 792 | size = pgio->pg_ops->pg_test(pgio, prev, req); | 835 | size = pgio->pg_ops->pg_test(pgio, prev, req); |
| 793 | WARN_ON_ONCE(size > req->wb_bytes); | 836 | WARN_ON_ONCE(size > req->wb_bytes); |
| @@ -858,13 +901,8 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, | |||
| 858 | struct nfs_page *subreq; | 901 | struct nfs_page *subreq; |
| 859 | unsigned int bytes_left = 0; | 902 | unsigned int bytes_left = 0; |
| 860 | unsigned int offset, pgbase; | 903 | unsigned int offset, pgbase; |
| 861 | int ret; | ||
| 862 | 904 | ||
| 863 | ret = nfs_page_group_lock(req, false); | 905 | nfs_page_group_lock(req, false); |
| 864 | if (ret < 0) { | ||
| 865 | desc->pg_error = ret; | ||
| 866 | return 0; | ||
| 867 | } | ||
| 868 | 906 | ||
| 869 | subreq = req; | 907 | subreq = req; |
| 870 | bytes_left = subreq->wb_bytes; | 908 | bytes_left = subreq->wb_bytes; |
| @@ -886,11 +924,7 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, | |||
| 886 | if (desc->pg_recoalesce) | 924 | if (desc->pg_recoalesce) |
| 887 | return 0; | 925 | return 0; |
| 888 | /* retry add_request for this subreq */ | 926 | /* retry add_request for this subreq */ |
| 889 | ret = nfs_page_group_lock(req, false); | 927 | nfs_page_group_lock(req, false); |
| 890 | if (ret < 0) { | ||
| 891 | desc->pg_error = ret; | ||
| 892 | return 0; | ||
| 893 | } | ||
| 894 | continue; | 928 | continue; |
| 895 | } | 929 | } |
| 896 | 930 | ||
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index e3b5cf28bdc5..175d5d073ccf 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
| @@ -241,7 +241,7 @@ static bool nfs_page_group_covers_page(struct nfs_page *req) | |||
| 241 | unsigned int pos = 0; | 241 | unsigned int pos = 0; |
| 242 | unsigned int len = nfs_page_length(req->wb_page); | 242 | unsigned int len = nfs_page_length(req->wb_page); |
| 243 | 243 | ||
| 244 | nfs_page_group_lock(req, true); | 244 | nfs_page_group_lock(req, false); |
| 245 | 245 | ||
| 246 | do { | 246 | do { |
| 247 | tmp = nfs_page_group_search_locked(req->wb_head, pos); | 247 | tmp = nfs_page_group_search_locked(req->wb_head, pos); |
| @@ -478,10 +478,23 @@ try_again: | |||
| 478 | return NULL; | 478 | return NULL; |
| 479 | } | 479 | } |
| 480 | 480 | ||
| 481 | /* lock each request in the page group */ | 481 | /* holding inode lock, so always make a non-blocking call to try the |
| 482 | ret = nfs_page_group_lock(head, false); | 482 | * page group lock */ |
| 483 | if (ret < 0) | 483 | ret = nfs_page_group_lock(head, true); |
| 484 | if (ret < 0) { | ||
| 485 | spin_unlock(&inode->i_lock); | ||
| 486 | |||
| 487 | if (!nonblock && ret == -EAGAIN) { | ||
| 488 | nfs_page_group_lock_wait(head); | ||
| 489 | nfs_release_request(head); | ||
| 490 | goto try_again; | ||
| 491 | } | ||
| 492 | |||
| 493 | nfs_release_request(head); | ||
| 484 | return ERR_PTR(ret); | 494 | return ERR_PTR(ret); |
| 495 | } | ||
| 496 | |||
| 497 | /* lock each request in the page group */ | ||
| 485 | subreq = head; | 498 | subreq = head; |
| 486 | do { | 499 | do { |
| 487 | /* | 500 | /* |
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 6dfd64b3a604..e973540cd15b 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 17 | {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 18 | {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 18 | {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 19 | {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 19 | {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 20 | {0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
| 20 | {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 21 | {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 21 | {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 22 | {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 22 | {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 23 | {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| @@ -164,8 +165,11 @@ | |||
| 164 | {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 165 | {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
| 165 | {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 166 | {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
| 166 | {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 167 | {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
| 168 | {0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | ||
| 169 | {0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | ||
| 167 | {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 170 | {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
| 168 | {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 171 | {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
| 172 | {0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ | ||
| 169 | {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ | 173 | {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
| 170 | {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ | 174 | {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
| 171 | {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ | 175 | {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
| @@ -175,6 +179,8 @@ | |||
| 175 | {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ | 179 | {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
| 176 | {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 180 | {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
| 177 | {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 181 | {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
| 182 | {0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | ||
| 183 | {0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | ||
| 178 | {0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ | 184 | {0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ |
| 179 | {0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ | 185 | {0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ |
| 180 | {0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ | 186 | {0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ |
| @@ -297,6 +303,7 @@ | |||
| 297 | {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ | 303 | {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ |
| 298 | {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 304 | {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
| 299 | {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 305 | {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
| 306 | {0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ | ||
| 300 | {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 307 | {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
| 301 | {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 308 | {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
| 302 | {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 309 | {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 6f76277baf39..61219b9b3445 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h | |||
| @@ -16,7 +16,6 @@ | |||
| 16 | #define PHY_ID_BCM7366 0x600d8490 | 16 | #define PHY_ID_BCM7366 0x600d8490 |
| 17 | #define PHY_ID_BCM7439 0x600d8480 | 17 | #define PHY_ID_BCM7439 0x600d8480 |
| 18 | #define PHY_ID_BCM7445 0x600d8510 | 18 | #define PHY_ID_BCM7445 0x600d8510 |
| 19 | #define PHY_ID_BCM7XXX_28 0x600d8400 | ||
| 20 | 19 | ||
| 21 | #define PHY_BCM_OUI_MASK 0xfffffc00 | 20 | #define PHY_BCM_OUI_MASK 0xfffffc00 |
| 22 | #define PHY_BCM_OUI_1 0x00206000 | 21 | #define PHY_BCM_OUI_1 0x00206000 |
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 6bb5e3f2a3b4..f0b0edbf55a9 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
| @@ -102,6 +102,15 @@ enum { | |||
| 102 | FTRACE_OPS_FL_DELETED = 1 << 8, | 102 | FTRACE_OPS_FL_DELETED = 1 << 8, |
| 103 | }; | 103 | }; |
| 104 | 104 | ||
| 105 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
| 106 | /* The hash used to know what functions callbacks trace */ | ||
| 107 | struct ftrace_ops_hash { | ||
| 108 | struct ftrace_hash *notrace_hash; | ||
| 109 | struct ftrace_hash *filter_hash; | ||
| 110 | struct mutex regex_lock; | ||
| 111 | }; | ||
| 112 | #endif | ||
| 113 | |||
| 105 | /* | 114 | /* |
| 106 | * Note, ftrace_ops can be referenced outside of RCU protection. | 115 | * Note, ftrace_ops can be referenced outside of RCU protection. |
| 107 | * (Although, for perf, the control ops prevent that). If ftrace_ops is | 116 | * (Although, for perf, the control ops prevent that). If ftrace_ops is |
| @@ -121,10 +130,9 @@ struct ftrace_ops { | |||
| 121 | int __percpu *disabled; | 130 | int __percpu *disabled; |
| 122 | #ifdef CONFIG_DYNAMIC_FTRACE | 131 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 123 | int nr_trampolines; | 132 | int nr_trampolines; |
| 124 | struct ftrace_hash *notrace_hash; | 133 | struct ftrace_ops_hash local_hash; |
| 125 | struct ftrace_hash *filter_hash; | 134 | struct ftrace_ops_hash *func_hash; |
| 126 | struct ftrace_hash *tramp_hash; | 135 | struct ftrace_hash *tramp_hash; |
| 127 | struct mutex regex_lock; | ||
| 128 | unsigned long trampoline; | 136 | unsigned long trampoline; |
| 129 | #endif | 137 | #endif |
| 130 | }; | 138 | }; |
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index b7ce0c64c6f3..c7e17de732f3 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h | |||
| @@ -16,8 +16,6 @@ struct device; | |||
| 16 | */ | 16 | */ |
| 17 | struct gpio_desc; | 17 | struct gpio_desc; |
| 18 | 18 | ||
| 19 | #ifdef CONFIG_GPIOLIB | ||
| 20 | |||
| 21 | #define GPIOD_FLAGS_BIT_DIR_SET BIT(0) | 19 | #define GPIOD_FLAGS_BIT_DIR_SET BIT(0) |
| 22 | #define GPIOD_FLAGS_BIT_DIR_OUT BIT(1) | 20 | #define GPIOD_FLAGS_BIT_DIR_OUT BIT(1) |
| 23 | #define GPIOD_FLAGS_BIT_DIR_VAL BIT(2) | 21 | #define GPIOD_FLAGS_BIT_DIR_VAL BIT(2) |
| @@ -34,6 +32,8 @@ enum gpiod_flags { | |||
| 34 | GPIOD_FLAGS_BIT_DIR_VAL, | 32 | GPIOD_FLAGS_BIT_DIR_VAL, |
| 35 | }; | 33 | }; |
| 36 | 34 | ||
| 35 | #ifdef CONFIG_GPIOLIB | ||
| 36 | |||
| 37 | /* Acquire and dispose GPIOs */ | 37 | /* Acquire and dispose GPIOs */ |
| 38 | struct gpio_desc *__must_check __gpiod_get(struct device *dev, | 38 | struct gpio_desc *__must_check __gpiod_get(struct device *dev, |
| 39 | const char *con_id, | 39 | const char *con_id, |
diff --git a/include/linux/hid.h b/include/linux/hid.h index f53c4a9cca1d..78ea9bf941cd 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h | |||
| @@ -265,6 +265,7 @@ struct hid_item { | |||
| 265 | #define HID_CONNECT_HIDDEV 0x08 | 265 | #define HID_CONNECT_HIDDEV 0x08 |
| 266 | #define HID_CONNECT_HIDDEV_FORCE 0x10 | 266 | #define HID_CONNECT_HIDDEV_FORCE 0x10 |
| 267 | #define HID_CONNECT_FF 0x20 | 267 | #define HID_CONNECT_FF 0x20 |
| 268 | #define HID_CONNECT_DRIVER 0x40 | ||
| 268 | #define HID_CONNECT_DEFAULT (HID_CONNECT_HIDINPUT|HID_CONNECT_HIDRAW| \ | 269 | #define HID_CONNECT_DEFAULT (HID_CONNECT_HIDINPUT|HID_CONNECT_HIDRAW| \ |
| 269 | HID_CONNECT_HIDDEV|HID_CONNECT_FF) | 270 | HID_CONNECT_HIDDEV|HID_CONNECT_FF) |
| 270 | 271 | ||
| @@ -287,6 +288,7 @@ struct hid_item { | |||
| 287 | #define HID_QUIRK_HIDINPUT_FORCE 0x00000080 | 288 | #define HID_QUIRK_HIDINPUT_FORCE 0x00000080 |
| 288 | #define HID_QUIRK_NO_EMPTY_INPUT 0x00000100 | 289 | #define HID_QUIRK_NO_EMPTY_INPUT 0x00000100 |
| 289 | #define HID_QUIRK_NO_INIT_INPUT_REPORTS 0x00000200 | 290 | #define HID_QUIRK_NO_INIT_INPUT_REPORTS 0x00000200 |
| 291 | #define HID_QUIRK_ALWAYS_POLL 0x00000400 | ||
| 290 | #define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000 | 292 | #define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000 |
| 291 | #define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000 | 293 | #define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000 |
| 292 | #define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000 | 294 | #define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000 |
| @@ -440,6 +442,7 @@ struct hid_output_fifo { | |||
| 440 | #define HID_CLAIMED_INPUT 1 | 442 | #define HID_CLAIMED_INPUT 1 |
| 441 | #define HID_CLAIMED_HIDDEV 2 | 443 | #define HID_CLAIMED_HIDDEV 2 |
| 442 | #define HID_CLAIMED_HIDRAW 4 | 444 | #define HID_CLAIMED_HIDRAW 4 |
| 445 | #define HID_CLAIMED_DRIVER 8 | ||
| 443 | 446 | ||
| 444 | #define HID_STAT_ADDED 1 | 447 | #define HID_STAT_ADDED 1 |
| 445 | #define HID_STAT_PARSED 2 | 448 | #define HID_STAT_PARSED 2 |
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 6ad2bbcad405..6c3e06ee2fb7 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h | |||
| @@ -123,6 +123,7 @@ extern int nfs_wait_on_request(struct nfs_page *); | |||
| 123 | extern void nfs_unlock_request(struct nfs_page *req); | 123 | extern void nfs_unlock_request(struct nfs_page *req); |
| 124 | extern void nfs_unlock_and_release_request(struct nfs_page *); | 124 | extern void nfs_unlock_and_release_request(struct nfs_page *); |
| 125 | extern int nfs_page_group_lock(struct nfs_page *, bool); | 125 | extern int nfs_page_group_lock(struct nfs_page *, bool); |
| 126 | extern void nfs_page_group_lock_wait(struct nfs_page *); | ||
| 126 | extern void nfs_page_group_unlock(struct nfs_page *); | 127 | extern void nfs_page_group_unlock(struct nfs_page *); |
| 127 | extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int); | 128 | extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int); |
| 128 | 129 | ||
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index f1afd607f043..11d11bc5c78f 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h | |||
| @@ -703,9 +703,11 @@ __SYSCALL(__NR_renameat2, sys_renameat2) | |||
| 703 | __SYSCALL(__NR_seccomp, sys_seccomp) | 703 | __SYSCALL(__NR_seccomp, sys_seccomp) |
| 704 | #define __NR_getrandom 278 | 704 | #define __NR_getrandom 278 |
| 705 | __SYSCALL(__NR_getrandom, sys_getrandom) | 705 | __SYSCALL(__NR_getrandom, sys_getrandom) |
| 706 | #define __NR_memfd_create 279 | ||
| 707 | __SYSCALL(__NR_memfd_create, sys_memfd_create) | ||
| 706 | 708 | ||
| 707 | #undef __NR_syscalls | 709 | #undef __NR_syscalls |
| 708 | #define __NR_syscalls 279 | 710 | #define __NR_syscalls 280 |
| 709 | 711 | ||
| 710 | /* | 712 | /* |
| 711 | * All syscalls below here should go away really, | 713 | * All syscalls below here should go away really, |
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index 509b2d7a41b7..fea6099608ef 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h | |||
| @@ -944,6 +944,7 @@ struct drm_radeon_cs_chunk { | |||
| 944 | }; | 944 | }; |
| 945 | 945 | ||
| 946 | /* drm_radeon_cs_reloc.flags */ | 946 | /* drm_radeon_cs_reloc.flags */ |
| 947 | #define RADEON_RELOC_PRIO_MASK (0xf << 0) | ||
| 947 | 948 | ||
| 948 | struct drm_radeon_cs_reloc { | 949 | struct drm_radeon_cs_reloc { |
| 949 | uint32_t handle; | 950 | uint32_t handle; |
diff --git a/include/uapi/linux/uhid.h b/include/uapi/linux/uhid.h index 1e3b09c191cd..aaa86d6bd1dd 100644 --- a/include/uapi/linux/uhid.h +++ b/include/uapi/linux/uhid.h | |||
| @@ -24,35 +24,23 @@ | |||
| 24 | #include <linux/hid.h> | 24 | #include <linux/hid.h> |
| 25 | 25 | ||
| 26 | enum uhid_event_type { | 26 | enum uhid_event_type { |
| 27 | UHID_CREATE, | 27 | __UHID_LEGACY_CREATE, |
| 28 | UHID_DESTROY, | 28 | UHID_DESTROY, |
| 29 | UHID_START, | 29 | UHID_START, |
| 30 | UHID_STOP, | 30 | UHID_STOP, |
| 31 | UHID_OPEN, | 31 | UHID_OPEN, |
| 32 | UHID_CLOSE, | 32 | UHID_CLOSE, |
| 33 | UHID_OUTPUT, | 33 | UHID_OUTPUT, |
| 34 | UHID_OUTPUT_EV, /* obsolete! */ | 34 | __UHID_LEGACY_OUTPUT_EV, |
| 35 | UHID_INPUT, | 35 | __UHID_LEGACY_INPUT, |
| 36 | UHID_FEATURE, | 36 | UHID_GET_REPORT, |
| 37 | UHID_FEATURE_ANSWER, | 37 | UHID_GET_REPORT_REPLY, |
| 38 | UHID_CREATE2, | 38 | UHID_CREATE2, |
| 39 | UHID_INPUT2, | 39 | UHID_INPUT2, |
| 40 | UHID_SET_REPORT, | ||
| 41 | UHID_SET_REPORT_REPLY, | ||
| 40 | }; | 42 | }; |
| 41 | 43 | ||
| 42 | struct uhid_create_req { | ||
| 43 | __u8 name[128]; | ||
| 44 | __u8 phys[64]; | ||
| 45 | __u8 uniq[64]; | ||
| 46 | __u8 __user *rd_data; | ||
| 47 | __u16 rd_size; | ||
| 48 | |||
| 49 | __u16 bus; | ||
| 50 | __u32 vendor; | ||
| 51 | __u32 product; | ||
| 52 | __u32 version; | ||
| 53 | __u32 country; | ||
| 54 | } __attribute__((__packed__)); | ||
| 55 | |||
| 56 | struct uhid_create2_req { | 44 | struct uhid_create2_req { |
| 57 | __u8 name[128]; | 45 | __u8 name[128]; |
| 58 | __u8 phys[64]; | 46 | __u8 phys[64]; |
| @@ -66,6 +54,16 @@ struct uhid_create2_req { | |||
| 66 | __u8 rd_data[HID_MAX_DESCRIPTOR_SIZE]; | 54 | __u8 rd_data[HID_MAX_DESCRIPTOR_SIZE]; |
| 67 | } __attribute__((__packed__)); | 55 | } __attribute__((__packed__)); |
| 68 | 56 | ||
| 57 | enum uhid_dev_flag { | ||
| 58 | UHID_DEV_NUMBERED_FEATURE_REPORTS = (1ULL << 0), | ||
| 59 | UHID_DEV_NUMBERED_OUTPUT_REPORTS = (1ULL << 1), | ||
| 60 | UHID_DEV_NUMBERED_INPUT_REPORTS = (1ULL << 2), | ||
| 61 | }; | ||
| 62 | |||
| 63 | struct uhid_start_req { | ||
| 64 | __u64 dev_flags; | ||
| 65 | }; | ||
| 66 | |||
| 69 | #define UHID_DATA_MAX 4096 | 67 | #define UHID_DATA_MAX 4096 |
| 70 | 68 | ||
| 71 | enum uhid_report_type { | 69 | enum uhid_report_type { |
| @@ -74,36 +72,94 @@ enum uhid_report_type { | |||
| 74 | UHID_INPUT_REPORT, | 72 | UHID_INPUT_REPORT, |
| 75 | }; | 73 | }; |
| 76 | 74 | ||
| 77 | struct uhid_input_req { | 75 | struct uhid_input2_req { |
| 76 | __u16 size; | ||
| 77 | __u8 data[UHID_DATA_MAX]; | ||
| 78 | } __attribute__((__packed__)); | ||
| 79 | |||
| 80 | struct uhid_output_req { | ||
| 78 | __u8 data[UHID_DATA_MAX]; | 81 | __u8 data[UHID_DATA_MAX]; |
| 79 | __u16 size; | 82 | __u16 size; |
| 83 | __u8 rtype; | ||
| 80 | } __attribute__((__packed__)); | 84 | } __attribute__((__packed__)); |
| 81 | 85 | ||
| 82 | struct uhid_input2_req { | 86 | struct uhid_get_report_req { |
| 87 | __u32 id; | ||
| 88 | __u8 rnum; | ||
| 89 | __u8 rtype; | ||
| 90 | } __attribute__((__packed__)); | ||
| 91 | |||
| 92 | struct uhid_get_report_reply_req { | ||
| 93 | __u32 id; | ||
| 94 | __u16 err; | ||
| 83 | __u16 size; | 95 | __u16 size; |
| 84 | __u8 data[UHID_DATA_MAX]; | 96 | __u8 data[UHID_DATA_MAX]; |
| 85 | } __attribute__((__packed__)); | 97 | } __attribute__((__packed__)); |
| 86 | 98 | ||
| 87 | struct uhid_output_req { | 99 | struct uhid_set_report_req { |
| 100 | __u32 id; | ||
| 101 | __u8 rnum; | ||
| 102 | __u8 rtype; | ||
| 103 | __u16 size; | ||
| 104 | __u8 data[UHID_DATA_MAX]; | ||
| 105 | } __attribute__((__packed__)); | ||
| 106 | |||
| 107 | struct uhid_set_report_reply_req { | ||
| 108 | __u32 id; | ||
| 109 | __u16 err; | ||
| 110 | } __attribute__((__packed__)); | ||
| 111 | |||
| 112 | /* | ||
| 113 | * Compat Layer | ||
| 114 | * All these commands and requests are obsolete. You should avoid using them in | ||
| 115 | * new code. We support them for backwards-compatibility, but you might not get | ||
| 116 | * access to new features in case you use them. | ||
| 117 | */ | ||
| 118 | |||
| 119 | enum uhid_legacy_event_type { | ||
| 120 | UHID_CREATE = __UHID_LEGACY_CREATE, | ||
| 121 | UHID_OUTPUT_EV = __UHID_LEGACY_OUTPUT_EV, | ||
| 122 | UHID_INPUT = __UHID_LEGACY_INPUT, | ||
| 123 | UHID_FEATURE = UHID_GET_REPORT, | ||
| 124 | UHID_FEATURE_ANSWER = UHID_GET_REPORT_REPLY, | ||
| 125 | }; | ||
| 126 | |||
| 127 | /* Obsolete! Use UHID_CREATE2. */ | ||
| 128 | struct uhid_create_req { | ||
| 129 | __u8 name[128]; | ||
| 130 | __u8 phys[64]; | ||
| 131 | __u8 uniq[64]; | ||
| 132 | __u8 __user *rd_data; | ||
| 133 | __u16 rd_size; | ||
| 134 | |||
| 135 | __u16 bus; | ||
| 136 | __u32 vendor; | ||
| 137 | __u32 product; | ||
| 138 | __u32 version; | ||
| 139 | __u32 country; | ||
| 140 | } __attribute__((__packed__)); | ||
| 141 | |||
| 142 | /* Obsolete! Use UHID_INPUT2. */ | ||
| 143 | struct uhid_input_req { | ||
| 88 | __u8 data[UHID_DATA_MAX]; | 144 | __u8 data[UHID_DATA_MAX]; |
| 89 | __u16 size; | 145 | __u16 size; |
| 90 | __u8 rtype; | ||
| 91 | } __attribute__((__packed__)); | 146 | } __attribute__((__packed__)); |
| 92 | 147 | ||
| 93 | /* Obsolete! Newer kernels will no longer send these events but instead convert | 148 | /* Obsolete! Kernel uses UHID_OUTPUT exclusively now. */ |
| 94 | * it into raw output reports via UHID_OUTPUT. */ | ||
| 95 | struct uhid_output_ev_req { | 149 | struct uhid_output_ev_req { |
| 96 | __u16 type; | 150 | __u16 type; |
| 97 | __u16 code; | 151 | __u16 code; |
| 98 | __s32 value; | 152 | __s32 value; |
| 99 | } __attribute__((__packed__)); | 153 | } __attribute__((__packed__)); |
| 100 | 154 | ||
| 155 | /* Obsolete! Kernel uses ABI compatible UHID_GET_REPORT. */ | ||
| 101 | struct uhid_feature_req { | 156 | struct uhid_feature_req { |
| 102 | __u32 id; | 157 | __u32 id; |
| 103 | __u8 rnum; | 158 | __u8 rnum; |
| 104 | __u8 rtype; | 159 | __u8 rtype; |
| 105 | } __attribute__((__packed__)); | 160 | } __attribute__((__packed__)); |
| 106 | 161 | ||
| 162 | /* Obsolete! Use ABI compatible UHID_GET_REPORT_REPLY. */ | ||
| 107 | struct uhid_feature_answer_req { | 163 | struct uhid_feature_answer_req { |
| 108 | __u32 id; | 164 | __u32 id; |
| 109 | __u16 err; | 165 | __u16 err; |
| @@ -111,6 +167,15 @@ struct uhid_feature_answer_req { | |||
| 111 | __u8 data[UHID_DATA_MAX]; | 167 | __u8 data[UHID_DATA_MAX]; |
| 112 | } __attribute__((__packed__)); | 168 | } __attribute__((__packed__)); |
| 113 | 169 | ||
| 170 | /* | ||
| 171 | * UHID Events | ||
| 172 | * All UHID events from and to the kernel are encoded as "struct uhid_event". | ||
| 173 | * The "type" field contains a UHID_* type identifier. All payload depends on | ||
| 174 | * that type and can be accessed via ev->u.XYZ accordingly. | ||
| 175 | * If user-space writes short events, they're extended with 0s by the kernel. If | ||
| 176 | * the kernel writes short events, user-space shall extend them with 0s. | ||
| 177 | */ | ||
| 178 | |||
| 114 | struct uhid_event { | 179 | struct uhid_event { |
| 115 | __u32 type; | 180 | __u32 type; |
| 116 | 181 | ||
| @@ -120,9 +185,14 @@ struct uhid_event { | |||
| 120 | struct uhid_output_req output; | 185 | struct uhid_output_req output; |
| 121 | struct uhid_output_ev_req output_ev; | 186 | struct uhid_output_ev_req output_ev; |
| 122 | struct uhid_feature_req feature; | 187 | struct uhid_feature_req feature; |
| 188 | struct uhid_get_report_req get_report; | ||
| 123 | struct uhid_feature_answer_req feature_answer; | 189 | struct uhid_feature_answer_req feature_answer; |
| 190 | struct uhid_get_report_reply_req get_report_reply; | ||
| 124 | struct uhid_create2_req create2; | 191 | struct uhid_create2_req create2; |
| 125 | struct uhid_input2_req input2; | 192 | struct uhid_input2_req input2; |
| 193 | struct uhid_set_report_req set_report; | ||
| 194 | struct uhid_set_report_reply_req set_report_reply; | ||
| 195 | struct uhid_start_req start; | ||
| 126 | } u; | 196 | } u; |
| 127 | } __attribute__((__packed__)); | 197 | } __attribute__((__packed__)); |
| 128 | 198 | ||
diff --git a/kernel/events/core.c b/kernel/events/core.c index 1cf24b3e42ec..f9c1ed002dbc 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
| @@ -41,6 +41,7 @@ | |||
| 41 | #include <linux/cgroup.h> | 41 | #include <linux/cgroup.h> |
| 42 | #include <linux/module.h> | 42 | #include <linux/module.h> |
| 43 | #include <linux/mman.h> | 43 | #include <linux/mman.h> |
| 44 | #include <linux/compat.h> | ||
| 44 | 45 | ||
| 45 | #include "internal.h" | 46 | #include "internal.h" |
| 46 | 47 | ||
| @@ -3717,6 +3718,26 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 3717 | return 0; | 3718 | return 0; |
| 3718 | } | 3719 | } |
| 3719 | 3720 | ||
| 3721 | #ifdef CONFIG_COMPAT | ||
| 3722 | static long perf_compat_ioctl(struct file *file, unsigned int cmd, | ||
| 3723 | unsigned long arg) | ||
| 3724 | { | ||
| 3725 | switch (_IOC_NR(cmd)) { | ||
| 3726 | case _IOC_NR(PERF_EVENT_IOC_SET_FILTER): | ||
| 3727 | case _IOC_NR(PERF_EVENT_IOC_ID): | ||
| 3728 | /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case) */ | ||
| 3729 | if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) { | ||
| 3730 | cmd &= ~IOCSIZE_MASK; | ||
| 3731 | cmd |= sizeof(void *) << IOCSIZE_SHIFT; | ||
| 3732 | } | ||
| 3733 | break; | ||
| 3734 | } | ||
| 3735 | return perf_ioctl(file, cmd, arg); | ||
| 3736 | } | ||
| 3737 | #else | ||
| 3738 | # define perf_compat_ioctl NULL | ||
| 3739 | #endif | ||
| 3740 | |||
| 3720 | int perf_event_task_enable(void) | 3741 | int perf_event_task_enable(void) |
| 3721 | { | 3742 | { |
| 3722 | struct perf_event *event; | 3743 | struct perf_event *event; |
| @@ -4222,7 +4243,7 @@ static const struct file_operations perf_fops = { | |||
| 4222 | .read = perf_read, | 4243 | .read = perf_read, |
| 4223 | .poll = perf_poll, | 4244 | .poll = perf_poll, |
| 4224 | .unlocked_ioctl = perf_ioctl, | 4245 | .unlocked_ioctl = perf_ioctl, |
| 4225 | .compat_ioctl = perf_ioctl, | 4246 | .compat_ioctl = perf_compat_ioctl, |
| 4226 | .mmap = perf_mmap, | 4247 | .mmap = perf_mmap, |
| 4227 | .fasync = perf_fasync, | 4248 | .fasync = perf_fasync, |
| 4228 | }; | 4249 | }; |
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 734e9a7d280b..3995f546d0f3 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
| @@ -1778,7 +1778,18 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) | |||
| 1778 | unsigned long hash, flags = 0; | 1778 | unsigned long hash, flags = 0; |
| 1779 | struct kretprobe_instance *ri; | 1779 | struct kretprobe_instance *ri; |
| 1780 | 1780 | ||
| 1781 | /*TODO: consider to only swap the RA after the last pre_handler fired */ | 1781 | /* |
| 1782 | * To avoid deadlocks, prohibit return probing in NMI contexts, | ||
| 1783 | * just skip the probe and increase the (inexact) 'nmissed' | ||
| 1784 | * statistical counter, so that the user is informed that | ||
| 1785 | * something happened: | ||
| 1786 | */ | ||
| 1787 | if (unlikely(in_nmi())) { | ||
| 1788 | rp->nmissed++; | ||
| 1789 | return 0; | ||
| 1790 | } | ||
| 1791 | |||
| 1792 | /* TODO: consider to only swap the RA after the last pre_handler fired */ | ||
| 1782 | hash = hash_ptr(current, KPROBE_HASH_BITS); | 1793 | hash = hash_ptr(current, KPROBE_HASH_BITS); |
| 1783 | raw_spin_lock_irqsave(&rp->lock, flags); | 1794 | raw_spin_lock_irqsave(&rp->lock, flags); |
| 1784 | if (!hlist_empty(&rp->free_instances)) { | 1795 | if (!hlist_empty(&rp->free_instances)) { |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 1654b12c891a..5916a8e59e87 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -65,15 +65,21 @@ | |||
| 65 | #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL) | 65 | #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL) |
| 66 | 66 | ||
| 67 | #ifdef CONFIG_DYNAMIC_FTRACE | 67 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 68 | #define INIT_REGEX_LOCK(opsname) \ | 68 | #define INIT_OPS_HASH(opsname) \ |
| 69 | .regex_lock = __MUTEX_INITIALIZER(opsname.regex_lock), | 69 | .func_hash = &opsname.local_hash, \ |
| 70 | .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), | ||
| 71 | #define ASSIGN_OPS_HASH(opsname, val) \ | ||
| 72 | .func_hash = val, \ | ||
| 73 | .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), | ||
| 70 | #else | 74 | #else |
| 71 | #define INIT_REGEX_LOCK(opsname) | 75 | #define INIT_OPS_HASH(opsname) |
| 76 | #define ASSIGN_OPS_HASH(opsname, val) | ||
| 72 | #endif | 77 | #endif |
| 73 | 78 | ||
| 74 | static struct ftrace_ops ftrace_list_end __read_mostly = { | 79 | static struct ftrace_ops ftrace_list_end __read_mostly = { |
| 75 | .func = ftrace_stub, | 80 | .func = ftrace_stub, |
| 76 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB, | 81 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB, |
| 82 | INIT_OPS_HASH(ftrace_list_end) | ||
| 77 | }; | 83 | }; |
| 78 | 84 | ||
| 79 | /* ftrace_enabled is a method to turn ftrace on or off */ | 85 | /* ftrace_enabled is a method to turn ftrace on or off */ |
| @@ -140,7 +146,8 @@ static inline void ftrace_ops_init(struct ftrace_ops *ops) | |||
| 140 | { | 146 | { |
| 141 | #ifdef CONFIG_DYNAMIC_FTRACE | 147 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 142 | if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) { | 148 | if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) { |
| 143 | mutex_init(&ops->regex_lock); | 149 | mutex_init(&ops->local_hash.regex_lock); |
| 150 | ops->func_hash = &ops->local_hash; | ||
| 144 | ops->flags |= FTRACE_OPS_FL_INITIALIZED; | 151 | ops->flags |= FTRACE_OPS_FL_INITIALIZED; |
| 145 | } | 152 | } |
| 146 | #endif | 153 | #endif |
| @@ -899,7 +906,7 @@ static void unregister_ftrace_profiler(void) | |||
| 899 | static struct ftrace_ops ftrace_profile_ops __read_mostly = { | 906 | static struct ftrace_ops ftrace_profile_ops __read_mostly = { |
| 900 | .func = function_profile_call, | 907 | .func = function_profile_call, |
| 901 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, | 908 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, |
| 902 | INIT_REGEX_LOCK(ftrace_profile_ops) | 909 | INIT_OPS_HASH(ftrace_profile_ops) |
| 903 | }; | 910 | }; |
| 904 | 911 | ||
| 905 | static int register_ftrace_profiler(void) | 912 | static int register_ftrace_profiler(void) |
| @@ -1081,11 +1088,12 @@ static const struct ftrace_hash empty_hash = { | |||
| 1081 | #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash) | 1088 | #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash) |
| 1082 | 1089 | ||
| 1083 | static struct ftrace_ops global_ops = { | 1090 | static struct ftrace_ops global_ops = { |
| 1084 | .func = ftrace_stub, | 1091 | .func = ftrace_stub, |
| 1085 | .notrace_hash = EMPTY_HASH, | 1092 | .local_hash.notrace_hash = EMPTY_HASH, |
| 1086 | .filter_hash = EMPTY_HASH, | 1093 | .local_hash.filter_hash = EMPTY_HASH, |
| 1087 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, | 1094 | INIT_OPS_HASH(global_ops) |
| 1088 | INIT_REGEX_LOCK(global_ops) | 1095 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | |
| 1096 | FTRACE_OPS_FL_INITIALIZED, | ||
| 1089 | }; | 1097 | }; |
| 1090 | 1098 | ||
| 1091 | struct ftrace_page { | 1099 | struct ftrace_page { |
| @@ -1226,8 +1234,8 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash) | |||
| 1226 | void ftrace_free_filter(struct ftrace_ops *ops) | 1234 | void ftrace_free_filter(struct ftrace_ops *ops) |
| 1227 | { | 1235 | { |
| 1228 | ftrace_ops_init(ops); | 1236 | ftrace_ops_init(ops); |
| 1229 | free_ftrace_hash(ops->filter_hash); | 1237 | free_ftrace_hash(ops->func_hash->filter_hash); |
| 1230 | free_ftrace_hash(ops->notrace_hash); | 1238 | free_ftrace_hash(ops->func_hash->notrace_hash); |
| 1231 | } | 1239 | } |
| 1232 | 1240 | ||
| 1233 | static struct ftrace_hash *alloc_ftrace_hash(int size_bits) | 1241 | static struct ftrace_hash *alloc_ftrace_hash(int size_bits) |
| @@ -1288,9 +1296,9 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash) | |||
| 1288 | } | 1296 | } |
| 1289 | 1297 | ||
| 1290 | static void | 1298 | static void |
| 1291 | ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash); | 1299 | ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash); |
| 1292 | static void | 1300 | static void |
| 1293 | ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash); | 1301 | ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash); |
| 1294 | 1302 | ||
| 1295 | static int | 1303 | static int |
| 1296 | ftrace_hash_move(struct ftrace_ops *ops, int enable, | 1304 | ftrace_hash_move(struct ftrace_ops *ops, int enable, |
| @@ -1342,13 +1350,13 @@ update: | |||
| 1342 | * Remove the current set, update the hash and add | 1350 | * Remove the current set, update the hash and add |
| 1343 | * them back. | 1351 | * them back. |
| 1344 | */ | 1352 | */ |
| 1345 | ftrace_hash_rec_disable(ops, enable); | 1353 | ftrace_hash_rec_disable_modify(ops, enable); |
| 1346 | 1354 | ||
| 1347 | old_hash = *dst; | 1355 | old_hash = *dst; |
| 1348 | rcu_assign_pointer(*dst, new_hash); | 1356 | rcu_assign_pointer(*dst, new_hash); |
| 1349 | free_ftrace_hash_rcu(old_hash); | 1357 | free_ftrace_hash_rcu(old_hash); |
| 1350 | 1358 | ||
| 1351 | ftrace_hash_rec_enable(ops, enable); | 1359 | ftrace_hash_rec_enable_modify(ops, enable); |
| 1352 | 1360 | ||
| 1353 | return 0; | 1361 | return 0; |
| 1354 | } | 1362 | } |
| @@ -1382,8 +1390,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) | |||
| 1382 | return 0; | 1390 | return 0; |
| 1383 | #endif | 1391 | #endif |
| 1384 | 1392 | ||
| 1385 | filter_hash = rcu_dereference_raw_notrace(ops->filter_hash); | 1393 | filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash); |
| 1386 | notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash); | 1394 | notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash); |
| 1387 | 1395 | ||
| 1388 | if ((ftrace_hash_empty(filter_hash) || | 1396 | if ((ftrace_hash_empty(filter_hash) || |
| 1389 | ftrace_lookup_ip(filter_hash, ip)) && | 1397 | ftrace_lookup_ip(filter_hash, ip)) && |
| @@ -1503,25 +1511,38 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec) | |||
| 1503 | static void ftrace_remove_tramp(struct ftrace_ops *ops, | 1511 | static void ftrace_remove_tramp(struct ftrace_ops *ops, |
| 1504 | struct dyn_ftrace *rec) | 1512 | struct dyn_ftrace *rec) |
| 1505 | { | 1513 | { |
| 1506 | struct ftrace_func_entry *entry; | 1514 | /* If TRAMP is not set, no ops should have a trampoline for this */ |
| 1507 | 1515 | if (!(rec->flags & FTRACE_FL_TRAMP)) | |
| 1508 | entry = ftrace_lookup_ip(ops->tramp_hash, rec->ip); | ||
| 1509 | if (!entry) | ||
| 1510 | return; | 1516 | return; |
| 1511 | 1517 | ||
| 1518 | rec->flags &= ~FTRACE_FL_TRAMP; | ||
| 1519 | |||
| 1520 | if ((!ftrace_hash_empty(ops->func_hash->filter_hash) && | ||
| 1521 | !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) || | ||
| 1522 | ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) | ||
| 1523 | return; | ||
| 1512 | /* | 1524 | /* |
| 1513 | * The tramp_hash entry will be removed at time | 1525 | * The tramp_hash entry will be removed at time |
| 1514 | * of update. | 1526 | * of update. |
| 1515 | */ | 1527 | */ |
| 1516 | ops->nr_trampolines--; | 1528 | ops->nr_trampolines--; |
| 1517 | rec->flags &= ~FTRACE_FL_TRAMP; | ||
| 1518 | } | 1529 | } |
| 1519 | 1530 | ||
| 1520 | static void ftrace_clear_tramps(struct dyn_ftrace *rec) | 1531 | static void ftrace_clear_tramps(struct dyn_ftrace *rec, struct ftrace_ops *ops) |
| 1521 | { | 1532 | { |
| 1522 | struct ftrace_ops *op; | 1533 | struct ftrace_ops *op; |
| 1523 | 1534 | ||
| 1535 | /* If TRAMP is not set, no ops should have a trampoline for this */ | ||
| 1536 | if (!(rec->flags & FTRACE_FL_TRAMP)) | ||
| 1537 | return; | ||
| 1538 | |||
| 1524 | do_for_each_ftrace_op(op, ftrace_ops_list) { | 1539 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
| 1540 | /* | ||
| 1541 | * This function is called to clear other tramps | ||
| 1542 | * not the one that is being updated. | ||
| 1543 | */ | ||
| 1544 | if (op == ops) | ||
| 1545 | continue; | ||
| 1525 | if (op->nr_trampolines) | 1546 | if (op->nr_trampolines) |
| 1526 | ftrace_remove_tramp(op, rec); | 1547 | ftrace_remove_tramp(op, rec); |
| 1527 | } while_for_each_ftrace_op(op); | 1548 | } while_for_each_ftrace_op(op); |
| @@ -1554,14 +1575,14 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops, | |||
| 1554 | * gets inversed. | 1575 | * gets inversed. |
| 1555 | */ | 1576 | */ |
| 1556 | if (filter_hash) { | 1577 | if (filter_hash) { |
| 1557 | hash = ops->filter_hash; | 1578 | hash = ops->func_hash->filter_hash; |
| 1558 | other_hash = ops->notrace_hash; | 1579 | other_hash = ops->func_hash->notrace_hash; |
| 1559 | if (ftrace_hash_empty(hash)) | 1580 | if (ftrace_hash_empty(hash)) |
| 1560 | all = 1; | 1581 | all = 1; |
| 1561 | } else { | 1582 | } else { |
| 1562 | inc = !inc; | 1583 | inc = !inc; |
| 1563 | hash = ops->notrace_hash; | 1584 | hash = ops->func_hash->notrace_hash; |
| 1564 | other_hash = ops->filter_hash; | 1585 | other_hash = ops->func_hash->filter_hash; |
| 1565 | /* | 1586 | /* |
| 1566 | * If the notrace hash has no items, | 1587 | * If the notrace hash has no items, |
| 1567 | * then there's nothing to do. | 1588 | * then there's nothing to do. |
| @@ -1622,13 +1643,10 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops, | |||
| 1622 | /* | 1643 | /* |
| 1623 | * If we are adding another function callback | 1644 | * If we are adding another function callback |
| 1624 | * to this function, and the previous had a | 1645 | * to this function, and the previous had a |
| 1625 | * trampoline used, then we need to go back to | 1646 | * custom trampoline in use, then we need to go |
| 1626 | * the default trampoline. | 1647 | * back to the default trampoline. |
| 1627 | */ | 1648 | */ |
| 1628 | rec->flags &= ~FTRACE_FL_TRAMP; | 1649 | ftrace_clear_tramps(rec, ops); |
| 1629 | |||
| 1630 | /* remove trampolines from any ops for this rec */ | ||
| 1631 | ftrace_clear_tramps(rec); | ||
| 1632 | } | 1650 | } |
| 1633 | 1651 | ||
| 1634 | /* | 1652 | /* |
| @@ -1682,6 +1700,41 @@ static void ftrace_hash_rec_enable(struct ftrace_ops *ops, | |||
| 1682 | __ftrace_hash_rec_update(ops, filter_hash, 1); | 1700 | __ftrace_hash_rec_update(ops, filter_hash, 1); |
| 1683 | } | 1701 | } |
| 1684 | 1702 | ||
| 1703 | static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops, | ||
| 1704 | int filter_hash, int inc) | ||
| 1705 | { | ||
| 1706 | struct ftrace_ops *op; | ||
| 1707 | |||
| 1708 | __ftrace_hash_rec_update(ops, filter_hash, inc); | ||
| 1709 | |||
| 1710 | if (ops->func_hash != &global_ops.local_hash) | ||
| 1711 | return; | ||
| 1712 | |||
| 1713 | /* | ||
| 1714 | * If the ops shares the global_ops hash, then we need to update | ||
| 1715 | * all ops that are enabled and use this hash. | ||
| 1716 | */ | ||
| 1717 | do_for_each_ftrace_op(op, ftrace_ops_list) { | ||
| 1718 | /* Already done */ | ||
| 1719 | if (op == ops) | ||
| 1720 | continue; | ||
| 1721 | if (op->func_hash == &global_ops.local_hash) | ||
| 1722 | __ftrace_hash_rec_update(op, filter_hash, inc); | ||
| 1723 | } while_for_each_ftrace_op(op); | ||
| 1724 | } | ||
| 1725 | |||
| 1726 | static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, | ||
| 1727 | int filter_hash) | ||
| 1728 | { | ||
| 1729 | ftrace_hash_rec_update_modify(ops, filter_hash, 0); | ||
| 1730 | } | ||
| 1731 | |||
| 1732 | static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, | ||
| 1733 | int filter_hash) | ||
| 1734 | { | ||
| 1735 | ftrace_hash_rec_update_modify(ops, filter_hash, 1); | ||
| 1736 | } | ||
| 1737 | |||
| 1685 | static void print_ip_ins(const char *fmt, unsigned char *p) | 1738 | static void print_ip_ins(const char *fmt, unsigned char *p) |
| 1686 | { | 1739 | { |
| 1687 | int i; | 1740 | int i; |
| @@ -1896,8 +1949,8 @@ unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec) | |||
| 1896 | if (rec->flags & FTRACE_FL_TRAMP) { | 1949 | if (rec->flags & FTRACE_FL_TRAMP) { |
| 1897 | ops = ftrace_find_tramp_ops_new(rec); | 1950 | ops = ftrace_find_tramp_ops_new(rec); |
| 1898 | if (FTRACE_WARN_ON(!ops || !ops->trampoline)) { | 1951 | if (FTRACE_WARN_ON(!ops || !ops->trampoline)) { |
| 1899 | pr_warning("Bad trampoline accounting at: %p (%pS)\n", | 1952 | pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n", |
| 1900 | (void *)rec->ip, (void *)rec->ip); | 1953 | (void *)rec->ip, (void *)rec->ip, rec->flags); |
| 1901 | /* Ftrace is shutting down, return anything */ | 1954 | /* Ftrace is shutting down, return anything */ |
| 1902 | return (unsigned long)FTRACE_ADDR; | 1955 | return (unsigned long)FTRACE_ADDR; |
| 1903 | } | 1956 | } |
| @@ -1964,7 +2017,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | |||
| 1964 | return ftrace_make_call(rec, ftrace_addr); | 2017 | return ftrace_make_call(rec, ftrace_addr); |
| 1965 | 2018 | ||
| 1966 | case FTRACE_UPDATE_MAKE_NOP: | 2019 | case FTRACE_UPDATE_MAKE_NOP: |
| 1967 | return ftrace_make_nop(NULL, rec, ftrace_addr); | 2020 | return ftrace_make_nop(NULL, rec, ftrace_old_addr); |
| 1968 | 2021 | ||
| 1969 | case FTRACE_UPDATE_MODIFY_CALL: | 2022 | case FTRACE_UPDATE_MODIFY_CALL: |
| 1970 | return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); | 2023 | return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); |
| @@ -2227,7 +2280,10 @@ static int ftrace_save_ops_tramp_hash(struct ftrace_ops *ops) | |||
| 2227 | } while_for_each_ftrace_rec(); | 2280 | } while_for_each_ftrace_rec(); |
| 2228 | 2281 | ||
| 2229 | /* The number of recs in the hash must match nr_trampolines */ | 2282 | /* The number of recs in the hash must match nr_trampolines */ |
| 2230 | FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines); | 2283 | if (FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines)) |
| 2284 | pr_warn("count=%ld trampolines=%d\n", | ||
| 2285 | ops->tramp_hash->count, | ||
| 2286 | ops->nr_trampolines); | ||
| 2231 | 2287 | ||
| 2232 | return 0; | 2288 | return 0; |
| 2233 | } | 2289 | } |
| @@ -2436,8 +2492,8 @@ static inline int ops_traces_mod(struct ftrace_ops *ops) | |||
| 2436 | * Filter_hash being empty will default to trace module. | 2492 | * Filter_hash being empty will default to trace module. |
| 2437 | * But notrace hash requires a test of individual module functions. | 2493 | * But notrace hash requires a test of individual module functions. |
| 2438 | */ | 2494 | */ |
| 2439 | return ftrace_hash_empty(ops->filter_hash) && | 2495 | return ftrace_hash_empty(ops->func_hash->filter_hash) && |
| 2440 | ftrace_hash_empty(ops->notrace_hash); | 2496 | ftrace_hash_empty(ops->func_hash->notrace_hash); |
| 2441 | } | 2497 | } |
| 2442 | 2498 | ||
| 2443 | /* | 2499 | /* |
| @@ -2459,12 +2515,12 @@ ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec) | |||
| 2459 | return 0; | 2515 | return 0; |
| 2460 | 2516 | ||
| 2461 | /* The function must be in the filter */ | 2517 | /* The function must be in the filter */ |
| 2462 | if (!ftrace_hash_empty(ops->filter_hash) && | 2518 | if (!ftrace_hash_empty(ops->func_hash->filter_hash) && |
| 2463 | !ftrace_lookup_ip(ops->filter_hash, rec->ip)) | 2519 | !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) |
| 2464 | return 0; | 2520 | return 0; |
| 2465 | 2521 | ||
| 2466 | /* If in notrace hash, we ignore it too */ | 2522 | /* If in notrace hash, we ignore it too */ |
| 2467 | if (ftrace_lookup_ip(ops->notrace_hash, rec->ip)) | 2523 | if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) |
| 2468 | return 0; | 2524 | return 0; |
| 2469 | 2525 | ||
| 2470 | return 1; | 2526 | return 1; |
| @@ -2785,10 +2841,10 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
| 2785 | } else { | 2841 | } else { |
| 2786 | rec = &iter->pg->records[iter->idx++]; | 2842 | rec = &iter->pg->records[iter->idx++]; |
| 2787 | if (((iter->flags & FTRACE_ITER_FILTER) && | 2843 | if (((iter->flags & FTRACE_ITER_FILTER) && |
| 2788 | !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) || | 2844 | !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) || |
| 2789 | 2845 | ||
| 2790 | ((iter->flags & FTRACE_ITER_NOTRACE) && | 2846 | ((iter->flags & FTRACE_ITER_NOTRACE) && |
| 2791 | !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) || | 2847 | !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) || |
| 2792 | 2848 | ||
| 2793 | ((iter->flags & FTRACE_ITER_ENABLED) && | 2849 | ((iter->flags & FTRACE_ITER_ENABLED) && |
| 2794 | !(rec->flags & FTRACE_FL_ENABLED))) { | 2850 | !(rec->flags & FTRACE_FL_ENABLED))) { |
| @@ -2837,9 +2893,9 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
| 2837 | * functions are enabled. | 2893 | * functions are enabled. |
| 2838 | */ | 2894 | */ |
| 2839 | if ((iter->flags & FTRACE_ITER_FILTER && | 2895 | if ((iter->flags & FTRACE_ITER_FILTER && |
| 2840 | ftrace_hash_empty(ops->filter_hash)) || | 2896 | ftrace_hash_empty(ops->func_hash->filter_hash)) || |
| 2841 | (iter->flags & FTRACE_ITER_NOTRACE && | 2897 | (iter->flags & FTRACE_ITER_NOTRACE && |
| 2842 | ftrace_hash_empty(ops->notrace_hash))) { | 2898 | ftrace_hash_empty(ops->func_hash->notrace_hash))) { |
| 2843 | if (*pos > 0) | 2899 | if (*pos > 0) |
| 2844 | return t_hash_start(m, pos); | 2900 | return t_hash_start(m, pos); |
| 2845 | iter->flags |= FTRACE_ITER_PRINTALL; | 2901 | iter->flags |= FTRACE_ITER_PRINTALL; |
| @@ -3001,12 +3057,12 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag, | |||
| 3001 | iter->ops = ops; | 3057 | iter->ops = ops; |
| 3002 | iter->flags = flag; | 3058 | iter->flags = flag; |
| 3003 | 3059 | ||
| 3004 | mutex_lock(&ops->regex_lock); | 3060 | mutex_lock(&ops->func_hash->regex_lock); |
| 3005 | 3061 | ||
| 3006 | if (flag & FTRACE_ITER_NOTRACE) | 3062 | if (flag & FTRACE_ITER_NOTRACE) |
| 3007 | hash = ops->notrace_hash; | 3063 | hash = ops->func_hash->notrace_hash; |
| 3008 | else | 3064 | else |
| 3009 | hash = ops->filter_hash; | 3065 | hash = ops->func_hash->filter_hash; |
| 3010 | 3066 | ||
| 3011 | if (file->f_mode & FMODE_WRITE) { | 3067 | if (file->f_mode & FMODE_WRITE) { |
| 3012 | const int size_bits = FTRACE_HASH_DEFAULT_BITS; | 3068 | const int size_bits = FTRACE_HASH_DEFAULT_BITS; |
| @@ -3041,7 +3097,7 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag, | |||
| 3041 | file->private_data = iter; | 3097 | file->private_data = iter; |
| 3042 | 3098 | ||
| 3043 | out_unlock: | 3099 | out_unlock: |
| 3044 | mutex_unlock(&ops->regex_lock); | 3100 | mutex_unlock(&ops->func_hash->regex_lock); |
| 3045 | 3101 | ||
| 3046 | return ret; | 3102 | return ret; |
| 3047 | } | 3103 | } |
| @@ -3279,7 +3335,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly = | |||
| 3279 | { | 3335 | { |
| 3280 | .func = function_trace_probe_call, | 3336 | .func = function_trace_probe_call, |
| 3281 | .flags = FTRACE_OPS_FL_INITIALIZED, | 3337 | .flags = FTRACE_OPS_FL_INITIALIZED, |
| 3282 | INIT_REGEX_LOCK(trace_probe_ops) | 3338 | INIT_OPS_HASH(trace_probe_ops) |
| 3283 | }; | 3339 | }; |
| 3284 | 3340 | ||
| 3285 | static int ftrace_probe_registered; | 3341 | static int ftrace_probe_registered; |
| @@ -3342,7 +3398,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
| 3342 | void *data) | 3398 | void *data) |
| 3343 | { | 3399 | { |
| 3344 | struct ftrace_func_probe *entry; | 3400 | struct ftrace_func_probe *entry; |
| 3345 | struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash; | 3401 | struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; |
| 3346 | struct ftrace_hash *hash; | 3402 | struct ftrace_hash *hash; |
| 3347 | struct ftrace_page *pg; | 3403 | struct ftrace_page *pg; |
| 3348 | struct dyn_ftrace *rec; | 3404 | struct dyn_ftrace *rec; |
| @@ -3359,7 +3415,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
| 3359 | if (WARN_ON(not)) | 3415 | if (WARN_ON(not)) |
| 3360 | return -EINVAL; | 3416 | return -EINVAL; |
| 3361 | 3417 | ||
| 3362 | mutex_lock(&trace_probe_ops.regex_lock); | 3418 | mutex_lock(&trace_probe_ops.func_hash->regex_lock); |
| 3363 | 3419 | ||
| 3364 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); | 3420 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); |
| 3365 | if (!hash) { | 3421 | if (!hash) { |
| @@ -3428,7 +3484,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
| 3428 | out_unlock: | 3484 | out_unlock: |
| 3429 | mutex_unlock(&ftrace_lock); | 3485 | mutex_unlock(&ftrace_lock); |
| 3430 | out: | 3486 | out: |
| 3431 | mutex_unlock(&trace_probe_ops.regex_lock); | 3487 | mutex_unlock(&trace_probe_ops.func_hash->regex_lock); |
| 3432 | free_ftrace_hash(hash); | 3488 | free_ftrace_hash(hash); |
| 3433 | 3489 | ||
| 3434 | return count; | 3490 | return count; |
| @@ -3446,7 +3502,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
| 3446 | struct ftrace_func_entry *rec_entry; | 3502 | struct ftrace_func_entry *rec_entry; |
| 3447 | struct ftrace_func_probe *entry; | 3503 | struct ftrace_func_probe *entry; |
| 3448 | struct ftrace_func_probe *p; | 3504 | struct ftrace_func_probe *p; |
| 3449 | struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash; | 3505 | struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; |
| 3450 | struct list_head free_list; | 3506 | struct list_head free_list; |
| 3451 | struct ftrace_hash *hash; | 3507 | struct ftrace_hash *hash; |
| 3452 | struct hlist_node *tmp; | 3508 | struct hlist_node *tmp; |
| @@ -3468,7 +3524,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
| 3468 | return; | 3524 | return; |
| 3469 | } | 3525 | } |
| 3470 | 3526 | ||
| 3471 | mutex_lock(&trace_probe_ops.regex_lock); | 3527 | mutex_lock(&trace_probe_ops.func_hash->regex_lock); |
| 3472 | 3528 | ||
| 3473 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); | 3529 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); |
| 3474 | if (!hash) | 3530 | if (!hash) |
| @@ -3521,7 +3577,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
| 3521 | mutex_unlock(&ftrace_lock); | 3577 | mutex_unlock(&ftrace_lock); |
| 3522 | 3578 | ||
| 3523 | out_unlock: | 3579 | out_unlock: |
| 3524 | mutex_unlock(&trace_probe_ops.regex_lock); | 3580 | mutex_unlock(&trace_probe_ops.func_hash->regex_lock); |
| 3525 | free_ftrace_hash(hash); | 3581 | free_ftrace_hash(hash); |
| 3526 | } | 3582 | } |
| 3527 | 3583 | ||
| @@ -3717,12 +3773,12 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, | |||
| 3717 | if (unlikely(ftrace_disabled)) | 3773 | if (unlikely(ftrace_disabled)) |
| 3718 | return -ENODEV; | 3774 | return -ENODEV; |
| 3719 | 3775 | ||
| 3720 | mutex_lock(&ops->regex_lock); | 3776 | mutex_lock(&ops->func_hash->regex_lock); |
| 3721 | 3777 | ||
| 3722 | if (enable) | 3778 | if (enable) |
| 3723 | orig_hash = &ops->filter_hash; | 3779 | orig_hash = &ops->func_hash->filter_hash; |
| 3724 | else | 3780 | else |
| 3725 | orig_hash = &ops->notrace_hash; | 3781 | orig_hash = &ops->func_hash->notrace_hash; |
| 3726 | 3782 | ||
| 3727 | if (reset) | 3783 | if (reset) |
| 3728 | hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); | 3784 | hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); |
| @@ -3752,7 +3808,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, | |||
| 3752 | mutex_unlock(&ftrace_lock); | 3808 | mutex_unlock(&ftrace_lock); |
| 3753 | 3809 | ||
| 3754 | out_regex_unlock: | 3810 | out_regex_unlock: |
| 3755 | mutex_unlock(&ops->regex_lock); | 3811 | mutex_unlock(&ops->func_hash->regex_lock); |
| 3756 | 3812 | ||
| 3757 | free_ftrace_hash(hash); | 3813 | free_ftrace_hash(hash); |
| 3758 | return ret; | 3814 | return ret; |
| @@ -3975,15 +4031,15 @@ int ftrace_regex_release(struct inode *inode, struct file *file) | |||
| 3975 | 4031 | ||
| 3976 | trace_parser_put(parser); | 4032 | trace_parser_put(parser); |
| 3977 | 4033 | ||
| 3978 | mutex_lock(&iter->ops->regex_lock); | 4034 | mutex_lock(&iter->ops->func_hash->regex_lock); |
| 3979 | 4035 | ||
| 3980 | if (file->f_mode & FMODE_WRITE) { | 4036 | if (file->f_mode & FMODE_WRITE) { |
| 3981 | filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); | 4037 | filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); |
| 3982 | 4038 | ||
| 3983 | if (filter_hash) | 4039 | if (filter_hash) |
| 3984 | orig_hash = &iter->ops->filter_hash; | 4040 | orig_hash = &iter->ops->func_hash->filter_hash; |
| 3985 | else | 4041 | else |
| 3986 | orig_hash = &iter->ops->notrace_hash; | 4042 | orig_hash = &iter->ops->func_hash->notrace_hash; |
| 3987 | 4043 | ||
| 3988 | mutex_lock(&ftrace_lock); | 4044 | mutex_lock(&ftrace_lock); |
| 3989 | ret = ftrace_hash_move(iter->ops, filter_hash, | 4045 | ret = ftrace_hash_move(iter->ops, filter_hash, |
| @@ -3994,7 +4050,7 @@ int ftrace_regex_release(struct inode *inode, struct file *file) | |||
| 3994 | mutex_unlock(&ftrace_lock); | 4050 | mutex_unlock(&ftrace_lock); |
| 3995 | } | 4051 | } |
| 3996 | 4052 | ||
| 3997 | mutex_unlock(&iter->ops->regex_lock); | 4053 | mutex_unlock(&iter->ops->func_hash->regex_lock); |
| 3998 | free_ftrace_hash(iter->hash); | 4054 | free_ftrace_hash(iter->hash); |
| 3999 | kfree(iter); | 4055 | kfree(iter); |
| 4000 | 4056 | ||
| @@ -4611,7 +4667,6 @@ void __init ftrace_init(void) | |||
| 4611 | static struct ftrace_ops global_ops = { | 4667 | static struct ftrace_ops global_ops = { |
| 4612 | .func = ftrace_stub, | 4668 | .func = ftrace_stub, |
| 4613 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, | 4669 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, |
| 4614 | INIT_REGEX_LOCK(global_ops) | ||
| 4615 | }; | 4670 | }; |
| 4616 | 4671 | ||
| 4617 | static int __init ftrace_nodyn_init(void) | 4672 | static int __init ftrace_nodyn_init(void) |
| @@ -4713,7 +4768,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip, | |||
| 4713 | static struct ftrace_ops control_ops = { | 4768 | static struct ftrace_ops control_ops = { |
| 4714 | .func = ftrace_ops_control_func, | 4769 | .func = ftrace_ops_control_func, |
| 4715 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, | 4770 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, |
| 4716 | INIT_REGEX_LOCK(control_ops) | 4771 | INIT_OPS_HASH(control_ops) |
| 4717 | }; | 4772 | }; |
| 4718 | 4773 | ||
| 4719 | static inline void | 4774 | static inline void |
| @@ -5145,6 +5200,17 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
| 5145 | 5200 | ||
| 5146 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 5201 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 5147 | 5202 | ||
| 5203 | static struct ftrace_ops graph_ops = { | ||
| 5204 | .func = ftrace_stub, | ||
| 5205 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | | ||
| 5206 | FTRACE_OPS_FL_INITIALIZED | | ||
| 5207 | FTRACE_OPS_FL_STUB, | ||
| 5208 | #ifdef FTRACE_GRAPH_TRAMP_ADDR | ||
| 5209 | .trampoline = FTRACE_GRAPH_TRAMP_ADDR, | ||
| 5210 | #endif | ||
| 5211 | ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) | ||
| 5212 | }; | ||
| 5213 | |||
| 5148 | static int ftrace_graph_active; | 5214 | static int ftrace_graph_active; |
| 5149 | 5215 | ||
| 5150 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) | 5216 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) |
| @@ -5307,12 +5373,28 @@ static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace) | |||
| 5307 | */ | 5373 | */ |
| 5308 | static void update_function_graph_func(void) | 5374 | static void update_function_graph_func(void) |
| 5309 | { | 5375 | { |
| 5310 | if (ftrace_ops_list == &ftrace_list_end || | 5376 | struct ftrace_ops *op; |
| 5311 | (ftrace_ops_list == &global_ops && | 5377 | bool do_test = false; |
| 5312 | global_ops.next == &ftrace_list_end)) | 5378 | |
| 5313 | ftrace_graph_entry = __ftrace_graph_entry; | 5379 | /* |
| 5314 | else | 5380 | * The graph and global ops share the same set of functions |
| 5381 | * to test. If any other ops is on the list, then | ||
| 5382 | * the graph tracing needs to test if its the function | ||
| 5383 | * it should call. | ||
| 5384 | */ | ||
| 5385 | do_for_each_ftrace_op(op, ftrace_ops_list) { | ||
| 5386 | if (op != &global_ops && op != &graph_ops && | ||
| 5387 | op != &ftrace_list_end) { | ||
| 5388 | do_test = true; | ||
| 5389 | /* in double loop, break out with goto */ | ||
| 5390 | goto out; | ||
| 5391 | } | ||
| 5392 | } while_for_each_ftrace_op(op); | ||
| 5393 | out: | ||
| 5394 | if (do_test) | ||
| 5315 | ftrace_graph_entry = ftrace_graph_entry_test; | 5395 | ftrace_graph_entry = ftrace_graph_entry_test; |
| 5396 | else | ||
| 5397 | ftrace_graph_entry = __ftrace_graph_entry; | ||
| 5316 | } | 5398 | } |
| 5317 | 5399 | ||
| 5318 | static struct notifier_block ftrace_suspend_notifier = { | 5400 | static struct notifier_block ftrace_suspend_notifier = { |
| @@ -5353,16 +5435,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, | |||
| 5353 | ftrace_graph_entry = ftrace_graph_entry_test; | 5435 | ftrace_graph_entry = ftrace_graph_entry_test; |
| 5354 | update_function_graph_func(); | 5436 | update_function_graph_func(); |
| 5355 | 5437 | ||
| 5356 | /* Function graph doesn't use the .func field of global_ops */ | 5438 | ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET); |
| 5357 | global_ops.flags |= FTRACE_OPS_FL_STUB; | ||
| 5358 | |||
| 5359 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
| 5360 | /* Optimize function graph calling (if implemented by arch) */ | ||
| 5361 | if (FTRACE_GRAPH_TRAMP_ADDR != 0) | ||
| 5362 | global_ops.trampoline = FTRACE_GRAPH_TRAMP_ADDR; | ||
| 5363 | #endif | ||
| 5364 | |||
| 5365 | ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET); | ||
| 5366 | 5439 | ||
| 5367 | out: | 5440 | out: |
| 5368 | mutex_unlock(&ftrace_lock); | 5441 | mutex_unlock(&ftrace_lock); |
| @@ -5380,12 +5453,7 @@ void unregister_ftrace_graph(void) | |||
| 5380 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; | 5453 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; |
| 5381 | ftrace_graph_entry = ftrace_graph_entry_stub; | 5454 | ftrace_graph_entry = ftrace_graph_entry_stub; |
| 5382 | __ftrace_graph_entry = ftrace_graph_entry_stub; | 5455 | __ftrace_graph_entry = ftrace_graph_entry_stub; |
| 5383 | ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET); | 5456 | ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET); |
| 5384 | global_ops.flags &= ~FTRACE_OPS_FL_STUB; | ||
| 5385 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
| 5386 | if (FTRACE_GRAPH_TRAMP_ADDR != 0) | ||
| 5387 | global_ops.trampoline = 0; | ||
| 5388 | #endif | ||
| 5389 | unregister_pm_notifier(&ftrace_suspend_notifier); | 5457 | unregister_pm_notifier(&ftrace_suspend_notifier); |
| 5390 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); | 5458 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); |
| 5391 | 5459 | ||
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index afb04b9b818a..b38fb2b9e237 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
| @@ -626,8 +626,22 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, | |||
| 626 | work = &cpu_buffer->irq_work; | 626 | work = &cpu_buffer->irq_work; |
| 627 | } | 627 | } |
| 628 | 628 | ||
| 629 | work->waiters_pending = true; | ||
| 630 | poll_wait(filp, &work->waiters, poll_table); | 629 | poll_wait(filp, &work->waiters, poll_table); |
| 630 | work->waiters_pending = true; | ||
| 631 | /* | ||
| 632 | * There's a tight race between setting the waiters_pending and | ||
| 633 | * checking if the ring buffer is empty. Once the waiters_pending bit | ||
| 634 | * is set, the next event will wake the task up, but we can get stuck | ||
| 635 | * if there's only a single event in. | ||
| 636 | * | ||
| 637 | * FIXME: Ideally, we need a memory barrier on the writer side as well, | ||
| 638 | * but adding a memory barrier to all events will cause too much of a | ||
| 639 | * performance hit in the fast path. We only need a memory barrier when | ||
| 640 | * the buffer goes from empty to having content. But as this race is | ||
| 641 | * extremely small, and it's not a problem if another event comes in, we | ||
| 642 | * will fix it later. | ||
| 643 | */ | ||
| 644 | smp_mb(); | ||
| 631 | 645 | ||
| 632 | if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || | 646 | if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || |
| 633 | (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) | 647 | (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) |
diff --git a/net/atm/lec.c b/net/atm/lec.c index e4853b50cf40..4b98f897044a 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c | |||
| @@ -410,9 +410,11 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb) | |||
| 410 | priv->lane2_ops = NULL; | 410 | priv->lane2_ops = NULL; |
| 411 | if (priv->lane_version > 1) | 411 | if (priv->lane_version > 1) |
| 412 | priv->lane2_ops = &lane2_ops; | 412 | priv->lane2_ops = &lane2_ops; |
| 413 | rtnl_lock(); | ||
| 413 | if (dev_set_mtu(dev, mesg->content.config.mtu)) | 414 | if (dev_set_mtu(dev, mesg->content.config.mtu)) |
| 414 | pr_info("%s: change_mtu to %d failed\n", | 415 | pr_info("%s: change_mtu to %d failed\n", |
| 415 | dev->name, mesg->content.config.mtu); | 416 | dev->name, mesg->content.config.mtu); |
| 417 | rtnl_unlock(); | ||
| 416 | priv->is_proxy = mesg->content.config.is_proxy; | 418 | priv->is_proxy = mesg->content.config.is_proxy; |
| 417 | break; | 419 | break; |
| 418 | case l_flush_tran_id: | 420 | case l_flush_tran_id: |
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index 52c43f904220..fc1835c6bb40 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c | |||
| @@ -188,7 +188,7 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node, | |||
| 188 | 188 | ||
| 189 | /* Reached the end of the list, so insert after 'frag_entry_last'. */ | 189 | /* Reached the end of the list, so insert after 'frag_entry_last'. */ |
| 190 | if (likely(frag_entry_last)) { | 190 | if (likely(frag_entry_last)) { |
| 191 | hlist_add_behind(&frag_entry_last->list, &frag_entry_new->list); | 191 | hlist_add_behind(&frag_entry_new->list, &frag_entry_last->list); |
| 192 | chain->size += skb->len - hdr_size; | 192 | chain->size += skb->len - hdr_size; |
| 193 | chain->timestamp = jiffies; | 193 | chain->timestamp = jiffies; |
| 194 | ret = true; | 194 | ret = true; |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index cb4459bd1d29..76b7f5ee8f4c 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
| @@ -643,7 +643,7 @@ static int fib6_commit_metrics(struct dst_entry *dst, | |||
| 643 | if (dst->flags & DST_HOST) { | 643 | if (dst->flags & DST_HOST) { |
| 644 | mp = dst_metrics_write_ptr(dst); | 644 | mp = dst_metrics_write_ptr(dst); |
| 645 | } else { | 645 | } else { |
| 646 | mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); | 646 | mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC); |
| 647 | if (!mp) | 647 | if (!mp) |
| 648 | return -ENOMEM; | 648 | return -ENOMEM; |
| 649 | dst_init_metrics(dst, mp, 0); | 649 | dst_init_metrics(dst, mp, 0); |
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 6d537f03c0ba..0375009ddc0d 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c | |||
| @@ -1444,7 +1444,7 @@ ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) | |||
| 1444 | 1444 | ||
| 1445 | list_del(&sdata->reserved_chanctx_list); | 1445 | list_del(&sdata->reserved_chanctx_list); |
| 1446 | list_move(&sdata->assigned_chanctx_list, | 1446 | list_move(&sdata->assigned_chanctx_list, |
| 1447 | &new_ctx->assigned_vifs); | 1447 | &ctx->assigned_vifs); |
| 1448 | sdata->reserved_chanctx = NULL; | 1448 | sdata->reserved_chanctx = NULL; |
| 1449 | 1449 | ||
| 1450 | ieee80211_vif_chanctx_reservation_complete(sdata); | 1450 | ieee80211_vif_chanctx_reservation_complete(sdata); |
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index fe5cda0deb39..5231652a95d9 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c | |||
| @@ -42,6 +42,9 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, | |||
| 42 | 42 | ||
| 43 | static int make_writable(struct sk_buff *skb, int write_len) | 43 | static int make_writable(struct sk_buff *skb, int write_len) |
| 44 | { | 44 | { |
| 45 | if (!pskb_may_pull(skb, write_len)) | ||
| 46 | return -ENOMEM; | ||
| 47 | |||
| 45 | if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) | 48 | if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) |
| 46 | return 0; | 49 | return 0; |
| 47 | 50 | ||
| @@ -70,6 +73,8 @@ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci) | |||
| 70 | 73 | ||
| 71 | vlan_set_encap_proto(skb, vhdr); | 74 | vlan_set_encap_proto(skb, vhdr); |
| 72 | skb->mac_header += VLAN_HLEN; | 75 | skb->mac_header += VLAN_HLEN; |
| 76 | if (skb_network_offset(skb) < ETH_HLEN) | ||
| 77 | skb_set_network_header(skb, ETH_HLEN); | ||
| 73 | skb_reset_mac_len(skb); | 78 | skb_reset_mac_len(skb); |
| 74 | 79 | ||
| 75 | return 0; | 80 | return 0; |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 8d9f8042705a..93896d2092f6 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
| @@ -632,6 +632,7 @@ static void init_prb_bdqc(struct packet_sock *po, | |||
| 632 | p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov); | 632 | p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov); |
| 633 | p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv; | 633 | p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv; |
| 634 | 634 | ||
| 635 | p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv); | ||
| 635 | prb_init_ft_ops(p1, req_u); | 636 | prb_init_ft_ops(p1, req_u); |
| 636 | prb_setup_retire_blk_timer(po, tx_ring); | 637 | prb_setup_retire_blk_timer(po, tx_ring); |
| 637 | prb_open_block(p1, pbd); | 638 | prb_open_block(p1, pbd); |
| @@ -1942,6 +1943,18 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, | |||
| 1942 | if ((int)snaplen < 0) | 1943 | if ((int)snaplen < 0) |
| 1943 | snaplen = 0; | 1944 | snaplen = 0; |
| 1944 | } | 1945 | } |
| 1946 | } else if (unlikely(macoff + snaplen > | ||
| 1947 | GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { | ||
| 1948 | u32 nval; | ||
| 1949 | |||
| 1950 | nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; | ||
| 1951 | pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n", | ||
| 1952 | snaplen, nval, macoff); | ||
| 1953 | snaplen = nval; | ||
| 1954 | if (unlikely((int)snaplen < 0)) { | ||
| 1955 | snaplen = 0; | ||
| 1956 | macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; | ||
| 1957 | } | ||
| 1945 | } | 1958 | } |
| 1946 | spin_lock(&sk->sk_receive_queue.lock); | 1959 | spin_lock(&sk->sk_receive_queue.lock); |
| 1947 | h.raw = packet_current_rx_frame(po, skb, | 1960 | h.raw = packet_current_rx_frame(po, skb, |
| @@ -3783,6 +3796,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
| 3783 | goto out; | 3796 | goto out; |
| 3784 | if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) | 3797 | if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) |
| 3785 | goto out; | 3798 | goto out; |
| 3799 | if (po->tp_version >= TPACKET_V3 && | ||
| 3800 | (int)(req->tp_block_size - | ||
| 3801 | BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0) | ||
| 3802 | goto out; | ||
| 3786 | if (unlikely(req->tp_frame_size < po->tp_hdrlen + | 3803 | if (unlikely(req->tp_frame_size < po->tp_hdrlen + |
| 3787 | po->tp_reserve)) | 3804 | po->tp_reserve)) |
| 3788 | goto out; | 3805 | goto out; |
diff --git a/net/packet/internal.h b/net/packet/internal.h index eb9580a6b25f..cdddf6a30399 100644 --- a/net/packet/internal.h +++ b/net/packet/internal.h | |||
| @@ -29,6 +29,7 @@ struct tpacket_kbdq_core { | |||
| 29 | char *pkblk_start; | 29 | char *pkblk_start; |
| 30 | char *pkblk_end; | 30 | char *pkblk_end; |
| 31 | int kblk_size; | 31 | int kblk_size; |
| 32 | unsigned int max_frame_len; | ||
| 32 | unsigned int knum_blocks; | 33 | unsigned int knum_blocks; |
| 33 | uint64_t knxt_seq_num; | 34 | uint64_t knxt_seq_num; |
| 34 | char *prev; | 35 | char *prev; |
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index ead526467cca..762a04bb8f6d 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
| @@ -159,7 +159,6 @@ struct cbq_sched_data { | |||
| 159 | struct cbq_class *tx_borrowed; | 159 | struct cbq_class *tx_borrowed; |
| 160 | int tx_len; | 160 | int tx_len; |
| 161 | psched_time_t now; /* Cached timestamp */ | 161 | psched_time_t now; /* Cached timestamp */ |
| 162 | psched_time_t now_rt; /* Cached real time */ | ||
| 163 | unsigned int pmask; | 162 | unsigned int pmask; |
| 164 | 163 | ||
| 165 | struct hrtimer delay_timer; | 164 | struct hrtimer delay_timer; |
| @@ -353,12 +352,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) | |||
| 353 | int toplevel = q->toplevel; | 352 | int toplevel = q->toplevel; |
| 354 | 353 | ||
| 355 | if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) { | 354 | if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) { |
| 356 | psched_time_t now; | 355 | psched_time_t now = psched_get_time(); |
| 357 | psched_tdiff_t incr; | ||
| 358 | |||
| 359 | now = psched_get_time(); | ||
| 360 | incr = now - q->now_rt; | ||
| 361 | now = q->now + incr; | ||
| 362 | 356 | ||
| 363 | do { | 357 | do { |
| 364 | if (cl->undertime < now) { | 358 | if (cl->undertime < now) { |
| @@ -700,8 +694,13 @@ cbq_update(struct cbq_sched_data *q) | |||
| 700 | struct cbq_class *this = q->tx_class; | 694 | struct cbq_class *this = q->tx_class; |
| 701 | struct cbq_class *cl = this; | 695 | struct cbq_class *cl = this; |
| 702 | int len = q->tx_len; | 696 | int len = q->tx_len; |
| 697 | psched_time_t now; | ||
| 703 | 698 | ||
| 704 | q->tx_class = NULL; | 699 | q->tx_class = NULL; |
| 700 | /* Time integrator. We calculate EOS time | ||
| 701 | * by adding expected packet transmission time. | ||
| 702 | */ | ||
| 703 | now = q->now + L2T(&q->link, len); | ||
| 705 | 704 | ||
| 706 | for ( ; cl; cl = cl->share) { | 705 | for ( ; cl; cl = cl->share) { |
| 707 | long avgidle = cl->avgidle; | 706 | long avgidle = cl->avgidle; |
| @@ -717,7 +716,7 @@ cbq_update(struct cbq_sched_data *q) | |||
| 717 | * idle = (now - last) - last_pktlen/rate | 716 | * idle = (now - last) - last_pktlen/rate |
| 718 | */ | 717 | */ |
| 719 | 718 | ||
| 720 | idle = q->now - cl->last; | 719 | idle = now - cl->last; |
| 721 | if ((unsigned long)idle > 128*1024*1024) { | 720 | if ((unsigned long)idle > 128*1024*1024) { |
| 722 | avgidle = cl->maxidle; | 721 | avgidle = cl->maxidle; |
| 723 | } else { | 722 | } else { |
| @@ -761,7 +760,7 @@ cbq_update(struct cbq_sched_data *q) | |||
| 761 | idle -= L2T(&q->link, len); | 760 | idle -= L2T(&q->link, len); |
| 762 | idle += L2T(cl, len); | 761 | idle += L2T(cl, len); |
| 763 | 762 | ||
| 764 | cl->undertime = q->now + idle; | 763 | cl->undertime = now + idle; |
| 765 | } else { | 764 | } else { |
| 766 | /* Underlimit */ | 765 | /* Underlimit */ |
| 767 | 766 | ||
| @@ -771,7 +770,8 @@ cbq_update(struct cbq_sched_data *q) | |||
| 771 | else | 770 | else |
| 772 | cl->avgidle = avgidle; | 771 | cl->avgidle = avgidle; |
| 773 | } | 772 | } |
| 774 | cl->last = q->now; | 773 | if ((s64)(now - cl->last) > 0) |
| 774 | cl->last = now; | ||
| 775 | } | 775 | } |
| 776 | 776 | ||
| 777 | cbq_update_toplevel(q, this, q->tx_borrowed); | 777 | cbq_update_toplevel(q, this, q->tx_borrowed); |
| @@ -943,31 +943,13 @@ cbq_dequeue(struct Qdisc *sch) | |||
| 943 | struct sk_buff *skb; | 943 | struct sk_buff *skb; |
| 944 | struct cbq_sched_data *q = qdisc_priv(sch); | 944 | struct cbq_sched_data *q = qdisc_priv(sch); |
| 945 | psched_time_t now; | 945 | psched_time_t now; |
| 946 | psched_tdiff_t incr; | ||
| 947 | 946 | ||
| 948 | now = psched_get_time(); | 947 | now = psched_get_time(); |
| 949 | incr = now - q->now_rt; | 948 | |
| 950 | 949 | if (q->tx_class) | |
| 951 | if (q->tx_class) { | ||
| 952 | psched_tdiff_t incr2; | ||
| 953 | /* Time integrator. We calculate EOS time | ||
| 954 | * by adding expected packet transmission time. | ||
| 955 | * If real time is greater, we warp artificial clock, | ||
| 956 | * so that: | ||
| 957 | * | ||
| 958 | * cbq_time = max(real_time, work); | ||
| 959 | */ | ||
| 960 | incr2 = L2T(&q->link, q->tx_len); | ||
| 961 | q->now += incr2; | ||
| 962 | cbq_update(q); | 950 | cbq_update(q); |
| 963 | if ((incr -= incr2) < 0) | 951 | |
| 964 | incr = 0; | 952 | q->now = now; |
| 965 | q->now += incr; | ||
| 966 | } else { | ||
| 967 | if (now > q->now) | ||
| 968 | q->now = now; | ||
| 969 | } | ||
| 970 | q->now_rt = now; | ||
| 971 | 953 | ||
| 972 | for (;;) { | 954 | for (;;) { |
| 973 | q->wd_expires = 0; | 955 | q->wd_expires = 0; |
| @@ -1223,7 +1205,6 @@ cbq_reset(struct Qdisc *sch) | |||
| 1223 | hrtimer_cancel(&q->delay_timer); | 1205 | hrtimer_cancel(&q->delay_timer); |
| 1224 | q->toplevel = TC_CBQ_MAXLEVEL; | 1206 | q->toplevel = TC_CBQ_MAXLEVEL; |
| 1225 | q->now = psched_get_time(); | 1207 | q->now = psched_get_time(); |
| 1226 | q->now_rt = q->now; | ||
| 1227 | 1208 | ||
| 1228 | for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++) | 1209 | for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++) |
| 1229 | q->active[prio] = NULL; | 1210 | q->active[prio] = NULL; |
| @@ -1407,7 +1388,6 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt) | |||
| 1407 | q->delay_timer.function = cbq_undelay; | 1388 | q->delay_timer.function = cbq_undelay; |
| 1408 | q->toplevel = TC_CBQ_MAXLEVEL; | 1389 | q->toplevel = TC_CBQ_MAXLEVEL; |
| 1409 | q->now = psched_get_time(); | 1390 | q->now = psched_get_time(); |
| 1410 | q->now_rt = q->now; | ||
| 1411 | 1391 | ||
| 1412 | cbq_link_class(&q->link); | 1392 | cbq_link_class(&q->link); |
| 1413 | 1393 | ||
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 06a9ee6b2d3a..a88b8524846e 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
| @@ -813,6 +813,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc, | |||
| 813 | else { | 813 | else { |
| 814 | dst_release(transport->dst); | 814 | dst_release(transport->dst); |
| 815 | transport->dst = NULL; | 815 | transport->dst = NULL; |
| 816 | ulp_notify = false; | ||
| 816 | } | 817 | } |
| 817 | 818 | ||
| 818 | spc_state = SCTP_ADDR_UNREACHABLE; | 819 | spc_state = SCTP_ADDR_UNREACHABLE; |
| @@ -1244,7 +1245,7 @@ static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr, | |||
| 1244 | { | 1245 | { |
| 1245 | u8 score_curr, score_best; | 1246 | u8 score_curr, score_best; |
| 1246 | 1247 | ||
| 1247 | if (best == NULL) | 1248 | if (best == NULL || curr == best) |
| 1248 | return curr; | 1249 | return curr; |
| 1249 | 1250 | ||
| 1250 | score_curr = sctp_trans_score(curr); | 1251 | score_curr = sctp_trans_score(curr); |
| @@ -1355,14 +1356,11 @@ static void sctp_select_active_and_retran_path(struct sctp_association *asoc) | |||
| 1355 | trans_sec = trans_pri; | 1356 | trans_sec = trans_pri; |
| 1356 | 1357 | ||
| 1357 | /* If we failed to find a usable transport, just camp on the | 1358 | /* If we failed to find a usable transport, just camp on the |
| 1358 | * primary or retran, even if they are inactive, if possible | 1359 | * active or pick a PF iff it's the better choice. |
| 1359 | * pick a PF iff it's the better choice. | ||
| 1360 | */ | 1360 | */ |
| 1361 | if (trans_pri == NULL) { | 1361 | if (trans_pri == NULL) { |
| 1362 | trans_pri = sctp_trans_elect_best(asoc->peer.primary_path, | 1362 | trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf); |
| 1363 | asoc->peer.retran_path); | 1363 | trans_sec = trans_pri; |
| 1364 | trans_pri = sctp_trans_elect_best(trans_pri, trans_pf); | ||
| 1365 | trans_sec = asoc->peer.primary_path; | ||
| 1366 | } | 1364 | } |
| 1367 | 1365 | ||
| 1368 | /* Set the active and retran transports. */ | 1366 | /* Set the active and retran transports. */ |
diff --git a/net/tipc/port.h b/net/tipc/port.h index 3f93454592b6..3087da39ee47 100644 --- a/net/tipc/port.h +++ b/net/tipc/port.h | |||
| @@ -179,9 +179,12 @@ static inline int tipc_port_importance(struct tipc_port *port) | |||
| 179 | return msg_importance(&port->phdr); | 179 | return msg_importance(&port->phdr); |
| 180 | } | 180 | } |
| 181 | 181 | ||
| 182 | static inline void tipc_port_set_importance(struct tipc_port *port, int imp) | 182 | static inline int tipc_port_set_importance(struct tipc_port *port, int imp) |
| 183 | { | 183 | { |
| 184 | if (imp > TIPC_CRITICAL_IMPORTANCE) | ||
| 185 | return -EINVAL; | ||
| 184 | msg_set_importance(&port->phdr, (u32)imp); | 186 | msg_set_importance(&port->phdr, (u32)imp); |
| 187 | return 0; | ||
| 185 | } | 188 | } |
| 186 | 189 | ||
| 187 | #endif | 190 | #endif |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 7d423ee10897..ff8c8118d56e 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
| @@ -1973,7 +1973,7 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt, | |||
| 1973 | 1973 | ||
| 1974 | switch (opt) { | 1974 | switch (opt) { |
| 1975 | case TIPC_IMPORTANCE: | 1975 | case TIPC_IMPORTANCE: |
| 1976 | tipc_port_set_importance(port, value); | 1976 | res = tipc_port_set_importance(port, value); |
| 1977 | break; | 1977 | break; |
| 1978 | case TIPC_SRC_DROPPABLE: | 1978 | case TIPC_SRC_DROPPABLE: |
| 1979 | if (sock->type != SOCK_STREAM) | 1979 | if (sock->type != SOCK_STREAM) |
diff --git a/scripts/kernel-doc b/scripts/kernel-doc index 16a07cfa4d34..70bea942b413 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc | |||
| @@ -2085,6 +2085,7 @@ sub dump_function($$) { | |||
| 2085 | $prototype =~ s/^noinline +//; | 2085 | $prototype =~ s/^noinline +//; |
| 2086 | $prototype =~ s/__init +//; | 2086 | $prototype =~ s/__init +//; |
| 2087 | $prototype =~ s/__init_or_module +//; | 2087 | $prototype =~ s/__init_or_module +//; |
| 2088 | $prototype =~ s/__meminit +//; | ||
| 2088 | $prototype =~ s/__must_check +//; | 2089 | $prototype =~ s/__must_check +//; |
| 2089 | $prototype =~ s/__weak +//; | 2090 | $prototype =~ s/__weak +//; |
| 2090 | my $define = $prototype =~ s/^#\s*define\s+//; #ak added | 2091 | my $define = $prototype =~ s/^#\s*define\s+//; #ak added |
