292 files changed, 7521 insertions, 3488 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss b/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss
index 0a92a7c93a62..4f29e5f1ebfa 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss
+++ b/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss
@@ -31,3 +31,31 @@ Date: March 2009
 Kernel Version: 2.6.30
 Contact: iss_storagedev@hp.com
 Description: A symbolic link to /sys/block/cciss!cXdY
+
+Where: /sys/bus/pci/devices/<dev>/ccissX/rescan
+Date: August 2009
+Kernel Version: 2.6.31
+Contact: iss_storagedev@hp.com
+Description: Kicks off a rescan of the controller to discover logical
+             drive topology changes.
+
+Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/lunid
+Date: August 2009
+Kernel Version: 2.6.31
+Contact: iss_storagedev@hp.com
+Description: Displays the 8-byte LUN ID used to address logical
+             drive Y of controller X.
+
+Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/raid_level
+Date: August 2009
+Kernel Version: 2.6.31
+Contact: iss_storagedev@hp.com
+Description: Displays the RAID level of logical drive Y of
+             controller X.
+
+Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/usage_count
+Date: August 2009
+Kernel Version: 2.6.31
+Contact: iss_storagedev@hp.com
+Description: Displays the usage count (number of opens) of logical drive Y
+             of controller X.
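A minimal userspace sketch (not part of the patch) of how the new cciss sysfs
attributes above might be exercised from C; the controller path
/sys/bus/pci/devices/0000:06:00.0/cciss0 and logical drive c0d0 are
hypothetical examples and must be adjusted to the actual topology:

    /* cciss_rescan.c: trigger a controller rescan, then read one drive's LUN ID */
    #include <stdio.h>

    int main(void)
    {
        /* hypothetical controller/drive paths */
        const char *rescan = "/sys/bus/pci/devices/0000:06:00.0/cciss0/rescan";
        const char *lunid  = "/sys/bus/pci/devices/0000:06:00.0/cciss0/c0d0/lunid";
        char buf[64];
        FILE *f;

        f = fopen(rescan, "w");         /* any write kicks off the rescan */
        if (!f) { perror(rescan); return 1; }
        fputs("1\n", f);
        fclose(f);

        f = fopen(lunid, "r");          /* 8-byte LUN ID of logical drive Y */
        if (!f) { perror(lunid); return 1; }
        if (fgets(buf, sizeof(buf), f))
            printf("LUN ID: %s", buf);
        fclose(f);
        return 0;
    }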
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 455d4e6d346d..0b33bfe7dde9 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -227,7 +227,14 @@ as the path relative to the root of the cgroup file system.
 Each cgroup is represented by a directory in the cgroup file system
 containing the following files describing that cgroup:

- - tasks: list of tasks (by pid) attached to that cgroup
+ - tasks: list of tasks (by pid) attached to that cgroup. This list
+   is not guaranteed to be sorted. Writing a thread id into this file
+   moves the thread into this cgroup.
+ - cgroup.procs: list of tgids in the cgroup. This list is not
+   guaranteed to be sorted or free of duplicate tgids, and userspace
+   should sort/uniquify the list if this property is required.
+   Writing a tgid into this file moves all threads with that tgid into
+   this cgroup.
 - notify_on_release flag: run the release agent on exit?
 - release_agent: the path to use for release notifications (this file
   exists in the top cgroup only)
@@ -374,7 +381,7 @@ Now you want to do something with this cgroup.

 In this directory you can find several files:
 # ls
-notify_on_release tasks
+cgroup.procs notify_on_release tasks
 (plus whatever files added by the attached subsystems)

 Now attach your shell to this cgroup:
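A hedged illustration (not part of the patch) of the cgroup.procs semantics
described above: writing a tgid moves every thread of that thread group into
the cgroup. The mount point /dev/cgroup and group name "foo" are hypothetical:

    /* move_self.c: attach the whole current thread group to a cgroup */
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        FILE *f = fopen("/dev/cgroup/foo/cgroup.procs", "w");  /* hypothetical path */
        if (!f) { perror("cgroup.procs"); return 1; }
        fprintf(f, "%d\n", getpid());   /* pid of the group leader == tgid */
        fclose(f);
        return 0;
    }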
diff --git a/Documentation/hwmon/ltc4215 b/Documentation/hwmon/ltc4215
index 2e6a21eb656c..c196a1846259 100644
--- a/Documentation/hwmon/ltc4215
+++ b/Documentation/hwmon/ltc4215
@@ -22,12 +22,13 @@ Usage Notes
 -----------

 This driver does not probe for LTC4215 devices, due to the fact that some
-of the possible addresses are unfriendly to probing. You will need to use
-the "force" parameter to tell the driver where to find the device.
+of the possible addresses are unfriendly to probing. You will have to
+instantiate the devices explicitly.

 Example: the following will load the driver for an LTC4215 at address 0x44
 on I2C bus #0:
-$ modprobe ltc4215 force=0,0x44
+$ modprobe ltc4215
+$ echo ltc4215 0x44 > /sys/bus/i2c/devices/i2c-0/new_device


 Sysfs entries
diff --git a/Documentation/hwmon/ltc4245 b/Documentation/hwmon/ltc4245
index bae7a3adc5d8..02838a47d862 100644
--- a/Documentation/hwmon/ltc4245
+++ b/Documentation/hwmon/ltc4245
@@ -23,12 +23,13 @@ Usage Notes
 -----------

 This driver does not probe for LTC4245 devices, due to the fact that some
-of the possible addresses are unfriendly to probing. You will need to use
-the "force" parameter to tell the driver where to find the device.
+of the possible addresses are unfriendly to probing. You will have to
+instantiate the devices explicitly.

 Example: the following will load the driver for an LTC4245 at address 0x23
 on I2C bus #1:
-$ modprobe ltc4245 force=1,0x23
+$ modprobe ltc4245
+$ echo ltc4245 0x23 > /sys/bus/i2c/devices/i2c-1/new_device


 Sysfs entries
diff --git a/Documentation/i2c/instantiating-devices b/Documentation/i2c/instantiating-devices
index c740b7b41088..e89490270aba 100644
--- a/Documentation/i2c/instantiating-devices
+++ b/Documentation/i2c/instantiating-devices
@@ -188,7 +188,7 @@ segment, the address is sufficient to uniquely identify the device to be
 deleted.

 Example:
-# echo eeprom 0x50 > /sys/class/i2c-adapter/i2c-3/new_device
+# echo eeprom 0x50 > /sys/bus/i2c/devices/i2c-3/new_device

 While this interface should only be used when in-kernel device declaration
 can't be done, there is a variety of cases where it can be helpful:
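The same new_device/delete_device interface can also be driven from a program
instead of a shell. A hedged C sketch, assuming bus i2c-3 and an EEPROM at
0x50 as in the example above:

    /* instantiate.c: create and later delete an i2c device from userspace */
    #include <stdio.h>

    static int write_str(const char *path, const char *s)
    {
        FILE *f = fopen(path, "w");
        if (!f) { perror(path); return -1; }
        fputs(s, f);
        return fclose(f);
    }

    int main(void)
    {
        /* same effect as: echo eeprom 0x50 > .../i2c-3/new_device */
        write_str("/sys/bus/i2c/devices/i2c-3/new_device", "eeprom 0x50\n");
        /* ... use the device ... */
        write_str("/sys/bus/i2c/devices/i2c-3/delete_device", "0x50\n");
        return 0;
    }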
diff --git a/Documentation/isdn/INTERFACE.CAPI b/Documentation/isdn/INTERFACE.CAPI
index 686e107923ec..5fe8de5cc727 100644
--- a/Documentation/isdn/INTERFACE.CAPI
+++ b/Documentation/isdn/INTERFACE.CAPI
@@ -60,10 +60,9 @@ open() operation on regular files or character devices.

 After a successful return from register_appl(), CAPI messages from the
 application may be passed to the driver for the device via calls to the
-send_message() callback function. The CAPI message to send is stored in the
-data portion of an skb. Conversely, the driver may call Kernel CAPI's
-capi_ctr_handle_message() function to pass a received CAPI message to Kernel
-CAPI for forwarding to an application, specifying its ApplID.
+send_message() callback function. Conversely, the driver may call Kernel
+CAPI's capi_ctr_handle_message() function to pass a received CAPI message to
+Kernel CAPI for forwarding to an application, specifying its ApplID.

 Deregistration requests (CAPI operation CAPI_RELEASE) from applications are
 forwarded as calls to the release_appl() callback function, passing the same
@@ -142,6 +141,7 @@ u16 (*send_message)(struct capi_ctr *ctrlr, struct sk_buff *skb)
	to accepting or queueing the message. Errors occurring during the
	actual processing of the message should be signaled with an
	appropriate reply message.
+	May be called in process or interrupt context.
	Calls to this function are not serialized by Kernel CAPI, ie. it must
	be prepared to be re-entered.

@@ -154,7 +154,8 @@ read_proc_t *ctr_read_proc
	system entry, /proc/capi/controllers/<n>; will be called with a
	pointer to the device's capi_ctr structure as the last (data) argument

-Note: Callback functions are never called in interrupt context.
+Note: Callback functions except send_message() are never called in interrupt
+context.

 - to be filled in before calling capi_ctr_ready():

@@ -171,14 +172,40 @@ u8 serial[CAPI_SERIAL_LEN]
	value to return for CAPI_GET_SERIAL


-4.3 The _cmsg Structure
+4.3 SKBs
+
+CAPI messages are passed between Kernel CAPI and the driver via send_message()
+and capi_ctr_handle_message(), stored in the data portion of a socket buffer
+(skb). Each skb contains a single CAPI message coded according to the CAPI 2.0
+standard.
+
+For the data transfer messages, DATA_B3_REQ and DATA_B3_IND, the actual
+payload data immediately follows the CAPI message itself within the same skb.
+The Data and Data64 parameters are not used for processing. The Data64
+parameter may be omitted by setting the length field of the CAPI message to 22
+instead of 30.
+
+
+4.4 The _cmsg Structure

 (declared in <linux/isdn/capiutil.h>)

 The _cmsg structure stores the contents of a CAPI 2.0 message in an easily
-accessible form. It contains members for all possible CAPI 2.0 parameters, of
-which only those appearing in the message type currently being processed are
-actually used. Unused members should be set to zero.
+accessible form. It contains members for all possible CAPI 2.0 parameters,
+including subparameters of the Additional Info and B Protocol structured
+parameters, with the following exceptions:
+
+* second Calling party number (CONNECT_IND)
+
+* Data64 (DATA_B3_REQ and DATA_B3_IND)
+
+* Sending complete (subparameter of Additional Info, CONNECT_REQ and INFO_REQ)
+
+* Global Configuration (subparameter of B Protocol, CONNECT_REQ, CONNECT_RESP
+  and SELECT_B_PROTOCOL_REQ)
+
+Only those parameters appearing in the message type currently being processed
+are actually used. Unused members should be set to zero.

 Members are named after the CAPI 2.0 standard names of the parameters they
 represent. See <linux/isdn/capiutil.h> for the exact spelling. Member data
@@ -190,18 +217,19 @@ u16 for CAPI parameters of type 'word'

 u32 for CAPI parameters of type 'dword'

-_cstruct for CAPI parameters of type 'struct' not containing any
-	variably-sized (struct) subparameters (eg. 'Called Party Number')
+_cstruct for CAPI parameters of type 'struct'
	The member is a pointer to a buffer containing the parameter in
	CAPI encoding (length + content). It may also be NULL, which will
	be taken to represent an empty (zero length) parameter.
+	Subparameters are stored in encoded form within the content part.

-_cmstruct for CAPI parameters of type 'struct' containing 'struct'
-	subparameters ('Additional Info' and 'B Protocol')
+_cmstruct alternative representation for CAPI parameters of type 'struct'
+	(used only for the 'Additional Info' and 'B Protocol' parameters)
	The representation is a single byte containing one of the values:
-	CAPI_DEFAULT: the parameter is empty
-	CAPI_COMPOSE: the values of the subparameters are stored
-	individually in the corresponding _cmsg structure members
+	CAPI_DEFAULT: The parameter is empty/absent.
+	CAPI_COMPOSE: The parameter is present.
+	Subparameter values are stored individually in the corresponding
+	_cmsg structure members.

 Functions capi_cmsg2message() and capi_message2cmsg() are provided to convert
 messages between their transport encoding described in the CAPI 2.0 standard
@@ -297,3 +325,26 @@ char *capi_cmd2str(u8 Command, u8 Subcommand)
	be NULL if the command/subcommand is not one of those defined in the
	CAPI 2.0 standard.

+
+7. Debugging
+
+The module kernelcapi has a module parameter showcapimsgs controlling some
+debugging output produced by the module. It can only be set when the module is
+loaded, via a parameter "showcapimsgs=<n>" to the modprobe command, either on
+the command line or in the configuration file.
+
+If the lowest bit of showcapimsgs is set, kernelcapi logs controller and
+application up and down events.
+
+In addition, every registered CAPI controller has an associated traceflag
+parameter controlling how CAPI messages sent from and to the controller are
+logged. The traceflag parameter is initialized with the value of the
+showcapimsgs parameter when the controller is registered, but can later be
+changed via the MANUFACTURER_REQ command KCAPI_CMD_TRACE.
+
+If the value of traceflag is non-zero, CAPI messages are logged.
+DATA_B3 messages are only logged if the value of traceflag is > 2.
+
+If the lowest bit of traceflag is set, only the command/subcommand and message
+length are logged. Otherwise, kernelcapi logs a readable representation of
+the entire message.
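A hedged sketch (not part of the patch) of a driver-side send_message()
implementation following the conventions of sections 4.3 and 4.4 above;
my_queue_to_hardware() is a hypothetical driver function, and error handling
as well as flow control are omitted:

    /* sketch only: decode the CAPI message carried in the skb's data portion */
    #include <linux/isdn/capilli.h>
    #include <linux/isdn/capiutil.h>

    static u16 my_send_message(struct capi_ctr *ctrlr, struct sk_buff *skb)
    {
        _cmsg cmsg;

        /* may be called in interrupt context, so do not sleep here */
        capi_message2cmsg(&cmsg, skb->data);

        /* for DATA_B3_REQ the payload follows the message in the same skb,
         * starting at skb->data plus the message's length field */

        my_queue_to_hardware(skb);          /* hypothetical */
        return CAPI_NOERROR;
    }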
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 6fa7292947e5..9107b387e91f 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -671,6 +671,7 @@ and is between 256 and 4096 characters. It is defined in the file
	earlyprintk=	[X86,SH,BLACKFIN]
			earlyprintk=vga
			earlyprintk=serial[,ttySn[,baudrate]]
+			earlyprintk=ttySn[,baudrate]
			earlyprintk=dbgp[debugController#]

			Append ",keep" to not disable it when the real console
diff --git a/Documentation/i2c/chips/eeprom b/Documentation/misc-devices/eeprom
index f7e8104b5764..f7e8104b5764 100644
--- a/Documentation/i2c/chips/eeprom
+++ b/Documentation/misc-devices/eeprom
diff --git a/Documentation/i2c/chips/max6875 b/Documentation/misc-devices/max6875
index 10ca43cd1a72..1e89ee3ccc1b 100644
--- a/Documentation/i2c/chips/max6875
+++ b/Documentation/misc-devices/max6875
@@ -42,10 +42,12 @@ General Remarks

 Valid addresses for the MAX6875 are 0x50 and 0x52.
 Valid addresses for the MAX6874 are 0x50, 0x52, 0x54 and 0x56.
-The driver does not probe any address, so you must force the address.
+The driver does not probe any address, so you explicitly instantiate the
+devices.

 Example:
-$ modprobe max6875 force=0,0x50
+$ modprobe max6875
+$ echo max6875 0x50 > /sys/bus/i2c/devices/i2c-0/new_device

 The MAX6874/MAX6875 ignores address bit 0, so this driver attaches to multiple
 addresses. For example, for address 0x50, it also reserves 0x51.
diff --git a/Documentation/networking/pktgen.txt b/Documentation/networking/pktgen.txt
index c6cf4a3c16e0..61bb645d50e0 100644
--- a/Documentation/networking/pktgen.txt
+++ b/Documentation/networking/pktgen.txt
@@ -90,6 +90,11 @@ Examples:
 pgset "dstmac 00:00:00:00:00:00" sets MAC destination address
 pgset "srcmac 00:00:00:00:00:00" sets MAC source address

+ pgset "queue_map_min 0" Sets the min value of tx queue interval
+ pgset "queue_map_max 7" Sets the max value of tx queue interval, for multiqueue devices
+                         To select queue 1 of a given device,
+                         use queue_map_min=1 and queue_map_max=1
+
 pgset "src_mac_count 1" Sets the number of MACs we'll range through.
 The 'minimum' MAC is what you set with srcmac.

@@ -101,6 +106,9 @@ Examples:
                              IPDST_RND, UDPSRC_RND,
                              UDPDST_RND, MACSRC_RND, MACDST_RND
                              MPLS_RND, VID_RND, SVID_RND
+                             QUEUE_MAP_RND # queue map random
+                             QUEUE_MAP_CPU # queue map mirrors smp_processor_id()
+

 pgset "udp_src_min 9" set UDP source port min, If < udp_src_max, then
                       cycle through the port range.
diff --git a/Documentation/vm/ksm.txt b/Documentation/vm/ksm.txt
index 72a22f65960e..262d8e6793a3 100644
--- a/Documentation/vm/ksm.txt
+++ b/Documentation/vm/ksm.txt
@@ -52,15 +52,15 @@ The KSM daemon is controlled by sysfs files in /sys/kernel/mm/ksm/,
 readable by all but writable only by root:

 max_kernel_pages - set to maximum number of kernel pages that KSM may use
-                   e.g. "echo 2000 > /sys/kernel/mm/ksm/max_kernel_pages"
+                   e.g. "echo 100000 > /sys/kernel/mm/ksm/max_kernel_pages"
                    Value 0 imposes no limit on the kernel pages KSM may use;
                    but note that any process using MADV_MERGEABLE can cause
                    KSM to allocate these pages, unswappable until it exits.
-                   Default: 2000 (chosen for demonstration purposes)
+                   Default: quarter of memory (chosen to not pin too much)

 pages_to_scan    - how many present pages to scan before ksmd goes to sleep
-                   e.g. "echo 200 > /sys/kernel/mm/ksm/pages_to_scan"
-                   Default: 200 (chosen for demonstration purposes)
+                   e.g. "echo 100 > /sys/kernel/mm/ksm/pages_to_scan"
+                   Default: 100 (chosen for demonstration purposes)

 sleep_millisecs  - how many milliseconds ksmd should sleep before next scan
                    e.g. "echo 20 > /sys/kernel/mm/ksm/sleep_millisecs"
@@ -70,7 +70,8 @@ run - set 0 to stop ksmd from running but keep merged pages,
                    set 1 to run ksmd e.g. "echo 1 > /sys/kernel/mm/ksm/run",
                    set 2 to stop ksmd and unmerge all pages currently merged,
                    but leave mergeable areas registered for next run
-                   Default: 1 (for immediate use by apps which register)
+                   Default: 0 (must be changed to 1 to activate KSM,
+                              except if CONFIG_SYSFS is disabled)

 The effectiveness of KSM and MADV_MERGEABLE is shown in /sys/kernel/mm/ksm/:

@@ -86,4 +87,4 @@ pages_volatile embraces several different kinds of activity, but a high
 proportion there would also indicate poor use of madvise MADV_MERGEABLE.

 Izik Eidus,
-Hugh Dickins, 30 July 2009
+Hugh Dickins, 24 Sept 2009
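A small self-contained example (not part of the patch) of the MADV_MERGEABLE
registration that the text above refers to; run must first be set to 1
("echo 1 > /sys/kernel/mm/ksm/run"), and MADV_MERGEABLE may need to be defined
by hand with older libc headers:

    /* ksm_demo.c: register an anonymous mapping of identical pages with KSM */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/mman.h>

    #ifndef MADV_MERGEABLE
    #define MADV_MERGEABLE 12   /* from asm-generic/mman-common.h */
    #endif

    int main(void)
    {
        size_t len = 64 << 20;  /* 64 MB of identical pages */
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }
        memset(p, 0x5a, len);
        if (madvise(p, len, MADV_MERGEABLE))
            perror("madvise(MADV_MERGEABLE)");
        pause();    /* give ksmd time to scan and merge; watch pages_shared */
        return 0;
    }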
diff --git a/Documentation/vm/page-types.c b/Documentation/vm/page-types.c
index fa1a30d9e9d5..3ec4f2a22585 100644
--- a/Documentation/vm/page-types.c
+++ b/Documentation/vm/page-types.c
@@ -2,7 +2,10 @@
  * page-types: Tool for querying page flags
  *
  * Copyright (C) 2009 Intel corporation
- * Copyright (C) 2009 Wu Fengguang <fengguang.wu@intel.com>
+ *
+ * Authors: Wu Fengguang <fengguang.wu@intel.com>
+ *
+ * Released under the General Public License (GPL).
  */

 #define _LARGEFILE64_SOURCE
@@ -69,7 +72,9 @@
 #define KPF_COMPOUND_TAIL	16
 #define KPF_HUGE		17
 #define KPF_UNEVICTABLE		18
+#define KPF_HWPOISON		19
 #define KPF_NOPAGE		20
+#define KPF_KSM			21

 /* [32-] kernel hacking assistances */
 #define KPF_RESERVED		32
@@ -116,7 +121,9 @@ static char *page_flag_names[] = {
	[KPF_COMPOUND_TAIL]	= "T:compound_tail",
	[KPF_HUGE]		= "G:huge",
	[KPF_UNEVICTABLE]	= "u:unevictable",
+	[KPF_HWPOISON]		= "X:hwpoison",
	[KPF_NOPAGE]		= "n:nopage",
+	[KPF_KSM]		= "x:ksm",

	[KPF_RESERVED]		= "r:reserved",
	[KPF_MLOCKED]		= "m:mlocked",
@@ -152,9 +159,6 @@ static unsigned long opt_size[MAX_ADDR_RANGES];
 static int nr_vmas;
 static unsigned long pg_start[MAX_VMAS];
 static unsigned long pg_end[MAX_VMAS];
-static unsigned long voffset;
-
-static int pagemap_fd;

 #define MAX_BIT_FILTERS	64
 static int nr_bit_filters;
@@ -163,9 +167,16 @@ static uint64_t opt_bits[MAX_BIT_FILTERS];

 static int page_size;

-#define PAGES_BATCH	(64 << 10)	/* 64k pages */
+static int pagemap_fd;
 static int kpageflags_fd;

+static int opt_hwpoison;
+static int opt_unpoison;
+
+static char *hwpoison_debug_fs = "/debug/hwpoison";
+static int hwpoison_inject_fd;
+static int hwpoison_forget_fd;
+
 #define HASH_SHIFT	13
 #define HASH_SIZE	(1 << HASH_SHIFT)
 #define HASH_MASK	(HASH_SIZE - 1)
@@ -207,6 +218,74 @@ static void fatal(const char *x, ...)
	exit(EXIT_FAILURE);
 }

+int checked_open(const char *pathname, int flags)
+{
+	int fd = open(pathname, flags);
+
+	if (fd < 0) {
+		perror(pathname);
+		exit(EXIT_FAILURE);
+	}
+
+	return fd;
+}
+
+/*
+ * pagemap/kpageflags routines
+ */
+
+static unsigned long do_u64_read(int fd, char *name,
+				 uint64_t *buf,
+				 unsigned long index,
+				 unsigned long count)
+{
+	long bytes;
+
+	if (index > ULONG_MAX / 8)
+		fatal("index overflow: %lu\n", index);
+
+	if (lseek(fd, index * 8, SEEK_SET) < 0) {
+		perror(name);
+		exit(EXIT_FAILURE);
+	}
+
+	bytes = read(fd, buf, count * 8);
+	if (bytes < 0) {
+		perror(name);
+		exit(EXIT_FAILURE);
+	}
+	if (bytes % 8)
+		fatal("partial read: %lu bytes\n", bytes);
+
+	return bytes / 8;
+}
+
+static unsigned long kpageflags_read(uint64_t *buf,
+				     unsigned long index,
+				     unsigned long pages)
+{
+	return do_u64_read(kpageflags_fd, PROC_KPAGEFLAGS, buf, index, pages);
+}
+
+static unsigned long pagemap_read(uint64_t *buf,
+				  unsigned long index,
+				  unsigned long pages)
+{
+	return do_u64_read(pagemap_fd, "/proc/pid/pagemap", buf, index, pages);
+}
+
+static unsigned long pagemap_pfn(uint64_t val)
+{
+	unsigned long pfn;
+
+	if (val & PM_PRESENT)
+		pfn = PM_PFRAME(val);
+	else
+		pfn = 0;
+
+	return pfn;
+}
+

 /*
  * page flag names
@@ -255,7 +334,8 @@ static char *page_flag_longname(uint64_t flags)
  * page list and summary
  */

-static void show_page_range(unsigned long offset, uint64_t flags)
+static void show_page_range(unsigned long voffset,
+			    unsigned long offset, uint64_t flags)
 {
	static uint64_t flags0;
	static unsigned long voff;
@@ -281,7 +361,8 @@ static void show_page_range(unsigned long offset, uint64_t flags)
	count = 1;
 }

-static void show_page(unsigned long offset, uint64_t flags)
+static void show_page(unsigned long voffset,
+		      unsigned long offset, uint64_t flags)
 {
	if (opt_pid)
		printf("%lx\t", voffset);
@@ -362,6 +443,62 @@ static uint64_t well_known_flags(uint64_t flags)
	return flags;
 }

+static uint64_t kpageflags_flags(uint64_t flags)
+{
+	flags = expand_overloaded_flags(flags);
+
+	if (!opt_raw)
+		flags = well_known_flags(flags);
+
+	return flags;
+}
+
+/*
+ * page actions
+ */
+
+static void prepare_hwpoison_fd(void)
+{
+	char buf[100];
+
+	if (opt_hwpoison && !hwpoison_inject_fd) {
+		sprintf(buf, "%s/corrupt-pfn", hwpoison_debug_fs);
+		hwpoison_inject_fd = checked_open(buf, O_WRONLY);
+	}
+
+	if (opt_unpoison && !hwpoison_forget_fd) {
+		sprintf(buf, "%s/renew-pfn", hwpoison_debug_fs);
+		hwpoison_forget_fd = checked_open(buf, O_WRONLY);
+	}
+}
+
+static int hwpoison_page(unsigned long offset)
+{
+	char buf[100];
+	int len;
+
+	len = sprintf(buf, "0x%lx\n", offset);
+	len = write(hwpoison_inject_fd, buf, len);
+	if (len < 0) {
+		perror("hwpoison inject");
+		return len;
+	}
+	return 0;
+}
+
+static int unpoison_page(unsigned long offset)
+{
+	char buf[100];
+	int len;
+
+	len = sprintf(buf, "0x%lx\n", offset);
+	len = write(hwpoison_forget_fd, buf, len);
+	if (len < 0) {
+		perror("hwpoison forget");
+		return len;
+	}
+	return 0;
+}

 /*
  * page frame walker
@@ -394,104 +531,83 @@
	exit(EXIT_FAILURE);
 }

-static void add_page(unsigned long offset, uint64_t flags)
+static void add_page(unsigned long voffset,
+		     unsigned long offset, uint64_t flags)
 {
-	flags = expand_overloaded_flags(flags);
-
-	if (!opt_raw)
-		flags = well_known_flags(flags);
+	flags = kpageflags_flags(flags);

	if (!bit_mask_ok(flags))
		return;

+	if (opt_hwpoison)
+		hwpoison_page(offset);
+	if (opt_unpoison)
+		unpoison_page(offset);
+
	if (opt_list == 1)
-		show_page_range(offset, flags);
+		show_page_range(voffset, offset, flags);
	else if (opt_list == 2)
-		show_page(offset, flags);
+		show_page(voffset, offset, flags);

	nr_pages[hash_slot(flags)]++;
	total_pages++;
 }

-static void walk_pfn(unsigned long index, unsigned long count)
+#define KPAGEFLAGS_BATCH	(64 << 10)	/* 64k pages */
+static void walk_pfn(unsigned long voffset,
+		     unsigned long index,
+		     unsigned long count)
 {
+	uint64_t buf[KPAGEFLAGS_BATCH];
	unsigned long batch;
-	unsigned long n;
+	unsigned long pages;
	unsigned long i;

-	if (index > ULONG_MAX / KPF_BYTES)
-		fatal("index overflow: %lu\n", index);
-
-	lseek(kpageflags_fd, index * KPF_BYTES, SEEK_SET);
-
	while (count) {
-		uint64_t kpageflags_buf[KPF_BYTES * PAGES_BATCH];
-
-		batch = min_t(unsigned long, count, PAGES_BATCH);
-		n = read(kpageflags_fd, kpageflags_buf, batch * KPF_BYTES);
-		if (n == 0)
+		batch = min_t(unsigned long, count, KPAGEFLAGS_BATCH);
+		pages = kpageflags_read(buf, index, batch);
+		if (pages == 0)
			break;
-		if (n < 0) {
-			perror(PROC_KPAGEFLAGS);
-			exit(EXIT_FAILURE);
-		}

-		if (n % KPF_BYTES != 0)
-			fatal("partial read: %lu bytes\n", n);
-		n = n / KPF_BYTES;
+		for (i = 0; i < pages; i++)
+			add_page(voffset + i, index + i, buf[i]);

-		for (i = 0; i < n; i++)
-			add_page(index + i, kpageflags_buf[i]);
-
-		index += batch;
-		count -= batch;
+		index += pages;
+		count -= pages;
	}
 }

-
-#define PAGEMAP_BATCH 4096
-static unsigned long task_pfn(unsigned long pgoff)
+#define PAGEMAP_BATCH	(64 << 10)
+static void walk_vma(unsigned long index, unsigned long count)
 {
-	static uint64_t buf[PAGEMAP_BATCH];
-	static unsigned long start;
-	static long count;
-	uint64_t pfn;
+	uint64_t buf[PAGEMAP_BATCH];
+	unsigned long batch;
+	unsigned long pages;
+	unsigned long pfn;
+	unsigned long i;

-	if (pgoff < start || pgoff >= start + count) {
-		if (lseek64(pagemap_fd,
-			    (uint64_t)pgoff * PM_ENTRY_BYTES,
-			    SEEK_SET) < 0) {
-			perror("pagemap seek");
-			exit(EXIT_FAILURE);
-		}
-		count = read(pagemap_fd, buf, sizeof(buf));
-		if (count == 0)
-			return 0;
-		if (count < 0) {
-			perror("pagemap read");
-			exit(EXIT_FAILURE);
-		}
-		if (count % PM_ENTRY_BYTES) {
-			fatal("pagemap read not aligned.\n");
-			exit(EXIT_FAILURE);
-		}
-		count /= PM_ENTRY_BYTES;
-		start = pgoff;
-	}
+	while (count) {
+		batch = min_t(unsigned long, count, PAGEMAP_BATCH);
+		pages = pagemap_read(buf, index, batch);
+		if (pages == 0)
+			break;

-	pfn = buf[pgoff - start];
-	if (pfn & PM_PRESENT)
-		pfn = PM_PFRAME(pfn);
-	else
-		pfn = 0;
+		for (i = 0; i < pages; i++) {
+			pfn = pagemap_pfn(buf[i]);
+			if (pfn)
+				walk_pfn(index + i, pfn, 1);
+		}

-	return pfn;
+		index += pages;
+		count -= pages;
+	}
 }

 static void walk_task(unsigned long index, unsigned long count)
 {
-	int i = 0;
	const unsigned long end = index + count;
+	unsigned long start;
+	int i = 0;

	while (index < end) {

@@ -501,15 +617,11 @@ static void walk_task(unsigned long index, unsigned long count)
		if (pg_start[i] >= end)
			return;

-		voffset = max_t(unsigned long, pg_start[i], index);
+		start = max_t(unsigned long, pg_start[i], index);
		index = min_t(unsigned long, pg_end[i], end);

-		assert(voffset < index);
-		for (; voffset < index; voffset++) {
-			unsigned long pfn = task_pfn(voffset);
-			if (pfn)
-				walk_pfn(pfn, 1);
-		}
+		assert(start < index);
+		walk_vma(start, index - start);
	}
 }

@@ -527,18 +639,14 @@ static void walk_addr_ranges(void)
 {
	int i;

-	kpageflags_fd = open(PROC_KPAGEFLAGS, O_RDONLY);
-	if (kpageflags_fd < 0) {
-		perror(PROC_KPAGEFLAGS);
-		exit(EXIT_FAILURE);
-	}
+	kpageflags_fd = checked_open(PROC_KPAGEFLAGS, O_RDONLY);

	if (!nr_addr_ranges)
		add_addr_range(0, ULONG_MAX);

	for (i = 0; i < nr_addr_ranges; i++)
		if (!opt_pid)
-			walk_pfn(opt_offset[i], opt_size[i]);
+			walk_pfn(0, opt_offset[i], opt_size[i]);
		else
			walk_task(opt_offset[i], opt_size[i]);

@@ -575,6 +683,8 @@
 "            -l|--list         Show page details in ranges\n"
 "            -L|--list-each    Show page details one by one\n"
 "            -N|--no-summary   Don't show summay info\n"
+"            -X|--hwpoison     hwpoison pages\n"
+"            -x|--unpoison     unpoison pages\n"
 "            -h|--help         Show this usage message\n"
 "addr-spec:\n"
 "            N                 one page at offset N (unit: pages)\n"
@@ -624,11 +734,7 @@ static void parse_pid(const char *str)
	opt_pid = parse_number(str);

	sprintf(buf, "/proc/%d/pagemap", opt_pid);
-	pagemap_fd = open(buf, O_RDONLY);
-	if (pagemap_fd < 0) {
-		perror(buf);
-		exit(EXIT_FAILURE);
-	}
+	pagemap_fd = checked_open(buf, O_RDONLY);

	sprintf(buf, "/proc/%d/maps", opt_pid);
	file = fopen(buf, "r");
@@ -788,6 +894,8 @@ static struct option opts[] = {
	{ "list"      , 0, NULL, 'l' },
	{ "list-each" , 0, NULL, 'L' },
	{ "no-summary", 0, NULL, 'N' },
+	{ "hwpoison"  , 0, NULL, 'X' },
+	{ "unpoison"  , 0, NULL, 'x' },
	{ "help"      , 0, NULL, 'h' },
	{ NULL        , 0, NULL, 0 }
 };
@@ -799,7 +907,7 @@ int main(int argc, char *argv[])
	page_size = getpagesize();

	while ((c = getopt_long(argc, argv,
-			"rp:f:a:b:lLNh", opts, NULL)) != -1) {
+			"rp:f:a:b:lLNXxh", opts, NULL)) != -1) {
		switch (c) {
		case 'r':
			opt_raw = 1;
@@ -825,6 +933,14 @@ int main(int argc, char *argv[])
		case 'N':
			opt_no_summary = 1;
			break;
+		case 'X':
+			opt_hwpoison = 1;
+			prepare_hwpoison_fd();
+			break;
+		case 'x':
+			opt_unpoison = 1;
+			prepare_hwpoison_fd();
+			break;
		case 'h':
			usage();
			exit(0);
@@ -844,7 +960,7 @@
	walk_addr_ranges();

	if (opt_list == 1)
-		show_page_range(0, 0);		/* drain the buffer */
+		show_page_range(0, 0, 0);	/* drain the buffer */

	if (opt_no_summary)
		return 0;
diff --git a/Documentation/vm/pagemap.txt b/Documentation/vm/pagemap.txt
index 600a304a828c..df09b9650a81 100644
--- a/Documentation/vm/pagemap.txt
+++ b/Documentation/vm/pagemap.txt
@@ -57,7 +57,9 @@ There are three components to pagemap:
     16. COMPOUND_TAIL
     16. HUGE
     18. UNEVICTABLE
+    19. HWPOISON
     20. NOPAGE
+    21. KSM

 Short descriptions to the page flags:

@@ -86,9 +88,15 @@ Short descriptions to the page flags:
 17. HUGE
     this is an integral part of a HugeTLB page

+19. HWPOISON
+    hardware detected memory corruption on this page: don't touch the data!
+
 20. NOPAGE
     no page frame exists at the requested address

+21. KSM
+    identical memory pages dynamically shared between one or more processes
+
 [IO related page flags]
  1. ERROR     IO error occurred
  3. UPTODATE  page has up-to-date data
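A hedged userspace sketch (not part of the patch) of reading the per-pfn flag
word from /proc/kpageflags and testing the new HWPOISON (bit 19) and KSM
(bit 21) flags documented above; the pfn is taken from the command line:

    /* kpf_check.c: print ksm/hwpoison status of one page frame */
    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        uint64_t flags, pfn = argc > 1 ? strtoull(argv[1], NULL, 0) : 0;
        int fd = open("/proc/kpageflags", O_RDONLY);

        /* each pfn has one 64-bit flags word at offset pfn * 8 */
        if (fd < 0 || pread(fd, &flags, 8, pfn * 8) != 8) {
            perror("kpageflags");
            return 1;
        }
        printf("pfn %llu: ksm=%d hwpoison=%d\n",
               (unsigned long long)pfn,
               (int)((flags >> 21) & 1), (int)((flags >> 19) & 1));
        close(fd);
        return 0;
    }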
diff --git a/Documentation/w1/masters/ds2482 b/Documentation/w1/masters/ds2482
index 9210d6fa5024..299b91c7609f 100644
--- a/Documentation/w1/masters/ds2482
+++ b/Documentation/w1/masters/ds2482
@@ -24,8 +24,8 @@ General Remarks

 Valid addresses are 0x18, 0x19, 0x1a, and 0x1b.
 However, the device cannot be detected without writing to the i2c bus, so no
-detection is done.
-You should force the device address.
+detection is done. You should instantiate the device explicitly.

-$ modprobe ds2482 force=0,0x18
+$ modprobe ds2482
+$ echo ds2482 0x18 > /sys/bus/i2c/devices/i2c-0/new_device

diff --git a/MAINTAINERS b/MAINTAINERS
index 737a9b2c532d..e1da925b38c8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -257,6 +257,13 @@ W: http://www.lesswatts.org/projects/acpi/
 S:	Supported
 F:	drivers/acpi/fan.c

+ACPI PROCESSOR AGGREGATOR DRIVER
+M:	Shaohua Li <shaohua.li@intel.com>
+L:	linux-acpi@vger.kernel.org
+W:	http://www.lesswatts.org/projects/acpi/
+S:	Supported
+F:	drivers/acpi/acpi_pad.c
+
 ACPI THERMAL DRIVER
 M:	Zhang Rui <rui.zhang@intel.com>
 L:	linux-acpi@vger.kernel.org
@@ -3636,6 +3643,13 @@ F: Documentation/blockdev/nbd.txt
 F:	drivers/block/nbd.c
 F:	include/linux/nbd.h

+NETWORK DROP MONITOR
+M:	Neil Horman <nhorman@tuxdriver.com>
+L:	netdev@vger.kernel.org
+S:	Maintained
+W:	https://fedorahosted.org/dropwatch/
+F:	net/core/drop_monitor.c
+
 NETWORKING [GENERAL]
 M:	"David S. Miller" <davem@davemloft.net>
 L:	netdev@vger.kernel.org
@@ -3966,6 +3980,7 @@ F: drivers/block/paride/
 PARISC ARCHITECTURE
 M:	Kyle McMartin <kyle@mcmartin.ca>
 M:	Helge Deller <deller@gmx.de>
+M:	"James E.J. Bottomley" <jejb@parisc-linux.org>
 L:	linux-parisc@vger.kernel.org
 W:	http://www.parisc-linux.org/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6.git
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 32
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Man-Eating Seals of Antiquity

 # *DOCUMENTATION*
diff --git a/arch/arm/mach-omap2/clock34xx.c b/arch/arm/mach-omap2/clock34xx.c
index fafcd32e6907..489556eecbd1 100644
--- a/arch/arm/mach-omap2/clock34xx.c
+++ b/arch/arm/mach-omap2/clock34xx.c
@@ -338,6 +338,13 @@ static struct omap_clk omap34xx_clks[] = {
  */
 #define SDRC_MPURATE_LOOPS		96

+/*
+ * DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks
+ * that are sourced by DPLL5, and both of these require this clock
+ * to be at 120 MHz for proper operation.
+ */
+#define DPLL5_FREQ_FOR_USBHOST		120000000
+
 /**
  * omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI
  * @clk: struct clk * being enabled
@@ -1056,6 +1063,28 @@ void omap2_clk_prepare_for_reboot(void)
 #endif
 }

+static void omap3_clk_lock_dpll5(void)
+{
+	struct clk *dpll5_clk;
+	struct clk *dpll5_m2_clk;
+
+	dpll5_clk = clk_get(NULL, "dpll5_ck");
+	clk_set_rate(dpll5_clk, DPLL5_FREQ_FOR_USBHOST);
+	clk_enable(dpll5_clk);
+
+	/* Enable autoidle to allow it to enter low power bypass */
+	omap3_dpll_allow_idle(dpll5_clk);
+
+	/* Program dpll5_m2_clk divider for no division */
+	dpll5_m2_clk = clk_get(NULL, "dpll5_m2_ck");
+	clk_enable(dpll5_m2_clk);
+	clk_set_rate(dpll5_m2_clk, DPLL5_FREQ_FOR_USBHOST);
+
+	clk_disable(dpll5_m2_clk);
+	clk_disable(dpll5_clk);
+	return;
+}
+
 /* REVISIT: Move this init stuff out into clock.c */

 /*
@@ -1148,6 +1177,12 @@ int __init omap2_clk_init(void)
	 */
	clk_enable_init_clocks();

+	/*
+	 * Lock DPLL5 and put it in autoidle.
+	 */
+	if (omap_rev() >= OMAP3430_REV_ES2_0)
+		omap3_clk_lock_dpll5();
+
	/* Avoid sleeping during omap2_clk_prepare_for_reboot() */
	/* REVISIT: not yet ready for 343x */
 #if 0
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index 1b4c1600f8d8..2fc4d6abbd0a 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -541,7 +541,7 @@ static int __init pm_dbg_init(void)
		printk(KERN_ERR "%s: only OMAP3 supported\n", __func__);
		return -ENODEV;
	}
-	
+
	d = debugfs_create_dir("pm_debug", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);
@@ -551,7 +551,7 @@ static int __init pm_dbg_init(void)
	(void) debugfs_create_file("time", S_IRUGO,
		d, (void *)DEBUG_FILE_TIMERS, &debug_fops);

-	pwrdm_for_each(pwrdms_setup, (void *)d);
+	pwrdm_for_each_nolock(pwrdms_setup, (void *)d);

	pm_dbg_dir = debugfs_create_dir("registers", d);
	if (IS_ERR(pm_dbg_dir))
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 0ff5a6c53aa0..378c2f618358 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -51,97 +51,112 @@ static void (*_omap_sram_idle)(u32 *addr, int save_state);

 static struct powerdomain *mpu_pwrdm;

-/* PRCM Interrupt Handler for wakeups */
-static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
+/*
+ * PRCM Interrupt Handler Helper Function
+ *
+ * The purpose of this function is to clear any wake-up events latched
+ * in the PRCM PM_WKST_x registers. It is possible that a wake-up event
+ * may occur whilst attempting to clear a PM_WKST_x register and thus
+ * set another bit in this register. A while loop is used to ensure
+ * that any peripheral wake-up events occurring while attempting to
+ * clear the PM_WKST_x are detected and cleared.
+ */
+static int prcm_clear_mod_irqs(s16 module, u8 regs)
 {
-	u32 wkst, irqstatus_mpu;
-	u32 fclk, iclk;
-
-	/* WKUP */
-	wkst = prm_read_mod_reg(WKUP_MOD, PM_WKST);
+	u32 wkst, fclk, iclk, clken;
+	u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1;
+	u16 fclk_off = (regs == 3) ? OMAP3430ES2_CM_FCLKEN3 : CM_FCLKEN1;
+	u16 iclk_off = (regs == 3) ? CM_ICLKEN3 : CM_ICLKEN1;
+	u16 grpsel_off = (regs == 3) ?
+		OMAP3430ES2_PM_MPUGRPSEL3 : OMAP3430_PM_MPUGRPSEL;
+	int c = 0;
+
+	wkst = prm_read_mod_reg(module, wkst_off);
+	wkst &= prm_read_mod_reg(module, grpsel_off);
	if (wkst) {
-		iclk = cm_read_mod_reg(WKUP_MOD, CM_ICLKEN);
-		fclk = cm_read_mod_reg(WKUP_MOD, CM_FCLKEN);
-		cm_set_mod_reg_bits(wkst, WKUP_MOD, CM_ICLKEN);
-		cm_set_mod_reg_bits(wkst, WKUP_MOD, CM_FCLKEN);
-		prm_write_mod_reg(wkst, WKUP_MOD, PM_WKST);
-		while (prm_read_mod_reg(WKUP_MOD, PM_WKST))
-			cpu_relax();
-		cm_write_mod_reg(iclk, WKUP_MOD, CM_ICLKEN);
-		cm_write_mod_reg(fclk, WKUP_MOD, CM_FCLKEN);
+		iclk = cm_read_mod_reg(module, iclk_off);
+		fclk = cm_read_mod_reg(module, fclk_off);
+		while (wkst) {
+			clken = wkst;
+			cm_set_mod_reg_bits(clken, module, iclk_off);
+			/*
+			 * For USBHOST, we don't know whether HOST1 or
+			 * HOST2 woke us up, so enable both f-clocks
+			 */
+			if (module == OMAP3430ES2_USBHOST_MOD)
+				clken |= 1 << OMAP3430ES2_EN_USBHOST2_SHIFT;
+			cm_set_mod_reg_bits(clken, module, fclk_off);
+			prm_write_mod_reg(wkst, module, wkst_off);
+			wkst = prm_read_mod_reg(module, wkst_off);
+			c++;
+		}
+		cm_write_mod_reg(iclk, module, iclk_off);
+		cm_write_mod_reg(fclk, module, fclk_off);
	}

-	/* CORE */
-	wkst = prm_read_mod_reg(CORE_MOD, PM_WKST1);
-	if (wkst) {
-		iclk = cm_read_mod_reg(CORE_MOD, CM_ICLKEN1);
-		fclk = cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
-		cm_set_mod_reg_bits(wkst, CORE_MOD, CM_ICLKEN1);
-		cm_set_mod_reg_bits(wkst, CORE_MOD, CM_FCLKEN1);
-		prm_write_mod_reg(wkst, CORE_MOD, PM_WKST1);
-		while (prm_read_mod_reg(CORE_MOD, PM_WKST1))
-			cpu_relax();
-		cm_write_mod_reg(iclk, CORE_MOD, CM_ICLKEN1);
-		cm_write_mod_reg(fclk, CORE_MOD, CM_FCLKEN1);
-	}
-	wkst = prm_read_mod_reg(CORE_MOD, OMAP3430ES2_PM_WKST3);
-	if (wkst) {
-		iclk = cm_read_mod_reg(CORE_MOD, CM_ICLKEN3);
-		fclk = cm_read_mod_reg(CORE_MOD, OMAP3430ES2_CM_FCLKEN3);
-		cm_set_mod_reg_bits(wkst, CORE_MOD, CM_ICLKEN3);
-		cm_set_mod_reg_bits(wkst, CORE_MOD, OMAP3430ES2_CM_FCLKEN3);
-		prm_write_mod_reg(wkst, CORE_MOD, OMAP3430ES2_PM_WKST3);
-		while (prm_read_mod_reg(CORE_MOD, OMAP3430ES2_PM_WKST3))
-			cpu_relax();
-		cm_write_mod_reg(iclk, CORE_MOD, CM_ICLKEN3);
-		cm_write_mod_reg(fclk, CORE_MOD, OMAP3430ES2_CM_FCLKEN3);
-	}
+	return c;
+}

-	/* PER */
-	wkst = prm_read_mod_reg(OMAP3430_PER_MOD, PM_WKST);
-	if (wkst) {
-		iclk = cm_read_mod_reg(OMAP3430_PER_MOD, CM_ICLKEN);
-		fclk = cm_read_mod_reg(OMAP3430_PER_MOD, CM_FCLKEN);
-		cm_set_mod_reg_bits(wkst, OMAP3430_PER_MOD, CM_ICLKEN);
-		cm_set_mod_reg_bits(wkst, OMAP3430_PER_MOD, CM_FCLKEN);
-		prm_write_mod_reg(wkst, OMAP3430_PER_MOD, PM_WKST);
-		while (prm_read_mod_reg(OMAP3430_PER_MOD, PM_WKST))
-			cpu_relax();
-		cm_write_mod_reg(iclk, OMAP3430_PER_MOD, CM_ICLKEN);
-		cm_write_mod_reg(fclk, OMAP3430_PER_MOD, CM_FCLKEN);
-	}
+static int _prcm_int_handle_wakeup(void)
+{
+	int c;

+	c = prcm_clear_mod_irqs(WKUP_MOD, 1);
+	c += prcm_clear_mod_irqs(CORE_MOD, 1);
+	c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1);
	if (omap_rev() > OMAP3430_REV_ES1_0) {
-		/* USBHOST */
-		wkst = prm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, PM_WKST);
-		if (wkst) {
-			iclk = cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD,
-					       CM_ICLKEN);
-			fclk = cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD,
-					       CM_FCLKEN);
-			cm_set_mod_reg_bits(wkst, OMAP3430ES2_USBHOST_MOD,
-					    CM_ICLKEN);
-			cm_set_mod_reg_bits(wkst, OMAP3430ES2_USBHOST_MOD,
-					    CM_FCLKEN);
-			prm_write_mod_reg(wkst, OMAP3430ES2_USBHOST_MOD,
-					  PM_WKST);
-			while (prm_read_mod_reg(OMAP3430ES2_USBHOST_MOD,
-						PM_WKST))
-				cpu_relax();
-			cm_write_mod_reg(iclk, OMAP3430ES2_USBHOST_MOD,
-					 CM_ICLKEN);
-			cm_write_mod_reg(fclk, OMAP3430ES2_USBHOST_MOD,
-					 CM_FCLKEN);
-		}
+		c += prcm_clear_mod_irqs(CORE_MOD, 3);
+		c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1);
	}

| 138 | irqstatus_mpu = prm_read_mod_reg(OCP_MOD, | 112 | return c; |
| 139 | OMAP3_PRM_IRQSTATUS_MPU_OFFSET); | 113 | } |
| 140 | prm_write_mod_reg(irqstatus_mpu, OCP_MOD, | 114 | |
| 141 | OMAP3_PRM_IRQSTATUS_MPU_OFFSET); | 115 | /* |
| 116 | * PRCM Interrupt Handler | ||
| 117 | * | ||
| 118 | * The PRM_IRQSTATUS_MPU register indicates if there are any pending | ||
| 119 | * interrupts from the PRCM for the MPU. These bits must be cleared in | ||
| 120 | * order to clear the PRCM interrupt. The PRCM interrupt handler is | ||
| 121 | * implemented to simply clear the PRM_IRQSTATUS_MPU in order to clear | ||
| 122 | * the PRCM interrupt. Please note that bit 0 of the PRM_IRQSTATUS_MPU | ||
| 123 | * register indicates that a wake-up event is pending for the MPU and | ||
| 124 | * this bit can only be cleared if all the wake-up events latched | ||
| 125 | * in the various PM_WKST_x registers have been cleared. The interrupt | ||
| 126 | * handler is implemented using a do-while loop so that if a wake-up | ||
| 127 | * event occurred during the processing of the prcm interrupt handler | ||
| 128 | * (setting a bit in the corresponding PM_WKST_x register and thus | ||
| 129 | * preventing us from clearing bit 0 of the PRM_IRQSTATUS_MPU register) | ||
| 130 | * this would be handled. | ||
| 131 | */ | ||
| 132 | static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id) | ||
| 133 | { | ||
| 134 | u32 irqstatus_mpu; | ||
| 135 | int c = 0; | ||
| 136 | |||
| 137 | do { | ||
| 138 | irqstatus_mpu = prm_read_mod_reg(OCP_MOD, | ||
| 139 | OMAP3_PRM_IRQSTATUS_MPU_OFFSET); | ||
| 140 | |||
| 141 | if (irqstatus_mpu & (OMAP3430_WKUP_ST | OMAP3430_IO_ST)) { | ||
| 142 | c = _prcm_int_handle_wakeup(); | ||
| 143 | |||
| 144 | /* | ||
| 145 | * Is the MPU PRCM interrupt handler racing with the | ||
| 146 | * IVA2 PRCM interrupt handler ? | ||
| 147 | */ | ||
| 148 | WARN(c == 0, "prcm: WARNING: PRCM indicated MPU wakeup " | ||
| 149 | "but no wakeup sources are marked\n"); | ||
| 150 | } else { | ||
| 151 | /* XXX we need to expand our PRCM interrupt handler */ | ||
| 152 | WARN(1, "prcm: WARNING: PRCM interrupt received, but " | ||
| 153 | "no code to handle it (%08x)\n", irqstatus_mpu); | ||
| 154 | } | ||
| 155 | |||
| 156 | prm_write_mod_reg(irqstatus_mpu, OCP_MOD, | ||
| 157 | OMAP3_PRM_IRQSTATUS_MPU_OFFSET); | ||
| 142 | 158 | ||
| 143 | while (prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET)) | 159 | } while (prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET)); |
| 144 | cpu_relax(); | ||
| 145 | 160 | ||
| 146 | return IRQ_HANDLED; | 161 | return IRQ_HANDLED; |
| 147 | } | 162 | } |
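The comment and handler above describe a clear-until-idle protocol: latched PM_WKST_x bits are written back to clear them, re-read in case a new event latched in the meantime, and only once every module's status reads zero can the WKUP_ST bit in PRM_IRQSTATUS_MPU be cleared. The following is a minimal sketch of that pattern in plain C; read_status()/write_status() are hypothetical stand-ins for the PRM register accessors and are not part of this patch.

    /* Sketch only (not from this patch): generic clear-until-idle loop,
     * mirroring prcm_clear_mod_irqs() above.  read_status()/write_status()
     * are hypothetical stand-ins for prm_read_mod_reg()/prm_write_mod_reg(). */
    extern unsigned int read_status(void);
    extern void write_status(unsigned int bits);

    static int clear_latched_events(void)
    {
    	int cleared = 0;
    	unsigned int pending = read_status();

    	while (pending) {
    		/* writing the set bits back clears them ... */
    		write_status(pending);
    		/* ... but a new event may have latched meanwhile, so re-read */
    		pending = read_status();
    		cleared++;
    	}

    	return cleared;		/* 0 means nothing was pending on entry */
    }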
| @@ -624,6 +639,16 @@ static void __init prcm_setup_regs(void) | |||
| 624 | prm_write_mod_reg(OMAP3430_IO_EN | OMAP3430_WKUP_EN, | 639 | prm_write_mod_reg(OMAP3430_IO_EN | OMAP3430_WKUP_EN, |
| 625 | OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET); | 640 | OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET); |
| 626 | 641 | ||
| 642 | /* Enable GPIO wakeups in PER */ | ||
| 643 | prm_write_mod_reg(OMAP3430_EN_GPIO2 | OMAP3430_EN_GPIO3 | | ||
| 644 | OMAP3430_EN_GPIO4 | OMAP3430_EN_GPIO5 | | ||
| 645 | OMAP3430_EN_GPIO6, OMAP3430_PER_MOD, PM_WKEN); | ||
| 646 | /* and allow them to wake up MPU */ | ||
| 647 | prm_write_mod_reg(OMAP3430_GRPSEL_GPIO2 | OMAP3430_GRPSEL_GPIO3 | | ||
| 648 | OMAP3430_GRPSEL_GPIO4 | OMAP3430_GRPSEL_GPIO5 | | ||
| 649 | OMAP3430_GRPSEL_GPIO6, | ||
| 650 | OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL); | ||
| 651 | |||
| 627 | /* Don't attach IVA interrupts */ | 652 | /* Don't attach IVA interrupts */ |
| 628 | prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL); | 653 | prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL); |
| 629 | prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1); | 654 | prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1); |
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c index 2594cbff3947..f00289abd30f 100644 --- a/arch/arm/mach-omap2/powerdomain.c +++ b/arch/arm/mach-omap2/powerdomain.c | |||
| @@ -273,35 +273,50 @@ struct powerdomain *pwrdm_lookup(const char *name) | |||
| 273 | } | 273 | } |
| 274 | 274 | ||
| 275 | /** | 275 | /** |
| 276 | * pwrdm_for_each - call function on each registered clockdomain | 276 | * pwrdm_for_each_nolock - call function on each registered powerdomain |
| 277 | * @fn: callback function * | 277 | * @fn: callback function * |
| 278 | * | 278 | * |
| 279 | * Call the supplied function for each registered powerdomain. The | 279 | * Call the supplied function for each registered powerdomain. The |
| 280 | * callback function can return anything but 0 to bail out early from | 280 | * callback function can return anything but 0 to bail out early from |
| 281 | * the iterator. The callback function is called with the pwrdm_rwlock | 281 | * the iterator. Returns the last return value of the callback function, which |
| 282 | * held for reading, so no powerdomain structure manipulation | 282 | * should be 0 for success or anything else to indicate failure; or -EINVAL if |
| 283 | * functions should be called from the callback, although hardware | 283 | * the function pointer is null. |
| 284 | * powerdomain control functions are fine. Returns the last return | ||
| 285 | * value of the callback function, which should be 0 for success or | ||
| 286 | * anything else to indicate failure; or -EINVAL if the function | ||
| 287 | * pointer is null. | ||
| 288 | */ | 284 | */ |
| 289 | int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user), | 285 | int pwrdm_for_each_nolock(int (*fn)(struct powerdomain *pwrdm, void *user), |
| 290 | void *user) | 286 | void *user) |
| 291 | { | 287 | { |
| 292 | struct powerdomain *temp_pwrdm; | 288 | struct powerdomain *temp_pwrdm; |
| 293 | unsigned long flags; | ||
| 294 | int ret = 0; | 289 | int ret = 0; |
| 295 | 290 | ||
| 296 | if (!fn) | 291 | if (!fn) |
| 297 | return -EINVAL; | 292 | return -EINVAL; |
| 298 | 293 | ||
| 299 | read_lock_irqsave(&pwrdm_rwlock, flags); | ||
| 300 | list_for_each_entry(temp_pwrdm, &pwrdm_list, node) { | 294 | list_for_each_entry(temp_pwrdm, &pwrdm_list, node) { |
| 301 | ret = (*fn)(temp_pwrdm, user); | 295 | ret = (*fn)(temp_pwrdm, user); |
| 302 | if (ret) | 296 | if (ret) |
| 303 | break; | 297 | break; |
| 304 | } | 298 | } |
| 299 | |||
| 300 | return ret; | ||
| 301 | } | ||
| 302 | |||
| 303 | /** | ||
| 304 | * pwrdm_for_each - call function on each registered powerdomain | ||
| 305 | * @fn: callback function * | ||
| 306 | * | ||
| 307 | * This function is the same as 'pwrdm_for_each_nolock()', but keeps the | ||
| 308 | * &pwrdm_rwlock locked for reading, so no powerdomain structure manipulation | ||
| 309 | * functions should be called from the callback, although hardware powerdomain | ||
| 310 | * control functions are fine. | ||
| 311 | */ | ||
| 312 | int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user), | ||
| 313 | void *user) | ||
| 314 | { | ||
| 315 | unsigned long flags; | ||
| 316 | int ret; | ||
| 317 | |||
| 318 | read_lock_irqsave(&pwrdm_rwlock, flags); | ||
| 319 | ret = pwrdm_for_each_nolock(fn, user); | ||
| 305 | read_unlock_irqrestore(&pwrdm_rwlock, flags); | 320 | read_unlock_irqrestore(&pwrdm_rwlock, flags); |
| 306 | 321 | ||
| 307 | return ret; | 322 | return ret; |
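For illustration, a hedged sketch of how a caller might use the iterator split documented above: the callback is invoked once per registered powerdomain and returns 0 to continue or non-zero to stop the walk; pwrdm_for_each() takes pwrdm_rwlock for reading, while the _nolock variant is for callers that already hold it. The counting callback below is hypothetical and not part of this patch.

    #include <mach/powerdomain.h>

    /* Hypothetical example, not from this patch: count the registered
     * powerdomains by walking the list with pwrdm_for_each(). */
    static int count_one_pwrdm(struct powerdomain *pwrdm, void *user)
    {
    	int *count = user;

    	(*count)++;
    	return 0;	/* non-zero here would end the iteration early */
    }

    static int count_powerdomains(void)
    {
    	int count = 0;
    	int ret;

    	/* Takes pwrdm_rwlock for reading; pwrdm_for_each_nolock() would be
    	 * used instead if the caller already held that lock. */
    	ret = pwrdm_for_each(count_one_pwrdm, &count);
    	if (ret)
    		return ret;	/* -EINVAL or the callback's error code */

    	return count;
    }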
diff --git a/arch/arm/plat-omap/include/mach/cpu.h b/arch/arm/plat-omap/include/mach/cpu.h index 11e73d9e8928..f129efb3075e 100644 --- a/arch/arm/plat-omap/include/mach/cpu.h +++ b/arch/arm/plat-omap/include/mach/cpu.h | |||
| @@ -303,32 +303,21 @@ IS_OMAP_TYPE(3430, 0x3430) | |||
| 303 | #define cpu_is_omap2430() 0 | 303 | #define cpu_is_omap2430() 0 |
| 304 | #define cpu_is_omap3430() 0 | 304 | #define cpu_is_omap3430() 0 |
| 305 | 305 | ||
| 306 | #if defined(MULTI_OMAP1) | ||
| 307 | # if defined(CONFIG_ARCH_OMAP730) | ||
| 308 | # undef cpu_is_omap730 | ||
| 309 | # define cpu_is_omap730() is_omap730() | ||
| 310 | # endif | ||
| 311 | # if defined(CONFIG_ARCH_OMAP850) | ||
| 312 | # undef cpu_is_omap850 | ||
| 313 | # define cpu_is_omap850() is_omap850() | ||
| 314 | # endif | ||
| 315 | #else | ||
| 316 | # if defined(CONFIG_ARCH_OMAP730) | ||
| 317 | # undef cpu_is_omap730 | ||
| 318 | # define cpu_is_omap730() 1 | ||
| 319 | # endif | ||
| 320 | #endif | ||
| 321 | #else | ||
| 322 | # if defined(CONFIG_ARCH_OMAP850) | ||
| 323 | # undef cpu_is_omap850 | ||
| 324 | # define cpu_is_omap850() 1 | ||
| 325 | # endif | ||
| 326 | #endif | ||
| 327 | |||
| 328 | /* | 306 | /* |
| 329 | * Whether we have MULTI_OMAP1 or not, we still need to distinguish | 307 | * Whether we have MULTI_OMAP1 or not, we still need to distinguish |
| 330 | * between 330 vs. 1510 and 1611B/5912 vs. 1710. | 308 | * between 730 vs. 850, 330 vs. 1510 and 1611B/5912 vs. 1710. |
| 331 | */ | 309 | */ |
| 310 | |||
| 311 | #if defined(CONFIG_ARCH_OMAP730) | ||
| 312 | # undef cpu_is_omap730 | ||
| 313 | # define cpu_is_omap730() is_omap730() | ||
| 314 | #endif | ||
| 315 | |||
| 316 | #if defined(CONFIG_ARCH_OMAP850) | ||
| 317 | # undef cpu_is_omap850 | ||
| 318 | # define cpu_is_omap850() is_omap850() | ||
| 319 | #endif | ||
| 320 | |||
| 332 | #if defined(CONFIG_ARCH_OMAP15XX) | 321 | #if defined(CONFIG_ARCH_OMAP15XX) |
| 333 | # undef cpu_is_omap310 | 322 | # undef cpu_is_omap310 |
| 334 | # undef cpu_is_omap1510 | 323 | # undef cpu_is_omap1510 |
| @@ -433,3 +422,5 @@ IS_OMAP_TYPE(3430, 0x3430) | |||
| 433 | 422 | ||
| 434 | int omap_chip_is(struct omap_chip_id oci); | 423 | int omap_chip_is(struct omap_chip_id oci); |
| 435 | void omap2_check_revision(void); | 424 | void omap2_check_revision(void); |
| 425 | |||
| 426 | #endif | ||
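For context, a hedged sketch of how platform code typically consumes these checks after the cleanup above: with CONFIG_ARCH_OMAP730 or CONFIG_ARCH_OMAP850 enabled the macros resolve to the runtime is_omap730()/is_omap850() tests, and to constant 0 otherwise. The initcall below is illustrative only and not part of this patch.

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <mach/cpu.h>

    /* Illustrative only: the runtime check compiles away entirely when
     * neither CONFIG_ARCH_OMAP730 nor CONFIG_ARCH_OMAP850 is selected. */
    static int __init example_omap7xx_quirks(void)
    {
    	if (cpu_is_omap730() || cpu_is_omap850())
    		pr_info("applying OMAP730/850 specific quirks\n");
    	return 0;
    }
    arch_initcall(example_omap7xx_quirks);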
diff --git a/arch/arm/plat-omap/include/mach/powerdomain.h b/arch/arm/plat-omap/include/mach/powerdomain.h index 6271d8556a40..fa6461423bd0 100644 --- a/arch/arm/plat-omap/include/mach/powerdomain.h +++ b/arch/arm/plat-omap/include/mach/powerdomain.h | |||
| @@ -135,6 +135,8 @@ struct powerdomain *pwrdm_lookup(const char *name); | |||
| 135 | 135 | ||
| 136 | int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user), | 136 | int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user), |
| 137 | void *user); | 137 | void *user); |
| 138 | int pwrdm_for_each_nolock(int (*fn)(struct powerdomain *pwrdm, void *user), | ||
| 139 | void *user); | ||
| 138 | 140 | ||
| 139 | int pwrdm_add_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm); | 141 | int pwrdm_add_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm); |
| 140 | int pwrdm_del_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm); | 142 | int pwrdm_del_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm); |
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c index 57f7122a0919..dc3fac3dd0ea 100644 --- a/arch/arm/plat-omap/iovmm.c +++ b/arch/arm/plat-omap/iovmm.c | |||
| @@ -47,7 +47,7 @@ | |||
| 47 | * 'va': mpu virtual address | 47 | * 'va': mpu virtual address |
| 48 | * | 48 | * |
| 49 | * 'c': contiguous memory area | 49 | * 'c': contiguous memory area |
| 50 | * 'd': dicontiguous memory area | 50 | * 'd': discontiguous memory area |
| 51 | * 'a': anonymous memory allocation | 51 | * 'a': anonymous memory allocation |
| 52 | * '()': optional feature | 52 | * '()': optional feature |
| 53 | * | 53 | * |
| @@ -363,8 +363,9 @@ void *da_to_va(struct iommu *obj, u32 da) | |||
| 363 | goto out; | 363 | goto out; |
| 364 | } | 364 | } |
| 365 | va = area->va; | 365 | va = area->va; |
| 366 | mutex_unlock(&obj->mmap_lock); | ||
| 367 | out: | 366 | out: |
| 367 | mutex_unlock(&obj->mmap_lock); | ||
| 368 | |||
| 368 | return va; | 369 | return va; |
| 369 | } | 370 | } |
| 370 | EXPORT_SYMBOL_GPL(da_to_va); | 371 | EXPORT_SYMBOL_GPL(da_to_va); |
| @@ -398,7 +399,7 @@ static inline void sgtable_drain_vmalloc(struct sg_table *sgt) | |||
| 398 | { | 399 | { |
| 399 | /* | 400 | /* |
| 400 | * Actually this is not necessary at all, just exists for | 401 | * Actually this is not necessary at all, just exists for |
| 401 | * consistency of the code readibility. | 402 | * consistency of the code readability. |
| 402 | */ | 403 | */ |
| 403 | BUG_ON(!sgt); | 404 | BUG_ON(!sgt); |
| 404 | } | 405 | } |
| @@ -434,7 +435,7 @@ static inline void sgtable_drain_kmalloc(struct sg_table *sgt) | |||
| 434 | { | 435 | { |
| 435 | /* | 436 | /* |
| 436 | * Actually this is not necessary at all, just exists for | 437 | * Actually this is not necessary at all, just exists for |
| 437 | * consistency of the code readibility | 438 | * consistency of the code readability |
| 438 | */ | 439 | */ |
| 439 | BUG_ON(!sgt); | 440 | BUG_ON(!sgt); |
| 440 | } | 441 | } |
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c index 925f64711c37..75d1f26e5b17 100644 --- a/arch/arm/plat-omap/sram.c +++ b/arch/arm/plat-omap/sram.c | |||
| @@ -270,7 +270,8 @@ void * omap_sram_push(void * start, unsigned long size) | |||
| 270 | omap_sram_ceil -= size; | 270 | omap_sram_ceil -= size; |
| 271 | omap_sram_ceil = ROUND_DOWN(omap_sram_ceil, sizeof(void *)); | 271 | omap_sram_ceil = ROUND_DOWN(omap_sram_ceil, sizeof(void *)); |
| 272 | memcpy((void *)omap_sram_ceil, start, size); | 272 | memcpy((void *)omap_sram_ceil, start, size); |
| 273 | flush_icache_range((unsigned long)start, (unsigned long)(start + size)); | 273 | flush_icache_range((unsigned long)omap_sram_ceil, |
| 274 | (unsigned long)(omap_sram_ceil + size)); | ||
| 274 | 275 | ||
| 275 | return (void *)omap_sram_ceil; | 276 | return (void *)omap_sram_ceil; |
| 276 | } | 277 | } |
diff --git a/arch/m68knommu/kernel/asm-offsets.c b/arch/m68knommu/kernel/asm-offsets.c index 594ee0e657fe..9a8876f715d8 100644 --- a/arch/m68knommu/kernel/asm-offsets.c +++ b/arch/m68knommu/kernel/asm-offsets.c | |||
| @@ -45,25 +45,25 @@ int main(void) | |||
| 45 | DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate)); | 45 | DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate)); |
| 46 | 46 | ||
| 47 | /* offsets into the pt_regs */ | 47 | /* offsets into the pt_regs */ |
| 48 | DEFINE(PT_D0, offsetof(struct pt_regs, d0)); | 48 | DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0)); |
| 49 | DEFINE(PT_ORIG_D0, offsetof(struct pt_regs, orig_d0)); | 49 | DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0)); |
| 50 | DEFINE(PT_D1, offsetof(struct pt_regs, d1)); | 50 | DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1)); |
| 51 | DEFINE(PT_D2, offsetof(struct pt_regs, d2)); | 51 | DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2)); |
| 52 | DEFINE(PT_D3, offsetof(struct pt_regs, d3)); | 52 | DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3)); |
| 53 | DEFINE(PT_D4, offsetof(struct pt_regs, d4)); | 53 | DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4)); |
| 54 | DEFINE(PT_D5, offsetof(struct pt_regs, d5)); | 54 | DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5)); |
| 55 | DEFINE(PT_A0, offsetof(struct pt_regs, a0)); | 55 | DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0)); |
| 56 | DEFINE(PT_A1, offsetof(struct pt_regs, a1)); | 56 | DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1)); |
| 57 | DEFINE(PT_A2, offsetof(struct pt_regs, a2)); | 57 | DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2)); |
| 58 | DEFINE(PT_PC, offsetof(struct pt_regs, pc)); | 58 | DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc)); |
| 59 | DEFINE(PT_SR, offsetof(struct pt_regs, sr)); | 59 | DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr)); |
| 60 | 60 | ||
| 61 | #ifdef CONFIG_COLDFIRE | 61 | #ifdef CONFIG_COLDFIRE |
| 62 | /* bitfields are a bit difficult */ | 62 | /* bitfields are a bit difficult */ |
| 63 | DEFINE(PT_FORMATVEC, offsetof(struct pt_regs, sr) - 2); | 63 | DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, sr) - 2); |
| 64 | #else | 64 | #else |
| 65 | /* bitfields are a bit difficult */ | 65 | /* bitfields are a bit difficult */ |
| 66 | DEFINE(PT_VECTOR, offsetof(struct pt_regs, pc) + 4); | 66 | DEFINE(PT_OFF_VECTOR, offsetof(struct pt_regs, pc) + 4); |
| 67 | #endif | 67 | #endif |
| 68 | 68 | ||
| 69 | /* signal defines */ | 69 | /* signal defines */ |
diff --git a/arch/m68knommu/kernel/entry.S b/arch/m68knommu/kernel/entry.S index f56faa5c9cd9..56043ade3941 100644 --- a/arch/m68knommu/kernel/entry.S +++ b/arch/m68knommu/kernel/entry.S | |||
| @@ -46,7 +46,7 @@ | |||
| 46 | ENTRY(buserr) | 46 | ENTRY(buserr) |
| 47 | SAVE_ALL | 47 | SAVE_ALL |
| 48 | moveq #-1,%d0 | 48 | moveq #-1,%d0 |
| 49 | movel %d0,%sp@(PT_ORIG_D0) | 49 | movel %d0,%sp@(PT_OFF_ORIG_D0) |
| 50 | movel %sp,%sp@- /* stack frame pointer argument */ | 50 | movel %sp,%sp@- /* stack frame pointer argument */ |
| 51 | jsr buserr_c | 51 | jsr buserr_c |
| 52 | addql #4,%sp | 52 | addql #4,%sp |
| @@ -55,7 +55,7 @@ ENTRY(buserr) | |||
| 55 | ENTRY(trap) | 55 | ENTRY(trap) |
| 56 | SAVE_ALL | 56 | SAVE_ALL |
| 57 | moveq #-1,%d0 | 57 | moveq #-1,%d0 |
| 58 | movel %d0,%sp@(PT_ORIG_D0) | 58 | movel %d0,%sp@(PT_OFF_ORIG_D0) |
| 59 | movel %sp,%sp@- /* stack frame pointer argument */ | 59 | movel %sp,%sp@- /* stack frame pointer argument */ |
| 60 | jsr trap_c | 60 | jsr trap_c |
| 61 | addql #4,%sp | 61 | addql #4,%sp |
| @@ -67,7 +67,7 @@ ENTRY(trap) | |||
| 67 | ENTRY(dbginterrupt) | 67 | ENTRY(dbginterrupt) |
| 68 | SAVE_ALL | 68 | SAVE_ALL |
| 69 | moveq #-1,%d0 | 69 | moveq #-1,%d0 |
| 70 | movel %d0,%sp@(PT_ORIG_D0) | 70 | movel %d0,%sp@(PT_OFF_ORIG_D0) |
| 71 | movel %sp,%sp@- /* stack frame pointer argument */ | 71 | movel %sp,%sp@- /* stack frame pointer argument */ |
| 72 | jsr dbginterrupt_c | 72 | jsr dbginterrupt_c |
| 73 | addql #4,%sp | 73 | addql #4,%sp |
diff --git a/arch/m68knommu/mm/init.c b/arch/m68knommu/mm/init.c index b1703c67a4f1..f3236d0b522d 100644 --- a/arch/m68knommu/mm/init.c +++ b/arch/m68knommu/mm/init.c | |||
| @@ -162,7 +162,7 @@ void free_initrd_mem(unsigned long start, unsigned long end) | |||
| 162 | totalram_pages++; | 162 | totalram_pages++; |
| 163 | pages++; | 163 | pages++; |
| 164 | } | 164 | } |
| 165 | printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages); | 165 | printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages * (PAGE_SIZE / 1024)); |
| 166 | } | 166 | } |
| 167 | #endif | 167 | #endif |
| 168 | 168 | ||
diff --git a/arch/m68knommu/platform/5206e/config.c b/arch/m68knommu/platform/5206e/config.c index 0f41ba82a3b5..942397984c66 100644 --- a/arch/m68knommu/platform/5206e/config.c +++ b/arch/m68knommu/platform/5206e/config.c | |||
| @@ -17,7 +17,6 @@ | |||
| 17 | #include <asm/mcfsim.h> | 17 | #include <asm/mcfsim.h> |
| 18 | #include <asm/mcfuart.h> | 18 | #include <asm/mcfuart.h> |
| 19 | #include <asm/mcfdma.h> | 19 | #include <asm/mcfdma.h> |
| 20 | #include <asm/mcfuart.h> | ||
| 21 | 20 | ||
| 22 | /***************************************************************************/ | 21 | /***************************************************************************/ |
| 23 | 22 | ||
diff --git a/arch/m68knommu/platform/68328/entry.S b/arch/m68knommu/platform/68328/entry.S index b1aef72f3baf..9d80d2c42866 100644 --- a/arch/m68knommu/platform/68328/entry.S +++ b/arch/m68knommu/platform/68328/entry.S | |||
| @@ -39,17 +39,17 @@ | |||
| 39 | .globl inthandler7 | 39 | .globl inthandler7 |
| 40 | 40 | ||
| 41 | badsys: | 41 | badsys: |
| 42 | movel #-ENOSYS,%sp@(PT_D0) | 42 | movel #-ENOSYS,%sp@(PT_OFF_D0) |
| 43 | jra ret_from_exception | 43 | jra ret_from_exception |
| 44 | 44 | ||
| 45 | do_trace: | 45 | do_trace: |
| 46 | movel #-ENOSYS,%sp@(PT_D0) /* needed for strace*/ | 46 | movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/ |
| 47 | subql #4,%sp | 47 | subql #4,%sp |
| 48 | SAVE_SWITCH_STACK | 48 | SAVE_SWITCH_STACK |
| 49 | jbsr syscall_trace | 49 | jbsr syscall_trace |
| 50 | RESTORE_SWITCH_STACK | 50 | RESTORE_SWITCH_STACK |
| 51 | addql #4,%sp | 51 | addql #4,%sp |
| 52 | movel %sp@(PT_ORIG_D0),%d1 | 52 | movel %sp@(PT_OFF_ORIG_D0),%d1 |
| 53 | movel #-ENOSYS,%d0 | 53 | movel #-ENOSYS,%d0 |
| 54 | cmpl #NR_syscalls,%d1 | 54 | cmpl #NR_syscalls,%d1 |
| 55 | jcc 1f | 55 | jcc 1f |
| @@ -57,7 +57,7 @@ do_trace: | |||
| 57 | lea sys_call_table, %a0 | 57 | lea sys_call_table, %a0 |
| 58 | jbsr %a0@(%d1) | 58 | jbsr %a0@(%d1) |
| 59 | 59 | ||
| 60 | 1: movel %d0,%sp@(PT_D0) /* save the return value */ | 60 | 1: movel %d0,%sp@(PT_OFF_D0) /* save the return value */ |
| 61 | subql #4,%sp /* dummy return address */ | 61 | subql #4,%sp /* dummy return address */ |
| 62 | SAVE_SWITCH_STACK | 62 | SAVE_SWITCH_STACK |
| 63 | jbsr syscall_trace | 63 | jbsr syscall_trace |
| @@ -75,7 +75,7 @@ ENTRY(system_call) | |||
| 75 | jbsr set_esp0 | 75 | jbsr set_esp0 |
| 76 | addql #4,%sp | 76 | addql #4,%sp |
| 77 | 77 | ||
| 78 | movel %sp@(PT_ORIG_D0),%d0 | 78 | movel %sp@(PT_OFF_ORIG_D0),%d0 |
| 79 | 79 | ||
| 80 | movel %sp,%d1 /* get thread_info pointer */ | 80 | movel %sp,%d1 /* get thread_info pointer */ |
| 81 | andl #-THREAD_SIZE,%d1 | 81 | andl #-THREAD_SIZE,%d1 |
| @@ -88,10 +88,10 @@ ENTRY(system_call) | |||
| 88 | lea sys_call_table,%a0 | 88 | lea sys_call_table,%a0 |
| 89 | movel %a0@(%d0), %a0 | 89 | movel %a0@(%d0), %a0 |
| 90 | jbsr %a0@ | 90 | jbsr %a0@ |
| 91 | movel %d0,%sp@(PT_D0) /* save the return value*/ | 91 | movel %d0,%sp@(PT_OFF_D0) /* save the return value*/ |
| 92 | 92 | ||
| 93 | ret_from_exception: | 93 | ret_from_exception: |
| 94 | btst #5,%sp@(PT_SR) /* check if returning to kernel*/ | 94 | btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel*/ |
| 95 | jeq Luser_return /* if so, skip resched, signals*/ | 95 | jeq Luser_return /* if so, skip resched, signals*/ |
| 96 | 96 | ||
| 97 | Lkernel_return: | 97 | Lkernel_return: |
| @@ -133,7 +133,7 @@ Lreturn: | |||
| 133 | */ | 133 | */ |
| 134 | inthandler1: | 134 | inthandler1: |
| 135 | SAVE_ALL | 135 | SAVE_ALL |
| 136 | movew %sp@(PT_VECTOR), %d0 | 136 | movew %sp@(PT_OFF_VECTOR), %d0 |
| 137 | and #0x3ff, %d0 | 137 | and #0x3ff, %d0 |
| 138 | 138 | ||
| 139 | movel %sp,%sp@- | 139 | movel %sp,%sp@- |
| @@ -144,7 +144,7 @@ inthandler1: | |||
| 144 | 144 | ||
| 145 | inthandler2: | 145 | inthandler2: |
| 146 | SAVE_ALL | 146 | SAVE_ALL |
| 147 | movew %sp@(PT_VECTOR), %d0 | 147 | movew %sp@(PT_OFF_VECTOR), %d0 |
| 148 | and #0x3ff, %d0 | 148 | and #0x3ff, %d0 |
| 149 | 149 | ||
| 150 | movel %sp,%sp@- | 150 | movel %sp,%sp@- |
| @@ -155,7 +155,7 @@ inthandler2: | |||
| 155 | 155 | ||
| 156 | inthandler3: | 156 | inthandler3: |
| 157 | SAVE_ALL | 157 | SAVE_ALL |
| 158 | movew %sp@(PT_VECTOR), %d0 | 158 | movew %sp@(PT_OFF_VECTOR), %d0 |
| 159 | and #0x3ff, %d0 | 159 | and #0x3ff, %d0 |
| 160 | 160 | ||
| 161 | movel %sp,%sp@- | 161 | movel %sp,%sp@- |
| @@ -166,7 +166,7 @@ inthandler3: | |||
| 166 | 166 | ||
| 167 | inthandler4: | 167 | inthandler4: |
| 168 | SAVE_ALL | 168 | SAVE_ALL |
| 169 | movew %sp@(PT_VECTOR), %d0 | 169 | movew %sp@(PT_OFF_VECTOR), %d0 |
| 170 | and #0x3ff, %d0 | 170 | and #0x3ff, %d0 |
| 171 | 171 | ||
| 172 | movel %sp,%sp@- | 172 | movel %sp,%sp@- |
| @@ -177,7 +177,7 @@ inthandler4: | |||
| 177 | 177 | ||
| 178 | inthandler5: | 178 | inthandler5: |
| 179 | SAVE_ALL | 179 | SAVE_ALL |
| 180 | movew %sp@(PT_VECTOR), %d0 | 180 | movew %sp@(PT_OFF_VECTOR), %d0 |
| 181 | and #0x3ff, %d0 | 181 | and #0x3ff, %d0 |
| 182 | 182 | ||
| 183 | movel %sp,%sp@- | 183 | movel %sp,%sp@- |
| @@ -188,7 +188,7 @@ inthandler5: | |||
| 188 | 188 | ||
| 189 | inthandler6: | 189 | inthandler6: |
| 190 | SAVE_ALL | 190 | SAVE_ALL |
| 191 | movew %sp@(PT_VECTOR), %d0 | 191 | movew %sp@(PT_OFF_VECTOR), %d0 |
| 192 | and #0x3ff, %d0 | 192 | and #0x3ff, %d0 |
| 193 | 193 | ||
| 194 | movel %sp,%sp@- | 194 | movel %sp,%sp@- |
| @@ -199,7 +199,7 @@ inthandler6: | |||
| 199 | 199 | ||
| 200 | inthandler7: | 200 | inthandler7: |
| 201 | SAVE_ALL | 201 | SAVE_ALL |
| 202 | movew %sp@(PT_VECTOR), %d0 | 202 | movew %sp@(PT_OFF_VECTOR), %d0 |
| 203 | and #0x3ff, %d0 | 203 | and #0x3ff, %d0 |
| 204 | 204 | ||
| 205 | movel %sp,%sp@- | 205 | movel %sp,%sp@- |
| @@ -210,7 +210,7 @@ inthandler7: | |||
| 210 | 210 | ||
| 211 | inthandler: | 211 | inthandler: |
| 212 | SAVE_ALL | 212 | SAVE_ALL |
| 213 | movew %sp@(PT_VECTOR), %d0 | 213 | movew %sp@(PT_OFF_VECTOR), %d0 |
| 214 | and #0x3ff, %d0 | 214 | and #0x3ff, %d0 |
| 215 | 215 | ||
| 216 | movel %sp,%sp@- | 216 | movel %sp,%sp@- |
| @@ -224,7 +224,7 @@ ret_from_interrupt: | |||
| 224 | 2: | 224 | 2: |
| 225 | RESTORE_ALL | 225 | RESTORE_ALL |
| 226 | 1: | 226 | 1: |
| 227 | moveb %sp@(PT_SR), %d0 | 227 | moveb %sp@(PT_OFF_SR), %d0 |
| 228 | and #7, %d0 | 228 | and #7, %d0 |
| 229 | jhi 2b | 229 | jhi 2b |
| 230 | 230 | ||
diff --git a/arch/m68knommu/platform/68360/entry.S b/arch/m68knommu/platform/68360/entry.S index 55dfefe38642..6d3460a39cac 100644 --- a/arch/m68knommu/platform/68360/entry.S +++ b/arch/m68knommu/platform/68360/entry.S | |||
| @@ -35,17 +35,17 @@ | |||
| 35 | .globl inthandler | 35 | .globl inthandler |
| 36 | 36 | ||
| 37 | badsys: | 37 | badsys: |
| 38 | movel #-ENOSYS,%sp@(PT_D0) | 38 | movel #-ENOSYS,%sp@(PT_OFF_D0) |
| 39 | jra ret_from_exception | 39 | jra ret_from_exception |
| 40 | 40 | ||
| 41 | do_trace: | 41 | do_trace: |
| 42 | movel #-ENOSYS,%sp@(PT_D0) /* needed for strace*/ | 42 | movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/ |
| 43 | subql #4,%sp | 43 | subql #4,%sp |
| 44 | SAVE_SWITCH_STACK | 44 | SAVE_SWITCH_STACK |
| 45 | jbsr syscall_trace | 45 | jbsr syscall_trace |
| 46 | RESTORE_SWITCH_STACK | 46 | RESTORE_SWITCH_STACK |
| 47 | addql #4,%sp | 47 | addql #4,%sp |
| 48 | movel %sp@(PT_ORIG_D0),%d1 | 48 | movel %sp@(PT_OFF_ORIG_D0),%d1 |
| 49 | movel #-ENOSYS,%d0 | 49 | movel #-ENOSYS,%d0 |
| 50 | cmpl #NR_syscalls,%d1 | 50 | cmpl #NR_syscalls,%d1 |
| 51 | jcc 1f | 51 | jcc 1f |
| @@ -53,7 +53,7 @@ do_trace: | |||
| 53 | lea sys_call_table, %a0 | 53 | lea sys_call_table, %a0 |
| 54 | jbsr %a0@(%d1) | 54 | jbsr %a0@(%d1) |
| 55 | 55 | ||
| 56 | 1: movel %d0,%sp@(PT_D0) /* save the return value */ | 56 | 1: movel %d0,%sp@(PT_OFF_D0) /* save the return value */ |
| 57 | subql #4,%sp /* dummy return address */ | 57 | subql #4,%sp /* dummy return address */ |
| 58 | SAVE_SWITCH_STACK | 58 | SAVE_SWITCH_STACK |
| 59 | jbsr syscall_trace | 59 | jbsr syscall_trace |
| @@ -79,10 +79,10 @@ ENTRY(system_call) | |||
| 79 | lea sys_call_table,%a0 | 79 | lea sys_call_table,%a0 |
| 80 | movel %a0@(%d0), %a0 | 80 | movel %a0@(%d0), %a0 |
| 81 | jbsr %a0@ | 81 | jbsr %a0@ |
| 82 | movel %d0,%sp@(PT_D0) /* save the return value*/ | 82 | movel %d0,%sp@(PT_OFF_D0) /* save the return value*/ |
| 83 | 83 | ||
| 84 | ret_from_exception: | 84 | ret_from_exception: |
| 85 | btst #5,%sp@(PT_SR) /* check if returning to kernel*/ | 85 | btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel*/ |
| 86 | jeq Luser_return /* if so, skip resched, signals*/ | 86 | jeq Luser_return /* if so, skip resched, signals*/ |
| 87 | 87 | ||
| 88 | Lkernel_return: | 88 | Lkernel_return: |
| @@ -124,7 +124,7 @@ Lreturn: | |||
| 124 | */ | 124 | */ |
| 125 | inthandler: | 125 | inthandler: |
| 126 | SAVE_ALL | 126 | SAVE_ALL |
| 127 | movew %sp@(PT_VECTOR), %d0 | 127 | movew %sp@(PT_OFF_VECTOR), %d0 |
| 128 | and.l #0x3ff, %d0 | 128 | and.l #0x3ff, %d0 |
| 129 | lsr.l #0x02, %d0 | 129 | lsr.l #0x02, %d0 |
| 130 | 130 | ||
| @@ -139,7 +139,7 @@ ret_from_interrupt: | |||
| 139 | 2: | 139 | 2: |
| 140 | RESTORE_ALL | 140 | RESTORE_ALL |
| 141 | 1: | 141 | 1: |
| 142 | moveb %sp@(PT_SR), %d0 | 142 | moveb %sp@(PT_OFF_SR), %d0 |
| 143 | and #7, %d0 | 143 | and #7, %d0 |
| 144 | jhi 2b | 144 | jhi 2b |
| 145 | /* check if we need to do software interrupts */ | 145 | /* check if we need to do software interrupts */ |
diff --git a/arch/m68knommu/platform/coldfire/entry.S b/arch/m68knommu/platform/coldfire/entry.S index 3b471c0da24a..dd7d591f70ea 100644 --- a/arch/m68knommu/platform/coldfire/entry.S +++ b/arch/m68knommu/platform/coldfire/entry.S | |||
| @@ -81,11 +81,11 @@ ENTRY(system_call) | |||
| 81 | 81 | ||
| 82 | movel %d3,%a0 | 82 | movel %d3,%a0 |
| 83 | jbsr %a0@ | 83 | jbsr %a0@ |
| 84 | movel %d0,%sp@(PT_D0) /* save the return value */ | 84 | movel %d0,%sp@(PT_OFF_D0) /* save the return value */ |
| 85 | jra ret_from_exception | 85 | jra ret_from_exception |
| 86 | 1: | 86 | 1: |
| 87 | movel #-ENOSYS,%d2 /* strace needs -ENOSYS in PT_D0 */ | 87 | movel #-ENOSYS,%d2 /* strace needs -ENOSYS in PT_OFF_D0 */ |
| 88 | movel %d2,PT_D0(%sp) /* on syscall entry */ | 88 | movel %d2,PT_OFF_D0(%sp) /* on syscall entry */ |
| 89 | subql #4,%sp | 89 | subql #4,%sp |
| 90 | SAVE_SWITCH_STACK | 90 | SAVE_SWITCH_STACK |
| 91 | jbsr syscall_trace | 91 | jbsr syscall_trace |
| @@ -93,7 +93,7 @@ ENTRY(system_call) | |||
| 93 | addql #4,%sp | 93 | addql #4,%sp |
| 94 | movel %d3,%a0 | 94 | movel %d3,%a0 |
| 95 | jbsr %a0@ | 95 | jbsr %a0@ |
| 96 | movel %d0,%sp@(PT_D0) /* save the return value */ | 96 | movel %d0,%sp@(PT_OFF_D0) /* save the return value */ |
| 97 | subql #4,%sp /* dummy return address */ | 97 | subql #4,%sp /* dummy return address */ |
| 98 | SAVE_SWITCH_STACK | 98 | SAVE_SWITCH_STACK |
| 99 | jbsr syscall_trace | 99 | jbsr syscall_trace |
| @@ -104,7 +104,7 @@ ret_from_signal: | |||
| 104 | 104 | ||
| 105 | ret_from_exception: | 105 | ret_from_exception: |
| 106 | move #0x2700,%sr /* disable intrs */ | 106 | move #0x2700,%sr /* disable intrs */ |
| 107 | btst #5,%sp@(PT_SR) /* check if returning to kernel */ | 107 | btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel */ |
| 108 | jeq Luser_return /* if so, skip resched, signals */ | 108 | jeq Luser_return /* if so, skip resched, signals */ |
| 109 | 109 | ||
| 110 | #ifdef CONFIG_PREEMPT | 110 | #ifdef CONFIG_PREEMPT |
| @@ -142,8 +142,8 @@ Luser_return: | |||
| 142 | Lreturn: | 142 | Lreturn: |
| 143 | move #0x2700,%sr /* disable intrs */ | 143 | move #0x2700,%sr /* disable intrs */ |
| 144 | movel sw_usp,%a0 /* get usp */ | 144 | movel sw_usp,%a0 /* get usp */ |
| 145 | movel %sp@(PT_PC),%a0@- /* copy exception program counter */ | 145 | movel %sp@(PT_OFF_PC),%a0@- /* copy exception program counter */ |
| 146 | movel %sp@(PT_FORMATVEC),%a0@-/* copy exception format/vector/sr */ | 146 | movel %sp@(PT_OFF_FORMATVEC),%a0@- /* copy exception format/vector/sr */ |
| 147 | moveml %sp@,%d1-%d5/%a0-%a2 | 147 | moveml %sp@,%d1-%d5/%a0-%a2 |
| 148 | lea %sp@(32),%sp /* space for 8 regs */ | 148 | lea %sp@(32),%sp /* space for 8 regs */ |
| 149 | movel %sp@+,%d0 | 149 | movel %sp@+,%d0 |
| @@ -181,9 +181,9 @@ Lsignal_return: | |||
| 181 | ENTRY(inthandler) | 181 | ENTRY(inthandler) |
| 182 | SAVE_ALL | 182 | SAVE_ALL |
| 183 | moveq #-1,%d0 | 183 | moveq #-1,%d0 |
| 184 | movel %d0,%sp@(PT_ORIG_D0) | 184 | movel %d0,%sp@(PT_OFF_ORIG_D0) |
| 185 | 185 | ||
| 186 | movew %sp@(PT_FORMATVEC),%d0 /* put exception # in d0 */ | 186 | movew %sp@(PT_OFF_FORMATVEC),%d0 /* put exception # in d0 */ |
| 187 | andl #0x03fc,%d0 /* mask out vector only */ | 187 | andl #0x03fc,%d0 /* mask out vector only */ |
| 188 | 188 | ||
| 189 | movel %sp,%sp@- /* push regs arg */ | 189 | movel %sp,%sp@- /* push regs arg */ |
| @@ -203,7 +203,7 @@ ENTRY(inthandler) | |||
| 203 | ENTRY(fasthandler) | 203 | ENTRY(fasthandler) |
| 204 | SAVE_LOCAL | 204 | SAVE_LOCAL |
| 205 | 205 | ||
| 206 | movew %sp@(PT_FORMATVEC),%d0 | 206 | movew %sp@(PT_OFF_FORMATVEC),%d0 |
| 207 | andl #0x03fc,%d0 /* mask out vector only */ | 207 | andl #0x03fc,%d0 /* mask out vector only */ |
| 208 | 208 | ||
| 209 | movel %sp,%sp@- /* push regs arg */ | 209 | movel %sp,%sp@- /* push regs arg */ |
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S index acc1f05d1e2c..e3ecb36dd554 100644 --- a/arch/microblaze/kernel/entry.S +++ b/arch/microblaze/kernel/entry.S | |||
| @@ -592,6 +592,8 @@ C_ENTRY(full_exception_trap): | |||
| 592 | nop | 592 | nop |
| 593 | mfs r7, rfsr; /* save FSR */ | 593 | mfs r7, rfsr; /* save FSR */ |
| 594 | nop | 594 | nop |
| 595 | mts rfsr, r0; /* Clear sticky fsr */ | ||
| 596 | nop | ||
| 595 | la r12, r0, full_exception | 597 | la r12, r0, full_exception |
| 596 | set_vms; | 598 | set_vms; |
| 597 | rtbd r12, 0; | 599 | rtbd r12, 0; |
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S index 6b0288ebccd6..2b86c03aa841 100644 --- a/arch/microblaze/kernel/hw_exception_handler.S +++ b/arch/microblaze/kernel/hw_exception_handler.S | |||
| @@ -384,7 +384,7 @@ handle_other_ex: /* Handle Other exceptions here */ | |||
| 384 | addk r8, r17, r0; /* Load exception address */ | 384 | addk r8, r17, r0; /* Load exception address */ |
| 385 | bralid r15, full_exception; /* Branch to the handler */ | 385 | bralid r15, full_exception; /* Branch to the handler */ |
| 386 | nop; | 386 | nop; |
| 387 | mts r0, rfsr; /* Clear sticky fsr */ | 387 | mts rfsr, r0; /* Clear sticky fsr */ |
| 388 | nop | 388 | nop |
| 389 | 389 | ||
| 390 | /* | 390 | /* |
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index 4201c743cc9f..c592d475b3d8 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c | |||
| @@ -235,7 +235,9 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp) | |||
| 235 | regs->pc = pc; | 235 | regs->pc = pc; |
| 236 | regs->r1 = usp; | 236 | regs->r1 = usp; |
| 237 | regs->pt_mode = 0; | 237 | regs->pt_mode = 0; |
| 238 | #ifdef CONFIG_MMU | ||
| 238 | regs->msr |= MSR_UMS; | 239 | regs->msr |= MSR_UMS; |
| 240 | #endif | ||
| 239 | } | 241 | } |
| 240 | 242 | ||
| 241 | #ifdef CONFIG_MMU | 243 | #ifdef CONFIG_MMU |
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index f388dc68f605..524d9352f17e 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
| @@ -18,6 +18,7 @@ config PARISC | |||
| 18 | select BUG | 18 | select BUG |
| 19 | select HAVE_PERF_EVENTS | 19 | select HAVE_PERF_EVENTS |
| 20 | select GENERIC_ATOMIC64 if !64BIT | 20 | select GENERIC_ATOMIC64 if !64BIT |
| 21 | select HAVE_ARCH_TRACEHOOK | ||
| 21 | help | 22 | help |
| 22 | The PA-RISC microprocessor is designed by Hewlett-Packard and used | 23 | The PA-RISC microprocessor is designed by Hewlett-Packard and used |
| 23 | in many of their workstations & servers (HP9000 700 and 800 series, | 24 | in many of their workstations & servers (HP9000 700 and 800 series, |
diff --git a/arch/parisc/include/asm/fixmap.h b/arch/parisc/include/asm/fixmap.h index de3fe3a18229..6fec4d4a1a18 100644 --- a/arch/parisc/include/asm/fixmap.h +++ b/arch/parisc/include/asm/fixmap.h | |||
| @@ -21,9 +21,9 @@ | |||
| 21 | #define KERNEL_MAP_END (TMPALIAS_MAP_START) | 21 | #define KERNEL_MAP_END (TMPALIAS_MAP_START) |
| 22 | 22 | ||
| 23 | #ifndef __ASSEMBLY__ | 23 | #ifndef __ASSEMBLY__ |
| 24 | extern void *vmalloc_start; | 24 | extern void *parisc_vmalloc_start; |
| 25 | #define PCXL_DMA_MAP_SIZE (8*1024*1024) | 25 | #define PCXL_DMA_MAP_SIZE (8*1024*1024) |
| 26 | #define VMALLOC_START ((unsigned long)vmalloc_start) | 26 | #define VMALLOC_START ((unsigned long)parisc_vmalloc_start) |
| 27 | #define VMALLOC_END (KERNEL_MAP_END) | 27 | #define VMALLOC_END (KERNEL_MAP_END) |
| 28 | #endif /*__ASSEMBLY__*/ | 28 | #endif /*__ASSEMBLY__*/ |
| 29 | 29 | ||
diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h index ce93133d5112..0d68184a76cb 100644 --- a/arch/parisc/include/asm/hardirq.h +++ b/arch/parisc/include/asm/hardirq.h | |||
| @@ -1,29 +1,11 @@ | |||
| 1 | /* hardirq.h: PA-RISC hard IRQ support. | 1 | /* hardirq.h: PA-RISC hard IRQ support. |
| 2 | * | 2 | * |
| 3 | * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx> | 3 | * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx> |
| 4 | * | ||
| 5 | * The locking is really quite interesting. There's a cpu-local | ||
| 6 | * count of how many interrupts are being handled, and a global | ||
| 7 | * lock. An interrupt can only be serviced if the global lock | ||
| 8 | * is free. You can't be sure no more interrupts are being | ||
| 9 | * serviced until you've acquired the lock and then checked | ||
| 10 | * all the per-cpu interrupt counts are all zero. It's a specialised | ||
| 11 | * br_lock, and that's exactly how Sparc does it. We don't because | ||
| 12 | * it's more locking for us. This way is lock-free in the interrupt path. | ||
| 13 | */ | 4 | */ |
| 14 | 5 | ||
| 15 | #ifndef _PARISC_HARDIRQ_H | 6 | #ifndef _PARISC_HARDIRQ_H |
| 16 | #define _PARISC_HARDIRQ_H | 7 | #define _PARISC_HARDIRQ_H |
| 17 | 8 | ||
| 18 | #include <linux/threads.h> | 9 | #include <asm-generic/hardirq.h> |
| 19 | #include <linux/irq.h> | ||
| 20 | |||
| 21 | typedef struct { | ||
| 22 | unsigned long __softirq_pending; /* set_bit is used on this */ | ||
| 23 | } ____cacheline_aligned irq_cpustat_t; | ||
| 24 | |||
| 25 | #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ | ||
| 26 | |||
| 27 | void ack_bad_irq(unsigned int irq); | ||
| 28 | 10 | ||
| 29 | #endif /* _PARISC_HARDIRQ_H */ | 11 | #endif /* _PARISC_HARDIRQ_H */ |
diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h index 302f68dc889c..aead40b16dd8 100644 --- a/arch/parisc/include/asm/ptrace.h +++ b/arch/parisc/include/asm/ptrace.h | |||
| @@ -59,8 +59,11 @@ void user_enable_block_step(struct task_struct *task); | |||
| 59 | #define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0) | 59 | #define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0) |
| 60 | #define user_space(regs) (((regs)->iasq[1] != 0) ? 1 : 0) | 60 | #define user_space(regs) (((regs)->iasq[1] != 0) ? 1 : 0) |
| 61 | #define instruction_pointer(regs) ((regs)->iaoq[0] & ~3) | 61 | #define instruction_pointer(regs) ((regs)->iaoq[0] & ~3) |
| 62 | #define user_stack_pointer(regs) ((regs)->gr[30]) | ||
| 62 | unsigned long profile_pc(struct pt_regs *); | 63 | unsigned long profile_pc(struct pt_regs *); |
| 63 | extern void show_regs(struct pt_regs *); | 64 | extern void show_regs(struct pt_regs *); |
| 64 | #endif | 65 | |
| 66 | |||
| 67 | #endif /* __KERNEL__ */ | ||
| 65 | 68 | ||
| 66 | #endif | 69 | #endif |
diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h new file mode 100644 index 000000000000..8bdfd2c8c39f --- /dev/null +++ b/arch/parisc/include/asm/syscall.h | |||
| @@ -0,0 +1,40 @@ | |||
| 1 | /* syscall.h */ | ||
| 2 | |||
| 3 | #ifndef _ASM_PARISC_SYSCALL_H_ | ||
| 4 | #define _ASM_PARISC_SYSCALL_H_ | ||
| 5 | |||
| 6 | #include <linux/err.h> | ||
| 7 | #include <asm/ptrace.h> | ||
| 8 | |||
| 9 | static inline long syscall_get_nr(struct task_struct *tsk, | ||
| 10 | struct pt_regs *regs) | ||
| 11 | { | ||
| 12 | return regs->gr[20]; | ||
| 13 | } | ||
| 14 | |||
| 15 | static inline void syscall_get_arguments(struct task_struct *tsk, | ||
| 16 | struct pt_regs *regs, unsigned int i, | ||
| 17 | unsigned int n, unsigned long *args) | ||
| 18 | { | ||
| 19 | BUG_ON(i); | ||
| 20 | |||
| 21 | switch (n) { | ||
| 22 | case 6: | ||
| 23 | args[5] = regs->gr[21]; | ||
| 24 | case 5: | ||
| 25 | args[4] = regs->gr[22]; | ||
| 26 | case 4: | ||
| 27 | args[3] = regs->gr[23]; | ||
| 28 | case 3: | ||
| 29 | args[2] = regs->gr[24]; | ||
| 30 | case 2: | ||
| 31 | args[1] = regs->gr[25]; | ||
| 32 | case 1: | ||
| 33 | args[0] = regs->gr[26]; | ||
| 34 | break; | ||
| 35 | default: | ||
| 36 | BUG(); | ||
| 37 | } | ||
| 38 | } | ||
| 39 | |||
| 40 | #endif /*_ASM_PARISC_SYSCALL_H_*/ | ||
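As a usage illustration of the new asm/syscall.h helpers above (hypothetical, not part of this patch), a tracer-side function might read the syscall number and its first few arguments like this:

    #include <linux/kernel.h>
    #include <linux/sched.h>
    #include <asm/syscall.h>

    /* Hypothetical example: dump the syscall number and first three
     * arguments of a traced task using the helpers defined above. */
    static void example_dump_syscall(struct task_struct *tsk, struct pt_regs *regs)
    {
    	unsigned long args[3];
    	long nr = syscall_get_nr(tsk, regs);

    	/* i must be 0 here; the helper BUG()s on a non-zero start index. */
    	syscall_get_arguments(tsk, regs, 0, 3, args);
    	printk(KERN_DEBUG "syscall %ld(%#lx, %#lx, %#lx)\n",
    	       nr, args[0], args[1], args[2]);
    }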
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index ac775a76bff7..7ecc1039cfed 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h | |||
| @@ -32,6 +32,11 @@ struct thread_info { | |||
| 32 | #define init_thread_info (init_thread_union.thread_info) | 32 | #define init_thread_info (init_thread_union.thread_info) |
| 33 | #define init_stack (init_thread_union.stack) | 33 | #define init_stack (init_thread_union.stack) |
| 34 | 34 | ||
| 35 | /* how to get the thread information struct from C */ | ||
| 36 | #define current_thread_info() ((struct thread_info *)mfctl(30)) | ||
| 37 | |||
| 38 | #endif /* !__ASSEMBLY */ | ||
| 39 | |||
| 35 | /* thread information allocation */ | 40 | /* thread information allocation */ |
| 36 | 41 | ||
| 37 | #define THREAD_SIZE_ORDER 2 | 42 | #define THREAD_SIZE_ORDER 2 |
| @@ -40,11 +45,6 @@ struct thread_info { | |||
| 40 | #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) | 45 | #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) |
| 41 | #define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER) | 46 | #define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER) |
| 42 | 47 | ||
| 43 | /* how to get the thread information struct from C */ | ||
| 44 | #define current_thread_info() ((struct thread_info *)mfctl(30)) | ||
| 45 | |||
| 46 | #endif /* !__ASSEMBLY */ | ||
| 47 | |||
| 48 | #define PREEMPT_ACTIVE_BIT 28 | 48 | #define PREEMPT_ACTIVE_BIT 28 |
| 49 | #define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT) | 49 | #define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT) |
| 50 | 50 | ||
| @@ -60,6 +60,8 @@ struct thread_info { | |||
| 60 | #define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */ | 60 | #define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */ |
| 61 | #define TIF_FREEZE 7 /* is freezing for suspend */ | 61 | #define TIF_FREEZE 7 /* is freezing for suspend */ |
| 62 | #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ | 62 | #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ |
| 63 | #define TIF_SINGLESTEP 9 /* single stepping? */ | ||
| 64 | #define TIF_BLOCKSTEP 10 /* branch stepping? */ | ||
| 63 | 65 | ||
| 64 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) | 66 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) |
| 65 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) | 67 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) |
| @@ -69,6 +71,8 @@ struct thread_info { | |||
| 69 | #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) | 71 | #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) |
| 70 | #define _TIF_FREEZE (1 << TIF_FREEZE) | 72 | #define _TIF_FREEZE (1 << TIF_FREEZE) |
| 71 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) | 73 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) |
| 74 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) | ||
| 75 | #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) | ||
| 72 | 76 | ||
| 73 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ | 77 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ |
| 74 | _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) | 78 | _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) |
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index 699cf8ef2118..fcd3c707bf12 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c | |||
| @@ -270,8 +270,8 @@ int main(void) | |||
| 270 | DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count)); | 270 | DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count)); |
| 271 | DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop)); | 271 | DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop)); |
| 272 | BLANK(); | 272 | BLANK(); |
| 273 | DEFINE(PA_BLOCKSTEP_BIT, 31-PT_BLOCKSTEP_BIT); | 273 | DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP); |
| 274 | DEFINE(PA_SINGLESTEP_BIT, 31-PT_SINGLESTEP_BIT); | 274 | DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP); |
| 275 | BLANK(); | 275 | BLANK(); |
| 276 | DEFINE(ASM_PMD_SHIFT, PMD_SHIFT); | 276 | DEFINE(ASM_PMD_SHIFT, PMD_SHIFT); |
| 277 | DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT); | 277 | DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT); |
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 8c4712b74dc1..3a44f7f704fa 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S | |||
| @@ -2047,12 +2047,13 @@ syscall_do_signal: | |||
| 2047 | b,n syscall_check_sig | 2047 | b,n syscall_check_sig |
| 2048 | 2048 | ||
| 2049 | syscall_restore: | 2049 | syscall_restore: |
| 2050 | /* Are we being ptraced? */ | ||
| 2051 | LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 | 2050 | LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 |
| 2052 | 2051 | ||
| 2053 | ldw TASK_PTRACE(%r1), %r19 | 2052 | /* Are we being ptraced? */ |
| 2054 | bb,< %r19,31,syscall_restore_rfi | 2053 | ldw TASK_FLAGS(%r1),%r19 |
| 2055 | nop | 2054 | ldi (_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2 |
| 2055 | and,COND(=) %r19,%r2,%r0 | ||
| 2056 | b,n syscall_restore_rfi | ||
| 2056 | 2057 | ||
| 2057 | ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */ | 2058 | ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */ |
| 2058 | rest_fp %r19 | 2059 | rest_fp %r19 |
| @@ -2113,16 +2114,16 @@ syscall_restore_rfi: | |||
| 2113 | ldi 0x0b,%r20 /* Create new PSW */ | 2114 | ldi 0x0b,%r20 /* Create new PSW */ |
| 2114 | depi -1,13,1,%r20 /* C, Q, D, and I bits */ | 2115 | depi -1,13,1,%r20 /* C, Q, D, and I bits */ |
| 2115 | 2116 | ||
| 2116 | /* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are | 2117 | /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are |
| 2117 | * set in include/linux/ptrace.h and converted to PA bitmap | 2118 | * set in thread_info.h and converted to PA bitmap |
| 2118 | * numbers in asm-offsets.c */ | 2119 | * numbers in asm-offsets.c */ |
| 2119 | 2120 | ||
| 2120 | /* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */ | 2121 | /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */ |
| 2121 | extru,= %r19,PA_SINGLESTEP_BIT,1,%r0 | 2122 | extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0 |
| 2122 | depi -1,27,1,%r20 /* R bit */ | 2123 | depi -1,27,1,%r20 /* R bit */ |
| 2123 | 2124 | ||
| 2124 | /* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */ | 2125 | /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */ |
| 2125 | extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0 | 2126 | extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0 |
| 2126 | depi -1,7,1,%r20 /* T bit */ | 2127 | depi -1,7,1,%r20 /* T bit */ |
| 2127 | 2128 | ||
| 2128 | STREG %r20,TASK_PT_PSW(%r1) | 2129 | STREG %r20,TASK_PT_PSW(%r1) |
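The 31 - TIF_x arithmetic referenced above converts the generic thread-flag bit numbers (counted from the least significant bit, as used by set_tsk_thread_flag()) into PA-RISC's bit numbering, where bit 0 is the most significant bit tested by instructions such as extru. A small self-checking sketch of that correspondence follows; it is illustrative only and not part of this patch.

    #include <linux/kernel.h>
    #include <linux/compiler.h>

    /* Illustrative only: TIF_SINGLESTEP is generic bit 9, so its PA bit
     * number is 31 - 9 = 22, i.e. the same bit of the 32-bit word viewed
     * from the most-significant end. */
    #define EXAMPLE_TIF_SINGLESTEP		9
    #define EXAMPLE_TIF_SINGLESTEP_PA_BIT	(31 - EXAMPLE_TIF_SINGLESTEP)

    static void __maybe_unused example_pa_bit_check(void)
    {
    	BUILD_BUG_ON(EXAMPLE_TIF_SINGLESTEP_PA_BIT != 22);
    	BUILD_BUG_ON((1u << EXAMPLE_TIF_SINGLESTEP) !=
    		     (0x80000000u >> EXAMPLE_TIF_SINGLESTEP_PA_BIT));
    }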
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 330f536a9324..2e7610cb33d5 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c | |||
| @@ -423,8 +423,3 @@ void __init init_IRQ(void) | |||
| 423 | set_eiem(cpu_eiem); /* EIEM : enable all external intr */ | 423 | set_eiem(cpu_eiem); /* EIEM : enable all external intr */ |
| 424 | 424 | ||
| 425 | } | 425 | } |
| 426 | |||
| 427 | void ack_bad_irq(unsigned int irq) | ||
| 428 | { | ||
| 429 | printk(KERN_WARNING "unexpected IRQ %d\n", irq); | ||
| 430 | } | ||
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index 61ee0eec4e69..212074653df7 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c | |||
| @@ -893,7 +893,7 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
| 893 | * ourselves */ | 893 | * ourselves */ |
| 894 | for (i = 1; i < hdr->e_shnum; i++) { | 894 | for (i = 1; i < hdr->e_shnum; i++) { |
| 895 | if(sechdrs[i].sh_type == SHT_SYMTAB | 895 | if(sechdrs[i].sh_type == SHT_SYMTAB |
| 896 | && (sechdrs[i].sh_type & SHF_ALLOC)) { | 896 | && (sechdrs[i].sh_flags & SHF_ALLOC)) { |
| 897 | int strindex = sechdrs[i].sh_link; | 897 | int strindex = sechdrs[i].sh_link; |
| 898 | /* FIXME: AWFUL HACK | 898 | /* FIXME: AWFUL HACK |
| 899 | * The cast is to drop the const from | 899 | * The cast is to drop the const from |
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 927db3668b6f..c4f49e45129d 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/smp.h> | 13 | #include <linux/smp.h> |
| 14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
| 15 | #include <linux/ptrace.h> | 15 | #include <linux/ptrace.h> |
| 16 | #include <linux/tracehook.h> | ||
| 16 | #include <linux/user.h> | 17 | #include <linux/user.h> |
| 17 | #include <linux/personality.h> | 18 | #include <linux/personality.h> |
| 18 | #include <linux/security.h> | 19 | #include <linux/security.h> |
| @@ -35,7 +36,8 @@ | |||
| 35 | */ | 36 | */ |
| 36 | void ptrace_disable(struct task_struct *task) | 37 | void ptrace_disable(struct task_struct *task) |
| 37 | { | 38 | { |
| 38 | task->ptrace &= ~(PT_SINGLESTEP|PT_BLOCKSTEP); | 39 | clear_tsk_thread_flag(task, TIF_SINGLESTEP); |
| 40 | clear_tsk_thread_flag(task, TIF_BLOCKSTEP); | ||
| 39 | 41 | ||
| 40 | /* make sure the trap bits are not set */ | 42 | /* make sure the trap bits are not set */ |
| 41 | pa_psw(task)->r = 0; | 43 | pa_psw(task)->r = 0; |
| @@ -55,8 +57,8 @@ void user_disable_single_step(struct task_struct *task) | |||
| 55 | 57 | ||
| 56 | void user_enable_single_step(struct task_struct *task) | 58 | void user_enable_single_step(struct task_struct *task) |
| 57 | { | 59 | { |
| 58 | task->ptrace &= ~PT_BLOCKSTEP; | 60 | clear_tsk_thread_flag(task, TIF_BLOCKSTEP); |
| 59 | task->ptrace |= PT_SINGLESTEP; | 61 | set_tsk_thread_flag(task, TIF_SINGLESTEP); |
| 60 | 62 | ||
| 61 | if (pa_psw(task)->n) { | 63 | if (pa_psw(task)->n) { |
| 62 | struct siginfo si; | 64 | struct siginfo si; |
| @@ -98,8 +100,8 @@ void user_enable_single_step(struct task_struct *task) | |||
| 98 | 100 | ||
| 99 | void user_enable_block_step(struct task_struct *task) | 101 | void user_enable_block_step(struct task_struct *task) |
| 100 | { | 102 | { |
| 101 | task->ptrace &= ~PT_SINGLESTEP; | 103 | clear_tsk_thread_flag(task, TIF_SINGLESTEP); |
| 102 | task->ptrace |= PT_BLOCKSTEP; | 104 | set_tsk_thread_flag(task, TIF_BLOCKSTEP); |
| 103 | 105 | ||
| 104 | /* Enable taken branch trap. */ | 106 | /* Enable taken branch trap. */ |
| 105 | pa_psw(task)->r = 0; | 107 | pa_psw(task)->r = 0; |
| @@ -263,22 +265,20 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | |||
| 263 | } | 265 | } |
| 264 | #endif | 266 | #endif |
| 265 | 267 | ||
| 268 | long do_syscall_trace_enter(struct pt_regs *regs) | ||
| 269 | { | ||
| 270 | if (test_thread_flag(TIF_SYSCALL_TRACE) && | ||
| 271 | tracehook_report_syscall_entry(regs)) | ||
| 272 | return -1L; | ||
| 273 | |||
| 274 | return regs->gr[20]; | ||
| 275 | } | ||
| 266 | 276 | ||
| 267 | void syscall_trace(void) | 277 | void do_syscall_trace_exit(struct pt_regs *regs) |
| 268 | { | 278 | { |
| 269 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) | 279 | int stepping = test_thread_flag(TIF_SINGLESTEP) || |
| 270 | return; | 280 | test_thread_flag(TIF_BLOCKSTEP); |
| 271 | if (!(current->ptrace & PT_PTRACED)) | 281 | |
| 272 | return; | 282 | if (stepping || test_thread_flag(TIF_SYSCALL_TRACE)) |
| 273 | ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) | 283 | tracehook_report_syscall_exit(regs, stepping); |
| 274 | ? 0x80 : 0)); | ||
| 275 | /* | ||
| 276 | * this isn't the same as continuing with a signal, but it will do | ||
| 277 | * for normal use. strace only continues with a signal if the | ||
| 278 | * stopping signal is not SIGTRAP. -brl | ||
| 279 | */ | ||
| 280 | if (current->exit_code) { | ||
| 281 | send_sig(current->exit_code, current, 1); | ||
| 282 | current->exit_code = 0; | ||
| 283 | } | ||
| 284 | } | 284 | } |
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 8eb3c63c407a..e8467e4aa8d1 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/errno.h> | 21 | #include <linux/errno.h> |
| 22 | #include <linux/wait.h> | 22 | #include <linux/wait.h> |
| 23 | #include <linux/ptrace.h> | 23 | #include <linux/ptrace.h> |
| 24 | #include <linux/tracehook.h> | ||
| 24 | #include <linux/unistd.h> | 25 | #include <linux/unistd.h> |
| 25 | #include <linux/stddef.h> | 26 | #include <linux/stddef.h> |
| 26 | #include <linux/compat.h> | 27 | #include <linux/compat.h> |
| @@ -34,7 +35,6 @@ | |||
| 34 | #include <asm/asm-offsets.h> | 35 | #include <asm/asm-offsets.h> |
| 35 | 36 | ||
| 36 | #ifdef CONFIG_COMPAT | 37 | #ifdef CONFIG_COMPAT |
| 37 | #include <linux/compat.h> | ||
| 38 | #include "signal32.h" | 38 | #include "signal32.h" |
| 39 | #endif | 39 | #endif |
| 40 | 40 | ||
| @@ -468,6 +468,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | |||
| 468 | sigaddset(¤t->blocked,sig); | 468 | sigaddset(¤t->blocked,sig); |
| 469 | recalc_sigpending(); | 469 | recalc_sigpending(); |
| 470 | spin_unlock_irq(¤t->sighand->siglock); | 470 | spin_unlock_irq(¤t->sighand->siglock); |
| 471 | |||
| 472 | tracehook_signal_handler(sig, info, ka, regs, 0); | ||
| 473 | |||
| 471 | return 1; | 474 | return 1; |
| 472 | } | 475 | } |
| 473 | 476 | ||
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 59fc1a43ec3e..f5f96021caa0 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S | |||
| @@ -288,18 +288,23 @@ tracesys: | |||
| 288 | STREG %r18,PT_GR18(%r2) | 288 | STREG %r18,PT_GR18(%r2) |
| 289 | /* Finished saving things for the debugger */ | 289 | /* Finished saving things for the debugger */ |
| 290 | 290 | ||
| 291 | ldil L%syscall_trace,%r1 | 291 | copy %r2,%r26 |
| 292 | ldil L%do_syscall_trace_enter,%r1 | ||
| 292 | ldil L%tracesys_next,%r2 | 293 | ldil L%tracesys_next,%r2 |
| 293 | be R%syscall_trace(%sr7,%r1) | 294 | be R%do_syscall_trace_enter(%sr7,%r1) |
| 294 | ldo R%tracesys_next(%r2),%r2 | 295 | ldo R%tracesys_next(%r2),%r2 |
| 295 | 296 | ||
| 296 | tracesys_next: | 297 | tracesys_next: |
| 298 | /* do_syscall_trace_enter either returned the syscallno, or -1L, | ||
| 299 | * so we skip restoring the PT_GR20 below, since we pulled it from | ||
| 300 | * task->thread.regs.gr[20] above. | ||
| 301 | */ | ||
| 302 | copy %ret0,%r20 | ||
| 297 | ldil L%sys_call_table,%r1 | 303 | ldil L%sys_call_table,%r1 |
| 298 | ldo R%sys_call_table(%r1), %r19 | 304 | ldo R%sys_call_table(%r1), %r19 |
| 299 | 305 | ||
| 300 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ | 306 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ |
| 301 | LDREG TI_TASK(%r1), %r1 | 307 | LDREG TI_TASK(%r1), %r1 |
| 302 | LDREG TASK_PT_GR20(%r1), %r20 | ||
| 303 | LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */ | 308 | LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */ |
| 304 | LDREG TASK_PT_GR25(%r1), %r25 | 309 | LDREG TASK_PT_GR25(%r1), %r25 |
| 305 | LDREG TASK_PT_GR24(%r1), %r24 | 310 | LDREG TASK_PT_GR24(%r1), %r24 |
| @@ -336,7 +341,8 @@ tracesys_exit: | |||
| 336 | #ifdef CONFIG_64BIT | 341 | #ifdef CONFIG_64BIT |
| 337 | ldo -16(%r30),%r29 /* Reference param save area */ | 342 | ldo -16(%r30),%r29 /* Reference param save area */ |
| 338 | #endif | 343 | #endif |
| 339 | bl syscall_trace, %r2 | 344 | ldo TASK_REGS(%r1),%r26 |
| 345 | bl do_syscall_trace_exit,%r2 | ||
| 340 | STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ | 346 | STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ |
| 341 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ | 347 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ |
| 342 | LDREG TI_TASK(%r1), %r1 | 348 | LDREG TI_TASK(%r1), %r1 |
| @@ -353,12 +359,12 @@ tracesys_exit: | |||
| 353 | 359 | ||
| 354 | tracesys_sigexit: | 360 | tracesys_sigexit: |
| 355 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ | 361 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ |
| 356 | LDREG 0(%r1), %r1 | 362 | LDREG TI_TASK(%r1), %r1 |
| 357 | #ifdef CONFIG_64BIT | 363 | #ifdef CONFIG_64BIT |
| 358 | ldo -16(%r30),%r29 /* Reference param save area */ | 364 | ldo -16(%r30),%r29 /* Reference param save area */ |
| 359 | #endif | 365 | #endif |
| 360 | bl syscall_trace, %r2 | 366 | bl do_syscall_trace_exit,%r2 |
| 361 | nop | 367 | ldo TASK_REGS(%r1),%r26 |
| 362 | 368 | ||
| 363 | ldil L%syscall_exit_rfi,%r1 | 369 | ldil L%syscall_exit_rfi,%r1 |
| 364 | be,n R%syscall_exit_rfi(%sr7,%r1) | 370 | be,n R%syscall_exit_rfi(%sr7,%r1) |
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index 775be2791bc2..fda4baa059b5 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <asm/cache.h> | 28 | #include <asm/cache.h> |
| 29 | #include <asm/page.h> | 29 | #include <asm/page.h> |
| 30 | #include <asm/asm-offsets.h> | 30 | #include <asm/asm-offsets.h> |
| 31 | #include <asm/thread_info.h> | ||
| 31 | 32 | ||
| 32 | /* ld script to make hppa Linux kernel */ | 33 | /* ld script to make hppa Linux kernel */ |
| 33 | #ifndef CONFIG_64BIT | 34 | #ifndef CONFIG_64BIT |
| @@ -134,6 +135,15 @@ SECTIONS | |||
| 134 | __init_begin = .; | 135 | __init_begin = .; |
| 135 | INIT_TEXT_SECTION(16384) | 136 | INIT_TEXT_SECTION(16384) |
| 136 | INIT_DATA_SECTION(16) | 137 | INIT_DATA_SECTION(16) |
| 138 | /* we have to discard exit text and such at runtime, not link time */ | ||
| 139 | .exit.text : | ||
| 140 | { | ||
| 141 | EXIT_TEXT | ||
| 142 | } | ||
| 143 | .exit.data : | ||
| 144 | { | ||
| 145 | EXIT_DATA | ||
| 146 | } | ||
| 137 | 147 | ||
| 138 | PERCPU(PAGE_SIZE) | 148 | PERCPU(PAGE_SIZE) |
| 139 | . = ALIGN(PAGE_SIZE); | 149 | . = ALIGN(PAGE_SIZE); |
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index d5aca31fddbb..13b6e3e59b99 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c | |||
| @@ -434,8 +434,8 @@ void mark_rodata_ro(void) | |||
| 434 | #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \ | 434 | #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \ |
| 435 | & ~(VM_MAP_OFFSET-1))) | 435 | & ~(VM_MAP_OFFSET-1))) |
| 436 | 436 | ||
| 437 | void *vmalloc_start __read_mostly; | 437 | void *parisc_vmalloc_start __read_mostly; |
| 438 | EXPORT_SYMBOL(vmalloc_start); | 438 | EXPORT_SYMBOL(parisc_vmalloc_start); |
| 439 | 439 | ||
| 440 | #ifdef CONFIG_PA11 | 440 | #ifdef CONFIG_PA11 |
| 441 | unsigned long pcxl_dma_start __read_mostly; | 441 | unsigned long pcxl_dma_start __read_mostly; |
| @@ -496,13 +496,14 @@ void __init mem_init(void) | |||
| 496 | #ifdef CONFIG_PA11 | 496 | #ifdef CONFIG_PA11 |
| 497 | if (hppa_dma_ops == &pcxl_dma_ops) { | 497 | if (hppa_dma_ops == &pcxl_dma_ops) { |
| 498 | pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START); | 498 | pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START); |
| 499 | vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE); | 499 | parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start |
| 500 | + PCXL_DMA_MAP_SIZE); | ||
| 500 | } else { | 501 | } else { |
| 501 | pcxl_dma_start = 0; | 502 | pcxl_dma_start = 0; |
| 502 | vmalloc_start = SET_MAP_OFFSET(MAP_START); | 503 | parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START); |
| 503 | } | 504 | } |
| 504 | #else | 505 | #else |
| 505 | vmalloc_start = SET_MAP_OFFSET(MAP_START); | 506 | parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START); |
| 506 | #endif | 507 | #endif |
| 507 | 508 | ||
| 508 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n", | 509 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n", |
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index ec5eee7c25d8..06cce8285ba0 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h | |||
| @@ -58,7 +58,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, | |||
| 58 | int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); | 58 | int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); |
| 59 | int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action); | 59 | int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action); |
| 60 | 60 | ||
| 61 | static inline int kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu) | 61 | static inline long kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu) |
| 62 | { | 62 | { |
| 63 | return vcpu->arch.sie_block->gmslm | 63 | return vcpu->arch.sie_block->gmslm |
| 64 | - vcpu->arch.sie_block->gmsor | 64 | - vcpu->arch.sie_block->gmsor |
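The return type is widened because the guest memory size computed from the sie_block fields no longer fits an int once guests reach 2 GiB and beyond. A small stand-alone illustration of the truncation the change avoids (the field values are invented, and the helper's full expression continues past the lines quoted here):

#include <stdio.h>

int main(void)
{
	unsigned long gmsor = 0;             /* guest memory origin, made up */
	unsigned long gmslm = 0xffffffffUL;  /* guest memory limit, made up  */

	int  as_int  = gmslm - gmsor + 1;    /* truncates: prints 0          */
	long as_long = gmslm - gmsor + 1;    /* 4294967296 on a 64-bit build */

	printf("as int:  %d\nas long: %ld\n", as_int, as_long);
	return 0;
}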
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index ac45aab741a5..05ef5380a687 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
| @@ -26,6 +26,7 @@ config SPARC | |||
| 26 | select RTC_CLASS | 26 | select RTC_CLASS |
| 27 | select RTC_DRV_M48T59 | 27 | select RTC_DRV_M48T59 |
| 28 | select HAVE_PERF_EVENTS | 28 | select HAVE_PERF_EVENTS |
| 29 | select PERF_USE_VMALLOC | ||
| 29 | select HAVE_DMA_ATTRS | 30 | select HAVE_DMA_ATTRS |
| 30 | select HAVE_DMA_API_DEBUG | 31 | select HAVE_DMA_API_DEBUG |
| 31 | 32 | ||
| @@ -48,6 +49,7 @@ config SPARC64 | |||
| 48 | select RTC_DRV_SUN4V | 49 | select RTC_DRV_SUN4V |
| 49 | select RTC_DRV_STARFIRE | 50 | select RTC_DRV_STARFIRE |
| 50 | select HAVE_PERF_EVENTS | 51 | select HAVE_PERF_EVENTS |
| 52 | select PERF_USE_VMALLOC | ||
| 51 | 53 | ||
| 52 | config ARCH_DEFCONFIG | 54 | config ARCH_DEFCONFIG |
| 53 | string | 55 | string |
diff --git a/arch/sparc/include/asm/hardirq_32.h b/arch/sparc/include/asm/hardirq_32.h index 4f63ed8df551..162007643cdc 100644 --- a/arch/sparc/include/asm/hardirq_32.h +++ b/arch/sparc/include/asm/hardirq_32.h | |||
| @@ -7,17 +7,7 @@ | |||
| 7 | #ifndef __SPARC_HARDIRQ_H | 7 | #ifndef __SPARC_HARDIRQ_H |
| 8 | #define __SPARC_HARDIRQ_H | 8 | #define __SPARC_HARDIRQ_H |
| 9 | 9 | ||
| 10 | #include <linux/threads.h> | ||
| 11 | #include <linux/spinlock.h> | ||
| 12 | #include <linux/cache.h> | ||
| 13 | |||
| 14 | /* entry.S is sensitive to the offsets of these fields */ /* XXX P3 Is it? */ | ||
| 15 | typedef struct { | ||
| 16 | unsigned int __softirq_pending; | ||
| 17 | } ____cacheline_aligned irq_cpustat_t; | ||
| 18 | |||
| 19 | #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ | ||
| 20 | |||
| 21 | #define HARDIRQ_BITS 8 | 10 | #define HARDIRQ_BITS 8 |
| 11 | #include <asm-generic/hardirq.h> | ||
| 22 | 12 | ||
| 23 | #endif /* __SPARC_HARDIRQ_H */ | 13 | #endif /* __SPARC_HARDIRQ_H */ |
diff --git a/arch/sparc/include/asm/irq_32.h b/arch/sparc/include/asm/irq_32.h index ea43057d4763..cbf4801deaaf 100644 --- a/arch/sparc/include/asm/irq_32.h +++ b/arch/sparc/include/asm/irq_32.h | |||
| @@ -6,10 +6,10 @@ | |||
| 6 | #ifndef _SPARC_IRQ_H | 6 | #ifndef _SPARC_IRQ_H |
| 7 | #define _SPARC_IRQ_H | 7 | #define _SPARC_IRQ_H |
| 8 | 8 | ||
| 9 | #include <linux/interrupt.h> | ||
| 10 | |||
| 11 | #define NR_IRQS 16 | 9 | #define NR_IRQS 16 |
| 12 | 10 | ||
| 11 | #include <linux/interrupt.h> | ||
| 12 | |||
| 13 | #define irq_canonicalize(irq) (irq) | 13 | #define irq_canonicalize(irq) (irq) |
| 14 | 14 | ||
| 15 | extern void __init init_IRQ(void); | 15 | extern void __init init_IRQ(void); |
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 0ff92fa22064..f3cb790fa2ae 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h | |||
| @@ -41,8 +41,8 @@ | |||
| 41 | #define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL) | 41 | #define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL) |
| 42 | #define HI_OBP_ADDRESS _AC(0x0000000100000000,UL) | 42 | #define HI_OBP_ADDRESS _AC(0x0000000100000000,UL) |
| 43 | #define VMALLOC_START _AC(0x0000000100000000,UL) | 43 | #define VMALLOC_START _AC(0x0000000100000000,UL) |
| 44 | #define VMALLOC_END _AC(0x0000000200000000,UL) | 44 | #define VMALLOC_END _AC(0x0000010000000000,UL) |
| 45 | #define VMEMMAP_BASE _AC(0x0000000200000000,UL) | 45 | #define VMEMMAP_BASE _AC(0x0000010000000000,UL) |
| 46 | 46 | ||
| 47 | #define vmemmap ((struct page *)VMEMMAP_BASE) | 47 | #define vmemmap ((struct page *)VMEMMAP_BASE) |
| 48 | 48 | ||
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S index 3ea6e8cde8c5..1d361477d7d6 100644 --- a/arch/sparc/kernel/ktlb.S +++ b/arch/sparc/kernel/ktlb.S | |||
| @@ -280,8 +280,8 @@ kvmap_dtlb_nonlinear: | |||
| 280 | 280 | ||
| 281 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | 281 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
| 282 | /* Do not use the TSB for vmemmap. */ | 282 | /* Do not use the TSB for vmemmap. */ |
| 283 | mov (VMEMMAP_BASE >> 24), %g5 | 283 | mov (VMEMMAP_BASE >> 40), %g5 |
| 284 | sllx %g5, 24, %g5 | 284 | sllx %g5, 40, %g5 |
| 285 | cmp %g4,%g5 | 285 | cmp %g4,%g5 |
| 286 | bgeu,pn %xcc, kvmap_vmemmap | 286 | bgeu,pn %xcc, kvmap_vmemmap |
| 287 | nop | 287 | nop |
| @@ -293,8 +293,8 @@ kvmap_dtlb_tsbmiss: | |||
| 293 | sethi %hi(MODULES_VADDR), %g5 | 293 | sethi %hi(MODULES_VADDR), %g5 |
| 294 | cmp %g4, %g5 | 294 | cmp %g4, %g5 |
| 295 | blu,pn %xcc, kvmap_dtlb_longpath | 295 | blu,pn %xcc, kvmap_dtlb_longpath |
| 296 | mov (VMALLOC_END >> 24), %g5 | 296 | mov (VMALLOC_END >> 40), %g5 |
| 297 | sllx %g5, 24, %g5 | 297 | sllx %g5, 40, %g5 |
| 298 | cmp %g4, %g5 | 298 | cmp %g4, %g5 |
| 299 | bgeu,pn %xcc, kvmap_dtlb_longpath | 299 | bgeu,pn %xcc, kvmap_dtlb_longpath |
| 300 | nop | 300 | nop |
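The pgtable_64.h and ktlb.S hunks go together: VMALLOC_END and VMEMMAP_BASE move up to 0x0000010000000000, and the assembly that rebuilds those constants with a mov/sllx pair switches from a 24-bit to a 40-bit shift -- most likely because the mov expands to an instruction with a 13-bit signed immediate, so the shifted-down constant has to stay small. A quick stand-alone check of that arithmetic (the simm13 limit is an assumption from the SPARC ISA; the addresses come from the hunks above):

#include <stdio.h>

int main(void)
{
	unsigned long old_end = 0x0000000200000000UL;  /* previous VMALLOC_END */
	unsigned long new_end = 0x0000010000000000UL;  /* new VMALLOC_END      */

	printf("old_end >> 24 = %#lx  (fits a 13-bit signed immediate)\n",
	       old_end >> 24);
	printf("new_end >> 24 = %#lx  (too large for simm13)\n",
	       new_end >> 24);
	printf("new_end >> 40 = %#lx  (fits again, hence the wider shift)\n",
	       new_end >> 40);
	return 0;
}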
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 2d6a1b10c81d..04db92743896 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c | |||
| @@ -56,7 +56,8 @@ struct cpu_hw_events { | |||
| 56 | struct perf_event *events[MAX_HWEVENTS]; | 56 | struct perf_event *events[MAX_HWEVENTS]; |
| 57 | unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; | 57 | unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; |
| 58 | unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; | 58 | unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; |
| 59 | int enabled; | 59 | u64 pcr; |
| 60 | int enabled; | ||
| 60 | }; | 61 | }; |
| 61 | DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; | 62 | DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; |
| 62 | 63 | ||
| @@ -68,8 +69,30 @@ struct perf_event_map { | |||
| 68 | #define PIC_LOWER 0x02 | 69 | #define PIC_LOWER 0x02 |
| 69 | }; | 70 | }; |
| 70 | 71 | ||
| 72 | static unsigned long perf_event_encode(const struct perf_event_map *pmap) | ||
| 73 | { | ||
| 74 | return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask; | ||
| 75 | } | ||
| 76 | |||
| 77 | static void perf_event_decode(unsigned long val, u16 *enc, u8 *msk) | ||
| 78 | { | ||
| 79 | *msk = val & 0xff; | ||
| 80 | *enc = val >> 16; | ||
| 81 | } | ||
| 82 | |||
| 83 | #define C(x) PERF_COUNT_HW_CACHE_##x | ||
| 84 | |||
| 85 | #define CACHE_OP_UNSUPPORTED 0xfffe | ||
| 86 | #define CACHE_OP_NONSENSE 0xffff | ||
| 87 | |||
| 88 | typedef struct perf_event_map cache_map_t | ||
| 89 | [PERF_COUNT_HW_CACHE_MAX] | ||
| 90 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
| 91 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | ||
| 92 | |||
| 71 | struct sparc_pmu { | 93 | struct sparc_pmu { |
| 72 | const struct perf_event_map *(*event_map)(int); | 94 | const struct perf_event_map *(*event_map)(int); |
| 95 | const cache_map_t *cache_map; | ||
| 73 | int max_events; | 96 | int max_events; |
| 74 | int upper_shift; | 97 | int upper_shift; |
| 75 | int lower_shift; | 98 | int lower_shift; |
| @@ -80,21 +103,109 @@ struct sparc_pmu { | |||
| 80 | int lower_nop; | 103 | int lower_nop; |
| 81 | }; | 104 | }; |
| 82 | 105 | ||
| 83 | static const struct perf_event_map ultra3i_perfmon_event_map[] = { | 106 | static const struct perf_event_map ultra3_perfmon_event_map[] = { |
| 84 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER }, | 107 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER }, |
| 85 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER }, | 108 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER }, |
| 86 | [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER }, | 109 | [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER }, |
| 87 | [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER }, | 110 | [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER }, |
| 88 | }; | 111 | }; |
| 89 | 112 | ||
| 90 | static const struct perf_event_map *ultra3i_event_map(int event_id) | 113 | static const struct perf_event_map *ultra3_event_map(int event_id) |
| 91 | { | 114 | { |
| 92 | return &ultra3i_perfmon_event_map[event_id]; | 115 | return &ultra3_perfmon_event_map[event_id]; |
| 93 | } | 116 | } |
| 94 | 117 | ||
| 95 | static const struct sparc_pmu ultra3i_pmu = { | 118 | static const cache_map_t ultra3_cache_map = { |
| 96 | .event_map = ultra3i_event_map, | 119 | [C(L1D)] = { |
| 97 | .max_events = ARRAY_SIZE(ultra3i_perfmon_event_map), | 120 | [C(OP_READ)] = { |
| 121 | [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, }, | ||
| 122 | [C(RESULT_MISS)] = { 0x09, PIC_UPPER, }, | ||
| 123 | }, | ||
| 124 | [C(OP_WRITE)] = { | ||
| 125 | [C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER }, | ||
| 126 | [C(RESULT_MISS)] = { 0x0a, PIC_UPPER }, | ||
| 127 | }, | ||
| 128 | [C(OP_PREFETCH)] = { | ||
| 129 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 130 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 131 | }, | ||
| 132 | }, | ||
| 133 | [C(L1I)] = { | ||
| 134 | [C(OP_READ)] = { | ||
| 135 | [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, }, | ||
| 136 | [C(RESULT_MISS)] = { 0x09, PIC_UPPER, }, | ||
| 137 | }, | ||
| 138 | [ C(OP_WRITE) ] = { | ||
| 139 | [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, | ||
| 140 | [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, | ||
| 141 | }, | ||
| 142 | [ C(OP_PREFETCH) ] = { | ||
| 143 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 144 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 145 | }, | ||
| 146 | }, | ||
| 147 | [C(LL)] = { | ||
| 148 | [C(OP_READ)] = { | ||
| 149 | [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, }, | ||
| 150 | [C(RESULT_MISS)] = { 0x0c, PIC_UPPER, }, | ||
| 151 | }, | ||
| 152 | [C(OP_WRITE)] = { | ||
| 153 | [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER }, | ||
| 154 | [C(RESULT_MISS)] = { 0x0c, PIC_UPPER }, | ||
| 155 | }, | ||
| 156 | [C(OP_PREFETCH)] = { | ||
| 157 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 158 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 159 | }, | ||
| 160 | }, | ||
| 161 | [C(DTLB)] = { | ||
| 162 | [C(OP_READ)] = { | ||
| 163 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 164 | [C(RESULT_MISS)] = { 0x12, PIC_UPPER, }, | ||
| 165 | }, | ||
| 166 | [ C(OP_WRITE) ] = { | ||
| 167 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 168 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 169 | }, | ||
| 170 | [ C(OP_PREFETCH) ] = { | ||
| 171 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 172 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 173 | }, | ||
| 174 | }, | ||
| 175 | [C(ITLB)] = { | ||
| 176 | [C(OP_READ)] = { | ||
| 177 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 178 | [C(RESULT_MISS)] = { 0x11, PIC_UPPER, }, | ||
| 179 | }, | ||
| 180 | [ C(OP_WRITE) ] = { | ||
| 181 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 182 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 183 | }, | ||
| 184 | [ C(OP_PREFETCH) ] = { | ||
| 185 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 186 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 187 | }, | ||
| 188 | }, | ||
| 189 | [C(BPU)] = { | ||
| 190 | [C(OP_READ)] = { | ||
| 191 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 192 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 193 | }, | ||
| 194 | [ C(OP_WRITE) ] = { | ||
| 195 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 196 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 197 | }, | ||
| 198 | [ C(OP_PREFETCH) ] = { | ||
| 199 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 200 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 201 | }, | ||
| 202 | }, | ||
| 203 | }; | ||
| 204 | |||
| 205 | static const struct sparc_pmu ultra3_pmu = { | ||
| 206 | .event_map = ultra3_event_map, | ||
| 207 | .cache_map = &ultra3_cache_map, | ||
| 208 | .max_events = ARRAY_SIZE(ultra3_perfmon_event_map), | ||
| 98 | .upper_shift = 11, | 209 | .upper_shift = 11, |
| 99 | .lower_shift = 4, | 210 | .lower_shift = 4, |
| 100 | .event_mask = 0x3f, | 211 | .event_mask = 0x3f, |
| @@ -102,6 +213,121 @@ static const struct sparc_pmu ultra3i_pmu = { | |||
| 102 | .lower_nop = 0x14, | 213 | .lower_nop = 0x14, |
| 103 | }; | 214 | }; |
| 104 | 215 | ||
| 216 | /* Niagara1 is very limited. The upper PIC is hard-locked to count | ||
| 217 | * only instructions, so it is free running which creates all kinds of | ||
| 218 | * problems. Some hardware designs make one wonder if the creator | ||
| 219 | * even looked at how this stuff gets used by software. | ||
| 220 | */ | ||
| 221 | static const struct perf_event_map niagara1_perfmon_event_map[] = { | ||
| 222 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER }, | ||
| 223 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER }, | ||
| 224 | [PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE }, | ||
| 225 | [PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER }, | ||
| 226 | }; | ||
| 227 | |||
| 228 | static const struct perf_event_map *niagara1_event_map(int event_id) | ||
| 229 | { | ||
| 230 | return &niagara1_perfmon_event_map[event_id]; | ||
| 231 | } | ||
| 232 | |||
| 233 | static const cache_map_t niagara1_cache_map = { | ||
| 234 | [C(L1D)] = { | ||
| 235 | [C(OP_READ)] = { | ||
| 236 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 237 | [C(RESULT_MISS)] = { 0x03, PIC_LOWER, }, | ||
| 238 | }, | ||
| 239 | [C(OP_WRITE)] = { | ||
| 240 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 241 | [C(RESULT_MISS)] = { 0x03, PIC_LOWER, }, | ||
| 242 | }, | ||
| 243 | [C(OP_PREFETCH)] = { | ||
| 244 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 245 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 246 | }, | ||
| 247 | }, | ||
| 248 | [C(L1I)] = { | ||
| 249 | [C(OP_READ)] = { | ||
| 250 | [C(RESULT_ACCESS)] = { 0x00, PIC_UPPER }, | ||
| 251 | [C(RESULT_MISS)] = { 0x02, PIC_LOWER, }, | ||
| 252 | }, | ||
| 253 | [ C(OP_WRITE) ] = { | ||
| 254 | [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, | ||
| 255 | [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, | ||
| 256 | }, | ||
| 257 | [ C(OP_PREFETCH) ] = { | ||
| 258 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 259 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 260 | }, | ||
| 261 | }, | ||
| 262 | [C(LL)] = { | ||
| 263 | [C(OP_READ)] = { | ||
| 264 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 265 | [C(RESULT_MISS)] = { 0x07, PIC_LOWER, }, | ||
| 266 | }, | ||
| 267 | [C(OP_WRITE)] = { | ||
| 268 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 269 | [C(RESULT_MISS)] = { 0x07, PIC_LOWER, }, | ||
| 270 | }, | ||
| 271 | [C(OP_PREFETCH)] = { | ||
| 272 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 273 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 274 | }, | ||
| 275 | }, | ||
| 276 | [C(DTLB)] = { | ||
| 277 | [C(OP_READ)] = { | ||
| 278 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 279 | [C(RESULT_MISS)] = { 0x05, PIC_LOWER, }, | ||
| 280 | }, | ||
| 281 | [ C(OP_WRITE) ] = { | ||
| 282 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 283 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 284 | }, | ||
| 285 | [ C(OP_PREFETCH) ] = { | ||
| 286 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 287 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 288 | }, | ||
| 289 | }, | ||
| 290 | [C(ITLB)] = { | ||
| 291 | [C(OP_READ)] = { | ||
| 292 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 293 | [C(RESULT_MISS)] = { 0x04, PIC_LOWER, }, | ||
| 294 | }, | ||
| 295 | [ C(OP_WRITE) ] = { | ||
| 296 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 297 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 298 | }, | ||
| 299 | [ C(OP_PREFETCH) ] = { | ||
| 300 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 301 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 302 | }, | ||
| 303 | }, | ||
| 304 | [C(BPU)] = { | ||
| 305 | [C(OP_READ)] = { | ||
| 306 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 307 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 308 | }, | ||
| 309 | [ C(OP_WRITE) ] = { | ||
| 310 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 311 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 312 | }, | ||
| 313 | [ C(OP_PREFETCH) ] = { | ||
| 314 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 315 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 316 | }, | ||
| 317 | }, | ||
| 318 | }; | ||
| 319 | |||
| 320 | static const struct sparc_pmu niagara1_pmu = { | ||
| 321 | .event_map = niagara1_event_map, | ||
| 322 | .cache_map = &niagara1_cache_map, | ||
| 323 | .max_events = ARRAY_SIZE(niagara1_perfmon_event_map), | ||
| 324 | .upper_shift = 0, | ||
| 325 | .lower_shift = 4, | ||
| 326 | .event_mask = 0x7, | ||
| 327 | .upper_nop = 0x0, | ||
| 328 | .lower_nop = 0x0, | ||
| 329 | }; | ||
| 330 | |||
| 105 | static const struct perf_event_map niagara2_perfmon_event_map[] = { | 331 | static const struct perf_event_map niagara2_perfmon_event_map[] = { |
| 106 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER }, | 332 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER }, |
| 107 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER }, | 333 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER }, |
| @@ -116,8 +342,96 @@ static const struct perf_event_map *niagara2_event_map(int event_id) | |||
| 116 | return &niagara2_perfmon_event_map[event_id]; | 342 | return &niagara2_perfmon_event_map[event_id]; |
| 117 | } | 343 | } |
| 118 | 344 | ||
| 345 | static const cache_map_t niagara2_cache_map = { | ||
| 346 | [C(L1D)] = { | ||
| 347 | [C(OP_READ)] = { | ||
| 348 | [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, }, | ||
| 349 | [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, }, | ||
| 350 | }, | ||
| 351 | [C(OP_WRITE)] = { | ||
| 352 | [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, }, | ||
| 353 | [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, }, | ||
| 354 | }, | ||
| 355 | [C(OP_PREFETCH)] = { | ||
| 356 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 357 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 358 | }, | ||
| 359 | }, | ||
| 360 | [C(L1I)] = { | ||
| 361 | [C(OP_READ)] = { | ||
| 362 | [C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, }, | ||
| 363 | [C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, }, | ||
| 364 | }, | ||
| 365 | [ C(OP_WRITE) ] = { | ||
| 366 | [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, | ||
| 367 | [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, | ||
| 368 | }, | ||
| 369 | [ C(OP_PREFETCH) ] = { | ||
| 370 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 371 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 372 | }, | ||
| 373 | }, | ||
| 374 | [C(LL)] = { | ||
| 375 | [C(OP_READ)] = { | ||
| 376 | [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, }, | ||
| 377 | [C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, }, | ||
| 378 | }, | ||
| 379 | [C(OP_WRITE)] = { | ||
| 380 | [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, }, | ||
| 381 | [C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, }, | ||
| 382 | }, | ||
| 383 | [C(OP_PREFETCH)] = { | ||
| 384 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 385 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 386 | }, | ||
| 387 | }, | ||
| 388 | [C(DTLB)] = { | ||
| 389 | [C(OP_READ)] = { | ||
| 390 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 391 | [C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, }, | ||
| 392 | }, | ||
| 393 | [ C(OP_WRITE) ] = { | ||
| 394 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 395 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 396 | }, | ||
| 397 | [ C(OP_PREFETCH) ] = { | ||
| 398 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 399 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 400 | }, | ||
| 401 | }, | ||
| 402 | [C(ITLB)] = { | ||
| 403 | [C(OP_READ)] = { | ||
| 404 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 405 | [C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, }, | ||
| 406 | }, | ||
| 407 | [ C(OP_WRITE) ] = { | ||
| 408 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 409 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 410 | }, | ||
| 411 | [ C(OP_PREFETCH) ] = { | ||
| 412 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 413 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 414 | }, | ||
| 415 | }, | ||
| 416 | [C(BPU)] = { | ||
| 417 | [C(OP_READ)] = { | ||
| 418 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 419 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 420 | }, | ||
| 421 | [ C(OP_WRITE) ] = { | ||
| 422 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 423 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 424 | }, | ||
| 425 | [ C(OP_PREFETCH) ] = { | ||
| 426 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 427 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 428 | }, | ||
| 429 | }, | ||
| 430 | }; | ||
| 431 | |||
| 119 | static const struct sparc_pmu niagara2_pmu = { | 432 | static const struct sparc_pmu niagara2_pmu = { |
| 120 | .event_map = niagara2_event_map, | 433 | .event_map = niagara2_event_map, |
| 434 | .cache_map = &niagara2_cache_map, | ||
| 121 | .max_events = ARRAY_SIZE(niagara2_perfmon_event_map), | 435 | .max_events = ARRAY_SIZE(niagara2_perfmon_event_map), |
| 122 | .upper_shift = 19, | 436 | .upper_shift = 19, |
| 123 | .lower_shift = 6, | 437 | .lower_shift = 6, |
| @@ -151,23 +465,30 @@ static u64 nop_for_index(int idx) | |||
| 151 | sparc_pmu->lower_nop, idx); | 465 | sparc_pmu->lower_nop, idx); |
| 152 | } | 466 | } |
| 153 | 467 | ||
| 154 | static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc, | 468 | static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx) |
| 155 | int idx) | ||
| 156 | { | 469 | { |
| 157 | u64 val, mask = mask_for_index(idx); | 470 | u64 val, mask = mask_for_index(idx); |
| 158 | 471 | ||
| 159 | val = pcr_ops->read(); | 472 | val = cpuc->pcr; |
| 160 | pcr_ops->write((val & ~mask) | hwc->config); | 473 | val &= ~mask; |
| 474 | val |= hwc->config; | ||
| 475 | cpuc->pcr = val; | ||
| 476 | |||
| 477 | pcr_ops->write(cpuc->pcr); | ||
| 161 | } | 478 | } |
| 162 | 479 | ||
| 163 | static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc, | 480 | static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx) |
| 164 | int idx) | ||
| 165 | { | 481 | { |
| 166 | u64 mask = mask_for_index(idx); | 482 | u64 mask = mask_for_index(idx); |
| 167 | u64 nop = nop_for_index(idx); | 483 | u64 nop = nop_for_index(idx); |
| 168 | u64 val = pcr_ops->read(); | 484 | u64 val; |
| 169 | 485 | ||
| 170 | pcr_ops->write((val & ~mask) | nop); | 486 | val = cpuc->pcr; |
| 487 | val &= ~mask; | ||
| 488 | val |= nop; | ||
| 489 | cpuc->pcr = val; | ||
| 490 | |||
| 491 | pcr_ops->write(cpuc->pcr); | ||
| 171 | } | 492 | } |
| 172 | 493 | ||
| 173 | void hw_perf_enable(void) | 494 | void hw_perf_enable(void) |
| @@ -182,7 +503,7 @@ void hw_perf_enable(void) | |||
| 182 | cpuc->enabled = 1; | 503 | cpuc->enabled = 1; |
| 183 | barrier(); | 504 | barrier(); |
| 184 | 505 | ||
| 185 | val = pcr_ops->read(); | 506 | val = cpuc->pcr; |
| 186 | 507 | ||
| 187 | for (i = 0; i < MAX_HWEVENTS; i++) { | 508 | for (i = 0; i < MAX_HWEVENTS; i++) { |
| 188 | struct perf_event *cp = cpuc->events[i]; | 509 | struct perf_event *cp = cpuc->events[i]; |
| @@ -194,7 +515,9 @@ void hw_perf_enable(void) | |||
| 194 | val |= hwc->config_base; | 515 | val |= hwc->config_base; |
| 195 | } | 516 | } |
| 196 | 517 | ||
| 197 | pcr_ops->write(val); | 518 | cpuc->pcr = val; |
| 519 | |||
| 520 | pcr_ops->write(cpuc->pcr); | ||
| 198 | } | 521 | } |
| 199 | 522 | ||
| 200 | void hw_perf_disable(void) | 523 | void hw_perf_disable(void) |
| @@ -207,10 +530,12 @@ void hw_perf_disable(void) | |||
| 207 | 530 | ||
| 208 | cpuc->enabled = 0; | 531 | cpuc->enabled = 0; |
| 209 | 532 | ||
| 210 | val = pcr_ops->read(); | 533 | val = cpuc->pcr; |
| 211 | val &= ~(PCR_UTRACE | PCR_STRACE | | 534 | val &= ~(PCR_UTRACE | PCR_STRACE | |
| 212 | sparc_pmu->hv_bit | sparc_pmu->irq_bit); | 535 | sparc_pmu->hv_bit | sparc_pmu->irq_bit); |
| 213 | pcr_ops->write(val); | 536 | cpuc->pcr = val; |
| 537 | |||
| 538 | pcr_ops->write(cpuc->pcr); | ||
| 214 | } | 539 | } |
| 215 | 540 | ||
| 216 | static u32 read_pmc(int idx) | 541 | static u32 read_pmc(int idx) |
| @@ -242,7 +567,7 @@ static void write_pmc(int idx, u64 val) | |||
| 242 | } | 567 | } |
| 243 | 568 | ||
| 244 | static int sparc_perf_event_set_period(struct perf_event *event, | 569 | static int sparc_perf_event_set_period(struct perf_event *event, |
| 245 | struct hw_perf_event *hwc, int idx) | 570 | struct hw_perf_event *hwc, int idx) |
| 246 | { | 571 | { |
| 247 | s64 left = atomic64_read(&hwc->period_left); | 572 | s64 left = atomic64_read(&hwc->period_left); |
| 248 | s64 period = hwc->sample_period; | 573 | s64 period = hwc->sample_period; |
| @@ -282,19 +607,19 @@ static int sparc_pmu_enable(struct perf_event *event) | |||
| 282 | if (test_and_set_bit(idx, cpuc->used_mask)) | 607 | if (test_and_set_bit(idx, cpuc->used_mask)) |
| 283 | return -EAGAIN; | 608 | return -EAGAIN; |
| 284 | 609 | ||
| 285 | sparc_pmu_disable_event(hwc, idx); | 610 | sparc_pmu_disable_event(cpuc, hwc, idx); |
| 286 | 611 | ||
| 287 | cpuc->events[idx] = event; | 612 | cpuc->events[idx] = event; |
| 288 | set_bit(idx, cpuc->active_mask); | 613 | set_bit(idx, cpuc->active_mask); |
| 289 | 614 | ||
| 290 | sparc_perf_event_set_period(event, hwc, idx); | 615 | sparc_perf_event_set_period(event, hwc, idx); |
| 291 | sparc_pmu_enable_event(hwc, idx); | 616 | sparc_pmu_enable_event(cpuc, hwc, idx); |
| 292 | perf_event_update_userpage(event); | 617 | perf_event_update_userpage(event); |
| 293 | return 0; | 618 | return 0; |
| 294 | } | 619 | } |
| 295 | 620 | ||
| 296 | static u64 sparc_perf_event_update(struct perf_event *event, | 621 | static u64 sparc_perf_event_update(struct perf_event *event, |
| 297 | struct hw_perf_event *hwc, int idx) | 622 | struct hw_perf_event *hwc, int idx) |
| 298 | { | 623 | { |
| 299 | int shift = 64 - 32; | 624 | int shift = 64 - 32; |
| 300 | u64 prev_raw_count, new_raw_count; | 625 | u64 prev_raw_count, new_raw_count; |
| @@ -324,7 +649,7 @@ static void sparc_pmu_disable(struct perf_event *event) | |||
| 324 | int idx = hwc->idx; | 649 | int idx = hwc->idx; |
| 325 | 650 | ||
| 326 | clear_bit(idx, cpuc->active_mask); | 651 | clear_bit(idx, cpuc->active_mask); |
| 327 | sparc_pmu_disable_event(hwc, idx); | 652 | sparc_pmu_disable_event(cpuc, hwc, idx); |
| 328 | 653 | ||
| 329 | barrier(); | 654 | barrier(); |
| 330 | 655 | ||
| @@ -338,18 +663,29 @@ static void sparc_pmu_disable(struct perf_event *event) | |||
| 338 | static void sparc_pmu_read(struct perf_event *event) | 663 | static void sparc_pmu_read(struct perf_event *event) |
| 339 | { | 664 | { |
| 340 | struct hw_perf_event *hwc = &event->hw; | 665 | struct hw_perf_event *hwc = &event->hw; |
| 666 | |||
| 341 | sparc_perf_event_update(event, hwc, hwc->idx); | 667 | sparc_perf_event_update(event, hwc, hwc->idx); |
| 342 | } | 668 | } |
| 343 | 669 | ||
| 344 | static void sparc_pmu_unthrottle(struct perf_event *event) | 670 | static void sparc_pmu_unthrottle(struct perf_event *event) |
| 345 | { | 671 | { |
| 672 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
| 346 | struct hw_perf_event *hwc = &event->hw; | 673 | struct hw_perf_event *hwc = &event->hw; |
| 347 | sparc_pmu_enable_event(hwc, hwc->idx); | 674 | |
| 675 | sparc_pmu_enable_event(cpuc, hwc, hwc->idx); | ||
| 348 | } | 676 | } |
| 349 | 677 | ||
| 350 | static atomic_t active_events = ATOMIC_INIT(0); | 678 | static atomic_t active_events = ATOMIC_INIT(0); |
| 351 | static DEFINE_MUTEX(pmc_grab_mutex); | 679 | static DEFINE_MUTEX(pmc_grab_mutex); |
| 352 | 680 | ||
| 681 | static void perf_stop_nmi_watchdog(void *unused) | ||
| 682 | { | ||
| 683 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
| 684 | |||
| 685 | stop_nmi_watchdog(NULL); | ||
| 686 | cpuc->pcr = pcr_ops->read(); | ||
| 687 | } | ||
| 688 | |||
| 353 | void perf_event_grab_pmc(void) | 689 | void perf_event_grab_pmc(void) |
| 354 | { | 690 | { |
| 355 | if (atomic_inc_not_zero(&active_events)) | 691 | if (atomic_inc_not_zero(&active_events)) |
| @@ -358,7 +694,7 @@ void perf_event_grab_pmc(void) | |||
| 358 | mutex_lock(&pmc_grab_mutex); | 694 | mutex_lock(&pmc_grab_mutex); |
| 359 | if (atomic_read(&active_events) == 0) { | 695 | if (atomic_read(&active_events) == 0) { |
| 360 | if (atomic_read(&nmi_active) > 0) { | 696 | if (atomic_read(&nmi_active) > 0) { |
| 361 | on_each_cpu(stop_nmi_watchdog, NULL, 1); | 697 | on_each_cpu(perf_stop_nmi_watchdog, NULL, 1); |
| 362 | BUG_ON(atomic_read(&nmi_active) != 0); | 698 | BUG_ON(atomic_read(&nmi_active) != 0); |
| 363 | } | 699 | } |
| 364 | atomic_inc(&active_events); | 700 | atomic_inc(&active_events); |
| @@ -375,30 +711,160 @@ void perf_event_release_pmc(void) | |||
| 375 | } | 711 | } |
| 376 | } | 712 | } |
| 377 | 713 | ||
| 714 | static const struct perf_event_map *sparc_map_cache_event(u64 config) | ||
| 715 | { | ||
| 716 | unsigned int cache_type, cache_op, cache_result; | ||
| 717 | const struct perf_event_map *pmap; | ||
| 718 | |||
| 719 | if (!sparc_pmu->cache_map) | ||
| 720 | return ERR_PTR(-ENOENT); | ||
| 721 | |||
| 722 | cache_type = (config >> 0) & 0xff; | ||
| 723 | if (cache_type >= PERF_COUNT_HW_CACHE_MAX) | ||
| 724 | return ERR_PTR(-EINVAL); | ||
| 725 | |||
| 726 | cache_op = (config >> 8) & 0xff; | ||
| 727 | if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) | ||
| 728 | return ERR_PTR(-EINVAL); | ||
| 729 | |||
| 730 | cache_result = (config >> 16) & 0xff; | ||
| 731 | if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) | ||
| 732 | return ERR_PTR(-EINVAL); | ||
| 733 | |||
| 734 | pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]); | ||
| 735 | |||
| 736 | if (pmap->encoding == CACHE_OP_UNSUPPORTED) | ||
| 737 | return ERR_PTR(-ENOENT); | ||
| 738 | |||
| 739 | if (pmap->encoding == CACHE_OP_NONSENSE) | ||
| 740 | return ERR_PTR(-EINVAL); | ||
| 741 | |||
| 742 | return pmap; | ||
| 743 | } | ||
| 744 | |||
| 378 | static void hw_perf_event_destroy(struct perf_event *event) | 745 | static void hw_perf_event_destroy(struct perf_event *event) |
| 379 | { | 746 | { |
| 380 | perf_event_release_pmc(); | 747 | perf_event_release_pmc(); |
| 381 | } | 748 | } |
| 382 | 749 | ||
| 750 | /* Make sure all events can be scheduled into the hardware at | ||
| 751 | * the same time. This is simplified by the fact that we only | ||
| 752 | * need to support 2 simultaneous HW events. | ||
| 753 | */ | ||
| 754 | static int sparc_check_constraints(unsigned long *events, int n_ev) | ||
| 755 | { | ||
| 756 | if (n_ev <= perf_max_events) { | ||
| 757 | u8 msk1, msk2; | ||
| 758 | u16 dummy; | ||
| 759 | |||
| 760 | if (n_ev == 1) | ||
| 761 | return 0; | ||
| 762 | BUG_ON(n_ev != 2); | ||
| 763 | perf_event_decode(events[0], &dummy, &msk1); | ||
| 764 | perf_event_decode(events[1], &dummy, &msk2); | ||
| 765 | |||
| 766 | /* If both events can go on any counter, OK. */ | ||
| 767 | if (msk1 == (PIC_UPPER | PIC_LOWER) && | ||
| 768 | msk2 == (PIC_UPPER | PIC_LOWER)) | ||
| 769 | return 0; | ||
| 770 | |||
| 771 | /* If one event is limited to a specific counter, | ||
| 772 | * and the other can go on both, OK. | ||
| 773 | */ | ||
| 774 | if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) && | ||
| 775 | msk2 == (PIC_UPPER | PIC_LOWER)) | ||
| 776 | return 0; | ||
| 777 | if ((msk2 == PIC_UPPER || msk2 == PIC_LOWER) && | ||
| 778 | msk1 == (PIC_UPPER | PIC_LOWER)) | ||
| 779 | return 0; | ||
| 780 | |||
| 781 | /* If the events are fixed to different counters, OK. */ | ||
| 782 | if ((msk1 == PIC_UPPER && msk2 == PIC_LOWER) || | ||
| 783 | (msk1 == PIC_LOWER && msk2 == PIC_UPPER)) | ||
| 784 | return 0; | ||
| 785 | |||
| 786 | /* Otherwise, there is a conflict. */ | ||
| 787 | } | ||
| 788 | |||
| 789 | return -1; | ||
| 790 | } | ||
| 791 | |||
| 792 | static int check_excludes(struct perf_event **evts, int n_prev, int n_new) | ||
| 793 | { | ||
| 794 | int eu = 0, ek = 0, eh = 0; | ||
| 795 | struct perf_event *event; | ||
| 796 | int i, n, first; | ||
| 797 | |||
| 798 | n = n_prev + n_new; | ||
| 799 | if (n <= 1) | ||
| 800 | return 0; | ||
| 801 | |||
| 802 | first = 1; | ||
| 803 | for (i = 0; i < n; i++) { | ||
| 804 | event = evts[i]; | ||
| 805 | if (first) { | ||
| 806 | eu = event->attr.exclude_user; | ||
| 807 | ek = event->attr.exclude_kernel; | ||
| 808 | eh = event->attr.exclude_hv; | ||
| 809 | first = 0; | ||
| 810 | } else if (event->attr.exclude_user != eu || | ||
| 811 | event->attr.exclude_kernel != ek || | ||
| 812 | event->attr.exclude_hv != eh) { | ||
| 813 | return -EAGAIN; | ||
| 814 | } | ||
| 815 | } | ||
| 816 | |||
| 817 | return 0; | ||
| 818 | } | ||
| 819 | |||
| 820 | static int collect_events(struct perf_event *group, int max_count, | ||
| 821 | struct perf_event *evts[], unsigned long *events) | ||
| 822 | { | ||
| 823 | struct perf_event *event; | ||
| 824 | int n = 0; | ||
| 825 | |||
| 826 | if (!is_software_event(group)) { | ||
| 827 | if (n >= max_count) | ||
| 828 | return -1; | ||
| 829 | evts[n] = group; | ||
| 830 | events[n++] = group->hw.event_base; | ||
| 831 | } | ||
| 832 | list_for_each_entry(event, &group->sibling_list, group_entry) { | ||
| 833 | if (!is_software_event(event) && | ||
| 834 | event->state != PERF_EVENT_STATE_OFF) { | ||
| 835 | if (n >= max_count) | ||
| 836 | return -1; | ||
| 837 | evts[n] = event; | ||
| 838 | events[n++] = event->hw.event_base; | ||
| 839 | } | ||
| 840 | } | ||
| 841 | return n; | ||
| 842 | } | ||
| 843 | |||
| 383 | static int __hw_perf_event_init(struct perf_event *event) | 844 | static int __hw_perf_event_init(struct perf_event *event) |
| 384 | { | 845 | { |
| 385 | struct perf_event_attr *attr = &event->attr; | 846 | struct perf_event_attr *attr = &event->attr; |
| 847 | struct perf_event *evts[MAX_HWEVENTS]; | ||
| 386 | struct hw_perf_event *hwc = &event->hw; | 848 | struct hw_perf_event *hwc = &event->hw; |
| 849 | unsigned long events[MAX_HWEVENTS]; | ||
| 387 | const struct perf_event_map *pmap; | 850 | const struct perf_event_map *pmap; |
| 388 | u64 enc; | 851 | u64 enc; |
| 852 | int n; | ||
| 389 | 853 | ||
| 390 | if (atomic_read(&nmi_active) < 0) | 854 | if (atomic_read(&nmi_active) < 0) |
| 391 | return -ENODEV; | 855 | return -ENODEV; |
| 392 | 856 | ||
| 393 | if (attr->type != PERF_TYPE_HARDWARE) | 857 | if (attr->type == PERF_TYPE_HARDWARE) { |
| 858 | if (attr->config >= sparc_pmu->max_events) | ||
| 859 | return -EINVAL; | ||
| 860 | pmap = sparc_pmu->event_map(attr->config); | ||
| 861 | } else if (attr->type == PERF_TYPE_HW_CACHE) { | ||
| 862 | pmap = sparc_map_cache_event(attr->config); | ||
| 863 | if (IS_ERR(pmap)) | ||
| 864 | return PTR_ERR(pmap); | ||
| 865 | } else | ||
| 394 | return -EOPNOTSUPP; | 866 | return -EOPNOTSUPP; |
| 395 | 867 | ||
| 396 | if (attr->config >= sparc_pmu->max_events) | ||
| 397 | return -EINVAL; | ||
| 398 | |||
| 399 | perf_event_grab_pmc(); | ||
| 400 | event->destroy = hw_perf_event_destroy; | ||
| 401 | |||
| 402 | /* We save the enable bits in the config_base. So to | 868 | /* We save the enable bits in the config_base. So to |
| 403 | * turn off sampling just write 'config', and to enable | 869 | * turn off sampling just write 'config', and to enable |
| 404 | * things write 'config | config_base'. | 870 | * things write 'config | config_base'. |
| @@ -411,15 +877,39 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
| 411 | if (!attr->exclude_hv) | 877 | if (!attr->exclude_hv) |
| 412 | hwc->config_base |= sparc_pmu->hv_bit; | 878 | hwc->config_base |= sparc_pmu->hv_bit; |
| 413 | 879 | ||
| 880 | hwc->event_base = perf_event_encode(pmap); | ||
| 881 | |||
| 882 | enc = pmap->encoding; | ||
| 883 | |||
| 884 | n = 0; | ||
| 885 | if (event->group_leader != event) { | ||
| 886 | n = collect_events(event->group_leader, | ||
| 887 | perf_max_events - 1, | ||
| 888 | evts, events); | ||
| 889 | if (n < 0) | ||
| 890 | return -EINVAL; | ||
| 891 | } | ||
| 892 | events[n] = hwc->event_base; | ||
| 893 | evts[n] = event; | ||
| 894 | |||
| 895 | if (check_excludes(evts, n, 1)) | ||
| 896 | return -EINVAL; | ||
| 897 | |||
| 898 | if (sparc_check_constraints(events, n + 1)) | ||
| 899 | return -EINVAL; | ||
| 900 | |||
| 901 | /* Try to do all error checking before this point, as unwinding | ||
| 902 | * state after grabbing the PMC is difficult. | ||
| 903 | */ | ||
| 904 | perf_event_grab_pmc(); | ||
| 905 | event->destroy = hw_perf_event_destroy; | ||
| 906 | |||
| 414 | if (!hwc->sample_period) { | 907 | if (!hwc->sample_period) { |
| 415 | hwc->sample_period = MAX_PERIOD; | 908 | hwc->sample_period = MAX_PERIOD; |
| 416 | hwc->last_period = hwc->sample_period; | 909 | hwc->last_period = hwc->sample_period; |
| 417 | atomic64_set(&hwc->period_left, hwc->sample_period); | 910 | atomic64_set(&hwc->period_left, hwc->sample_period); |
| 418 | } | 911 | } |
| 419 | 912 | ||
| 420 | pmap = sparc_pmu->event_map(attr->config); | ||
| 421 | |||
| 422 | enc = pmap->encoding; | ||
| 423 | if (pmap->pic_mask & PIC_UPPER) { | 913 | if (pmap->pic_mask & PIC_UPPER) { |
| 424 | hwc->idx = PIC_UPPER_INDEX; | 914 | hwc->idx = PIC_UPPER_INDEX; |
| 425 | enc <<= sparc_pmu->upper_shift; | 915 | enc <<= sparc_pmu->upper_shift; |
| @@ -472,7 +962,7 @@ void perf_event_print_debug(void) | |||
| 472 | } | 962 | } |
| 473 | 963 | ||
| 474 | static int __kprobes perf_event_nmi_handler(struct notifier_block *self, | 964 | static int __kprobes perf_event_nmi_handler(struct notifier_block *self, |
| 475 | unsigned long cmd, void *__args) | 965 | unsigned long cmd, void *__args) |
| 476 | { | 966 | { |
| 477 | struct die_args *args = __args; | 967 | struct die_args *args = __args; |
| 478 | struct perf_sample_data data; | 968 | struct perf_sample_data data; |
| @@ -513,7 +1003,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, | |||
| 513 | continue; | 1003 | continue; |
| 514 | 1004 | ||
| 515 | if (perf_event_overflow(event, 1, &data, regs)) | 1005 | if (perf_event_overflow(event, 1, &data, regs)) |
| 516 | sparc_pmu_disable_event(hwc, idx); | 1006 | sparc_pmu_disable_event(cpuc, hwc, idx); |
| 517 | } | 1007 | } |
| 518 | 1008 | ||
| 519 | return NOTIFY_STOP; | 1009 | return NOTIFY_STOP; |
| @@ -525,8 +1015,15 @@ static __read_mostly struct notifier_block perf_event_nmi_notifier = { | |||
| 525 | 1015 | ||
| 526 | static bool __init supported_pmu(void) | 1016 | static bool __init supported_pmu(void) |
| 527 | { | 1017 | { |
| 528 | if (!strcmp(sparc_pmu_type, "ultra3i")) { | 1018 | if (!strcmp(sparc_pmu_type, "ultra3") || |
| 529 | sparc_pmu = &ultra3i_pmu; | 1019 | !strcmp(sparc_pmu_type, "ultra3+") || |
| 1020 | !strcmp(sparc_pmu_type, "ultra3i") || | ||
| 1021 | !strcmp(sparc_pmu_type, "ultra4+")) { | ||
| 1022 | sparc_pmu = &ultra3_pmu; | ||
| 1023 | return true; | ||
| 1024 | } | ||
| 1025 | if (!strcmp(sparc_pmu_type, "niagara")) { | ||
| 1026 | sparc_pmu = &niagara1_pmu; | ||
| 530 | return true; | 1027 | return true; |
| 531 | } | 1028 | } |
| 532 | if (!strcmp(sparc_pmu_type, "niagara2")) { | 1029 | if (!strcmp(sparc_pmu_type, "niagara2")) { |
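For reference, the cache events added above are selected from user space through the generic PERF_TYPE_HW_CACHE config encoding that sparc_map_cache_event() decodes: byte 0 is the cache, byte 1 the operation, byte 2 the result. A minimal user-space sketch composing the config for L1-D read misses (attribute setup only; the perf_event_open() call and error handling are omitted):

#include <stdio.h>
#include <string.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_HW_CACHE;
	attr.config = PERF_COUNT_HW_CACHE_L1D |
		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);

	printf("config = %#llx\n", (unsigned long long)attr.config);
	return 0;
}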
diff --git a/arch/sparc/oprofile/init.c b/arch/sparc/oprofile/init.c index f97cb8b6ee5f..f9024bccff16 100644 --- a/arch/sparc/oprofile/init.c +++ b/arch/sparc/oprofile/init.c | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include <linux/oprofile.h> | 11 | #include <linux/oprofile.h> |
| 12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
| 13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
| 14 | #include <linux/param.h> /* for HZ */ | ||
| 14 | 15 | ||
| 15 | #ifdef CONFIG_SPARC64 | 16 | #ifdef CONFIG_SPARC64 |
| 16 | #include <linux/notifier.h> | 17 | #include <linux/notifier.h> |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 8da93745c087..c876bace8fdc 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
| @@ -86,10 +86,6 @@ config STACKTRACE_SUPPORT | |||
| 86 | config HAVE_LATENCYTOP_SUPPORT | 86 | config HAVE_LATENCYTOP_SUPPORT |
| 87 | def_bool y | 87 | def_bool y |
| 88 | 88 | ||
| 89 | config FAST_CMPXCHG_LOCAL | ||
| 90 | bool | ||
| 91 | default y | ||
| 92 | |||
| 93 | config MMU | 89 | config MMU |
| 94 | def_bool y | 90 | def_bool y |
| 95 | 91 | ||
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 527519b8a9f9..f2824fb8c79c 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu | |||
| @@ -400,7 +400,7 @@ config X86_TSC | |||
| 400 | 400 | ||
| 401 | config X86_CMPXCHG64 | 401 | config X86_CMPXCHG64 |
| 402 | def_bool y | 402 | def_bool y |
| 403 | depends on X86_PAE || X86_64 | 403 | depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM |
| 404 | 404 | ||
| 405 | # this should be set for all -march=.. options where the compiler | 405 | # this should be set for all -march=.. options where the compiler |
| 406 | # generates cmov. | 406 | # generates cmov. |
| @@ -412,6 +412,7 @@ config X86_MINIMUM_CPU_FAMILY | |||
| 412 | int | 412 | int |
| 413 | default "64" if X86_64 | 413 | default "64" if X86_64 |
| 414 | default "6" if X86_32 && X86_P6_NOP | 414 | default "6" if X86_32 && X86_P6_NOP |
| 415 | default "5" if X86_32 && X86_CMPXCHG64 | ||
| 415 | default "4" if X86_32 && (X86_XADD || X86_CMPXCHG || X86_BSWAP || X86_WP_WORKS_OK) | 416 | default "4" if X86_32 && (X86_XADD || X86_CMPXCHG || X86_BSWAP || X86_WP_WORKS_OK) |
| 416 | default "3" | 417 | default "3" |
| 417 | 418 | ||
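The expanded dependency list lets X86_CMPXCHG64 be enabled for 32-bit CPU choices that actually have the CMPXCHG8B instruction, and the i386_ksyms_32.c hunk further below only exports the cmpxchg8b_emu fallback when the option is off. As a rough user-space illustration of the operation in question -- an atomic 8-byte compare-and-swap on 32-bit x86 -- GCC emits CMPXCHG8B for this builtin when compiled with -march=i586 or later, while plain i386 needs a helper, which is the same trade-off the Kconfig change is about:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t val = 0x1122334455667788ULL;

	/* atomically: if (val == old) val = new; */
	if (__sync_bool_compare_and_swap(&val, 0x1122334455667788ULL,
					 0x8877665544332211ULL))
		printf("swapped, val is now %#llx\n",
		       (unsigned long long)val);
	return 0;
}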
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 74619c4f9fda..1733f9f65e82 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S | |||
| @@ -21,8 +21,8 @@ | |||
| 21 | #define __AUDIT_ARCH_LE 0x40000000 | 21 | #define __AUDIT_ARCH_LE 0x40000000 |
| 22 | 22 | ||
| 23 | #ifndef CONFIG_AUDITSYSCALL | 23 | #ifndef CONFIG_AUDITSYSCALL |
| 24 | #define sysexit_audit int_ret_from_sys_call | 24 | #define sysexit_audit ia32_ret_from_sys_call |
| 25 | #define sysretl_audit int_ret_from_sys_call | 25 | #define sysretl_audit ia32_ret_from_sys_call |
| 26 | #endif | 26 | #endif |
| 27 | 27 | ||
| 28 | #define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8) | 28 | #define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8) |
| @@ -39,12 +39,12 @@ | |||
| 39 | .endm | 39 | .endm |
| 40 | 40 | ||
| 41 | /* clobbers %eax */ | 41 | /* clobbers %eax */ |
| 42 | .macro CLEAR_RREGS _r9=rax | 42 | .macro CLEAR_RREGS offset=0, _r9=rax |
| 43 | xorl %eax,%eax | 43 | xorl %eax,%eax |
| 44 | movq %rax,R11(%rsp) | 44 | movq %rax,\offset+R11(%rsp) |
| 45 | movq %rax,R10(%rsp) | 45 | movq %rax,\offset+R10(%rsp) |
| 46 | movq %\_r9,R9(%rsp) | 46 | movq %\_r9,\offset+R9(%rsp) |
| 47 | movq %rax,R8(%rsp) | 47 | movq %rax,\offset+R8(%rsp) |
| 48 | .endm | 48 | .endm |
| 49 | 49 | ||
| 50 | /* | 50 | /* |
| @@ -172,6 +172,10 @@ sysexit_from_sys_call: | |||
| 172 | movl RIP-R11(%rsp),%edx /* User %eip */ | 172 | movl RIP-R11(%rsp),%edx /* User %eip */ |
| 173 | CFI_REGISTER rip,rdx | 173 | CFI_REGISTER rip,rdx |
| 174 | RESTORE_ARGS 1,24,1,1,1,1 | 174 | RESTORE_ARGS 1,24,1,1,1,1 |
| 175 | xorq %r8,%r8 | ||
| 176 | xorq %r9,%r9 | ||
| 177 | xorq %r10,%r10 | ||
| 178 | xorq %r11,%r11 | ||
| 175 | popfq | 179 | popfq |
| 176 | CFI_ADJUST_CFA_OFFSET -8 | 180 | CFI_ADJUST_CFA_OFFSET -8 |
| 177 | /*CFI_RESTORE rflags*/ | 181 | /*CFI_RESTORE rflags*/ |
| @@ -202,7 +206,7 @@ sysexit_from_sys_call: | |||
| 202 | 206 | ||
| 203 | .macro auditsys_exit exit,ebpsave=RBP | 207 | .macro auditsys_exit exit,ebpsave=RBP |
| 204 | testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) | 208 | testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) |
| 205 | jnz int_ret_from_sys_call | 209 | jnz ia32_ret_from_sys_call |
| 206 | TRACE_IRQS_ON | 210 | TRACE_IRQS_ON |
| 207 | sti | 211 | sti |
| 208 | movl %eax,%esi /* second arg, syscall return value */ | 212 | movl %eax,%esi /* second arg, syscall return value */ |
| @@ -218,8 +222,9 @@ sysexit_from_sys_call: | |||
| 218 | cli | 222 | cli |
| 219 | TRACE_IRQS_OFF | 223 | TRACE_IRQS_OFF |
| 220 | testl %edi,TI_flags(%r10) | 224 | testl %edi,TI_flags(%r10) |
| 221 | jnz int_with_check | 225 | jz \exit |
| 222 | jmp \exit | 226 | CLEAR_RREGS -ARGOFFSET |
| 227 | jmp int_with_check | ||
| 223 | .endm | 228 | .endm |
| 224 | 229 | ||
| 225 | sysenter_auditsys: | 230 | sysenter_auditsys: |
| @@ -329,6 +334,9 @@ sysretl_from_sys_call: | |||
| 329 | CFI_REGISTER rip,rcx | 334 | CFI_REGISTER rip,rcx |
| 330 | movl EFLAGS-ARGOFFSET(%rsp),%r11d | 335 | movl EFLAGS-ARGOFFSET(%rsp),%r11d |
| 331 | /*CFI_REGISTER rflags,r11*/ | 336 | /*CFI_REGISTER rflags,r11*/ |
| 337 | xorq %r10,%r10 | ||
| 338 | xorq %r9,%r9 | ||
| 339 | xorq %r8,%r8 | ||
| 332 | TRACE_IRQS_ON | 340 | TRACE_IRQS_ON |
| 333 | movl RSP-ARGOFFSET(%rsp),%esp | 341 | movl RSP-ARGOFFSET(%rsp),%esp |
| 334 | CFI_RESTORE rsp | 342 | CFI_RESTORE rsp |
| @@ -353,7 +361,7 @@ cstar_tracesys: | |||
| 353 | #endif | 361 | #endif |
| 354 | xchgl %r9d,%ebp | 362 | xchgl %r9d,%ebp |
| 355 | SAVE_REST | 363 | SAVE_REST |
| 356 | CLEAR_RREGS r9 | 364 | CLEAR_RREGS 0, r9 |
| 357 | movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ | 365 | movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ |
| 358 | movq %rsp,%rdi /* &pt_regs -> arg1 */ | 366 | movq %rsp,%rdi /* &pt_regs -> arg1 */ |
| 359 | call syscall_trace_enter | 367 | call syscall_trace_enter |
| @@ -425,6 +433,8 @@ ia32_do_call: | |||
| 425 | call *ia32_sys_call_table(,%rax,8) # xxx: rip relative | 433 | call *ia32_sys_call_table(,%rax,8) # xxx: rip relative |
| 426 | ia32_sysret: | 434 | ia32_sysret: |
| 427 | movq %rax,RAX-ARGOFFSET(%rsp) | 435 | movq %rax,RAX-ARGOFFSET(%rsp) |
| 436 | ia32_ret_from_sys_call: | ||
| 437 | CLEAR_RREGS -ARGOFFSET | ||
| 428 | jmp int_ret_from_sys_call | 438 | jmp int_ret_from_sys_call |
| 429 | 439 | ||
| 430 | ia32_tracesys: | 440 | ia32_tracesys: |
| @@ -442,8 +452,8 @@ END(ia32_syscall) | |||
| 442 | 452 | ||
| 443 | ia32_badsys: | 453 | ia32_badsys: |
| 444 | movq $0,ORIG_RAX-ARGOFFSET(%rsp) | 454 | movq $0,ORIG_RAX-ARGOFFSET(%rsp) |
| 445 | movq $-ENOSYS,RAX-ARGOFFSET(%rsp) | 455 | movq $-ENOSYS,%rax |
| 446 | jmp int_ret_from_sys_call | 456 | jmp ia32_sysret |
| 447 | 457 | ||
| 448 | quiet_ni_syscall: | 458 | quiet_ni_syscall: |
| 449 | movq $-ENOSYS,%rax | 459 | movq $-ENOSYS,%rax |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 3be000435fad..d83892226f73 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
| @@ -796,6 +796,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void); | |||
| 796 | #define KVM_ARCH_WANT_MMU_NOTIFIER | 796 | #define KVM_ARCH_WANT_MMU_NOTIFIER |
| 797 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); | 797 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); |
| 798 | int kvm_age_hva(struct kvm *kvm, unsigned long hva); | 798 | int kvm_age_hva(struct kvm *kvm, unsigned long hva); |
| 799 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); | ||
| 799 | int cpuid_maxphyaddr(struct kvm_vcpu *vcpu); | 800 | int cpuid_maxphyaddr(struct kvm_vcpu *vcpu); |
| 800 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); | 801 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); |
| 801 | int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); | 802 | int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); |
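The new kvm_set_spte_hva() hook pairs with the kvm_set_pte_rmapp() logic added in the mmu.c hunks below; it is meant to be driven from an mmu_notifier ->change_pte() callback (the mechanism KSM uses when it replaces a page), so an existing spte can be repointed read-only instead of being zapped. A hedged sketch of the caller side -- names follow virt/kvm/kvm_main.c conventions, but the body here is illustrative and not quoted from the merge:

/* Illustrative fragment, not the merged implementation. */
static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = container_of(mn, struct kvm, mmu_notifier);

	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;		/* fault path rechecks this */
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
}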
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 41fd965c80c6..b9c830c12b4a 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c | |||
| @@ -206,8 +206,11 @@ static int __init setup_early_printk(char *buf) | |||
| 206 | 206 | ||
| 207 | while (*buf != '\0') { | 207 | while (*buf != '\0') { |
| 208 | if (!strncmp(buf, "serial", 6)) { | 208 | if (!strncmp(buf, "serial", 6)) { |
| 209 | early_serial_init(buf + 6); | 209 | buf += 6; |
| 210 | early_serial_init(buf); | ||
| 210 | early_console_register(&early_serial_console, keep); | 211 | early_console_register(&early_serial_console, keep); |
| 212 | if (!strncmp(buf, ",ttyS", 5)) | ||
| 213 | buf += 5; | ||
| 211 | } | 214 | } |
| 212 | if (!strncmp(buf, "ttyS", 4)) { | 215 | if (!strncmp(buf, "ttyS", 4)) { |
| 213 | early_serial_init(buf + 4); | 216 | early_serial_init(buf + 4); |
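The point of the extra buf adjustments: with earlyprintk=serial,ttyS0,115200 the old loop matched the "serial" branch and then, a few characters later, the "ttyS" branch as well, registering the early console twice; skipping over ",ttyS" right after handling "serial" prevents the second match. A throwaway user-space re-run of that logic (toy parser, not the kernel loop verbatim):

#include <stdio.h>
#include <string.h>

static void parse(const char *buf, int skip_ttys_after_serial)
{
	while (*buf != '\0') {
		if (!strncmp(buf, "serial", 6)) {
			buf += 6;
			printf("  serial init with \"%s\"\n", buf);
			if (skip_ttys_after_serial && !strncmp(buf, ",ttyS", 5))
				buf += 5;
		}
		if (!strncmp(buf, "ttyS", 4))
			printf("  serial init AGAIN with \"%s\"\n", buf + 4);
		buf++;
	}
}

int main(void)
{
	printf("old behaviour:\n");
	parse("serial,ttyS0,115200", 0);	/* console set up twice */
	printf("patched behaviour:\n");
	parse("serial,ttyS0,115200", 1);	/* console set up once  */
	return 0;
}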
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c index 1736c5a725aa..9c3bd4a2050e 100644 --- a/arch/x86/kernel/i386_ksyms_32.c +++ b/arch/x86/kernel/i386_ksyms_32.c | |||
| @@ -15,8 +15,10 @@ EXPORT_SYMBOL(mcount); | |||
| 15 | * the export, but dont use it from C code, it is used | 15 | * the export, but dont use it from C code, it is used |
| 16 | * by assembly code and is not using C calling convention! | 16 | * by assembly code and is not using C calling convention! |
| 17 | */ | 17 | */ |
| 18 | #ifndef CONFIG_X86_CMPXCHG64 | ||
| 18 | extern void cmpxchg8b_emu(void); | 19 | extern void cmpxchg8b_emu(void); |
| 19 | EXPORT_SYMBOL(cmpxchg8b_emu); | 20 | EXPORT_SYMBOL(cmpxchg8b_emu); |
| 21 | #endif | ||
| 20 | 22 | ||
| 21 | /* Networking helper routines. */ | 23 | /* Networking helper routines. */ |
| 22 | EXPORT_SYMBOL(csum_partial_copy_generic); | 24 | EXPORT_SYMBOL(csum_partial_copy_generic); |
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 74656d1d4e30..391206199515 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
| @@ -244,6 +244,7 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs) | |||
| 244 | __func__, smp_processor_id(), vector, irq); | 244 | __func__, smp_processor_id(), vector, irq); |
| 245 | } | 245 | } |
| 246 | 246 | ||
| 247 | run_local_timers(); | ||
| 247 | irq_exit(); | 248 | irq_exit(); |
| 248 | 249 | ||
| 249 | set_irq_regs(old_regs); | 250 | set_irq_regs(old_regs); |
| @@ -268,6 +269,7 @@ void smp_generic_interrupt(struct pt_regs *regs) | |||
| 268 | if (generic_interrupt_extension) | 269 | if (generic_interrupt_extension) |
| 269 | generic_interrupt_extension(); | 270 | generic_interrupt_extension(); |
| 270 | 271 | ||
| 272 | run_local_timers(); | ||
| 271 | irq_exit(); | 273 | irq_exit(); |
| 272 | 274 | ||
| 273 | set_irq_regs(old_regs); | 275 | set_irq_regs(old_regs); |
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index ec1de97600e7..d915d956e66d 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c | |||
| @@ -198,6 +198,7 @@ void smp_reschedule_interrupt(struct pt_regs *regs) | |||
| 198 | { | 198 | { |
| 199 | ack_APIC_irq(); | 199 | ack_APIC_irq(); |
| 200 | inc_irq_stat(irq_resched_count); | 200 | inc_irq_stat(irq_resched_count); |
| 201 | run_local_timers(); | ||
| 201 | /* | 202 | /* |
| 202 | * KVM uses this interrupt to force a cpu out of guest mode | 203 | * KVM uses this interrupt to force a cpu out of guest mode |
| 203 | */ | 204 | */ |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 1ae5ceba7eb2..7024224f0fc8 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
| @@ -664,7 +664,7 @@ static void start_apic_timer(struct kvm_lapic *apic) | |||
| 664 | { | 664 | { |
| 665 | ktime_t now = apic->lapic_timer.timer.base->get_time(); | 665 | ktime_t now = apic->lapic_timer.timer.base->get_time(); |
| 666 | 666 | ||
| 667 | apic->lapic_timer.period = apic_get_reg(apic, APIC_TMICT) * | 667 | apic->lapic_timer.period = (u64)apic_get_reg(apic, APIC_TMICT) * |
| 668 | APIC_BUS_CYCLE_NS * apic->divide_count; | 668 | APIC_BUS_CYCLE_NS * apic->divide_count; |
| 669 | atomic_set(&apic->lapic_timer.pending, 0); | 669 | atomic_set(&apic->lapic_timer.pending, 0); |
| 670 | 670 | ||
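Without the (u64) cast the three-way product is evaluated in 32-bit arithmetic and wraps for large initial-count values, so the emulated LAPIC timer period comes out far too short. A stand-alone illustration of the wrap (APIC_BUS_CYCLE_NS is taken as 1 here and the other numbers are invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t tmict        = 100000000;	/* initial count register */
	uint32_t bus_cycle_ns = 1;
	uint32_t divide_count = 128;

	uint64_t wrong = tmict * bus_cycle_ns * divide_count;		/* 32-bit mul, wraps   */
	uint64_t right = (uint64_t)tmict * bus_cycle_ns * divide_count;	/* full 64-bit product */

	printf("without cast: %llu ns\n", (unsigned long long)wrong);
	printf("with cast:    %llu ns\n", (unsigned long long)right);
	return 0;
}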
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index eca41ae9f453..685a4ffac8e6 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
| @@ -156,6 +156,8 @@ module_param(oos_shadow, bool, 0644); | |||
| 156 | #define CREATE_TRACE_POINTS | 156 | #define CREATE_TRACE_POINTS |
| 157 | #include "mmutrace.h" | 157 | #include "mmutrace.h" |
| 158 | 158 | ||
| 159 | #define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT) | ||
| 160 | |||
| 159 | #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) | 161 | #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) |
| 160 | 162 | ||
| 161 | struct kvm_rmap_desc { | 163 | struct kvm_rmap_desc { |
| @@ -634,9 +636,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte) | |||
| 634 | if (*spte & shadow_accessed_mask) | 636 | if (*spte & shadow_accessed_mask) |
| 635 | kvm_set_pfn_accessed(pfn); | 637 | kvm_set_pfn_accessed(pfn); |
| 636 | if (is_writeble_pte(*spte)) | 638 | if (is_writeble_pte(*spte)) |
| 637 | kvm_release_pfn_dirty(pfn); | 639 | kvm_set_pfn_dirty(pfn); |
| 638 | else | ||
| 639 | kvm_release_pfn_clean(pfn); | ||
| 640 | rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level); | 640 | rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level); |
| 641 | if (!*rmapp) { | 641 | if (!*rmapp) { |
| 642 | printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte); | 642 | printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte); |
| @@ -748,7 +748,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn) | |||
| 748 | return write_protected; | 748 | return write_protected; |
| 749 | } | 749 | } |
| 750 | 750 | ||
| 751 | static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp) | 751 | static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data) |
| 752 | { | 752 | { |
| 753 | u64 *spte; | 753 | u64 *spte; |
| 754 | int need_tlb_flush = 0; | 754 | int need_tlb_flush = 0; |
| @@ -763,8 +763,45 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp) | |||
| 763 | return need_tlb_flush; | 763 | return need_tlb_flush; |
| 764 | } | 764 | } |
| 765 | 765 | ||
| 766 | static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, | 766 | static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data) |
| 767 | int (*handler)(struct kvm *kvm, unsigned long *rmapp)) | 767 | { |
| 768 | int need_flush = 0; | ||
| 769 | u64 *spte, new_spte; | ||
| 770 | pte_t *ptep = (pte_t *)data; | ||
| 771 | pfn_t new_pfn; | ||
| 772 | |||
| 773 | WARN_ON(pte_huge(*ptep)); | ||
| 774 | new_pfn = pte_pfn(*ptep); | ||
| 775 | spte = rmap_next(kvm, rmapp, NULL); | ||
| 776 | while (spte) { | ||
| 777 | BUG_ON(!is_shadow_present_pte(*spte)); | ||
| 778 | rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte); | ||
| 779 | need_flush = 1; | ||
| 780 | if (pte_write(*ptep)) { | ||
| 781 | rmap_remove(kvm, spte); | ||
| 782 | __set_spte(spte, shadow_trap_nonpresent_pte); | ||
| 783 | spte = rmap_next(kvm, rmapp, NULL); | ||
| 784 | } else { | ||
| 785 | new_spte = *spte &~ (PT64_BASE_ADDR_MASK); | ||
| 786 | new_spte |= (u64)new_pfn << PAGE_SHIFT; | ||
| 787 | |||
| 788 | new_spte &= ~PT_WRITABLE_MASK; | ||
| 789 | new_spte &= ~SPTE_HOST_WRITEABLE; | ||
| 790 | if (is_writeble_pte(*spte)) | ||
| 791 | kvm_set_pfn_dirty(spte_to_pfn(*spte)); | ||
| 792 | __set_spte(spte, new_spte); | ||
| 793 | spte = rmap_next(kvm, rmapp, spte); | ||
| 794 | } | ||
| 795 | } | ||
| 796 | if (need_flush) | ||
| 797 | kvm_flush_remote_tlbs(kvm); | ||
| 798 | |||
| 799 | return 0; | ||
| 800 | } | ||
| 801 | |||
| 802 | static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, u64 data, | ||
| 803 | int (*handler)(struct kvm *kvm, unsigned long *rmapp, | ||
| 804 | u64 data)) | ||
| 768 | { | 805 | { |
| 769 | int i, j; | 806 | int i, j; |
| 770 | int retval = 0; | 807 | int retval = 0; |
| @@ -786,13 +823,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, | |||
| 786 | if (hva >= start && hva < end) { | 823 | if (hva >= start && hva < end) { |
| 787 | gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT; | 824 | gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT; |
| 788 | 825 | ||
| 789 | retval |= handler(kvm, &memslot->rmap[gfn_offset]); | 826 | retval |= handler(kvm, &memslot->rmap[gfn_offset], |
| 827 | data); | ||
| 790 | 828 | ||
| 791 | for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) { | 829 | for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) { |
| 792 | int idx = gfn_offset; | 830 | int idx = gfn_offset; |
| 793 | idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j); | 831 | idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j); |
| 794 | retval |= handler(kvm, | 832 | retval |= handler(kvm, |
| 795 | &memslot->lpage_info[j][idx].rmap_pde); | 833 | &memslot->lpage_info[j][idx].rmap_pde, |
| 834 | data); | ||
| 796 | } | 835 | } |
| 797 | } | 836 | } |
| 798 | } | 837 | } |
| @@ -802,10 +841,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, | |||
| 802 | 841 | ||
| 803 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) | 842 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) |
| 804 | { | 843 | { |
| 805 | return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp); | 844 | return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp); |
| 806 | } | 845 | } |
| 807 | 846 | ||
| 808 | static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp) | 847 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) |
| 848 | { | ||
| 849 | kvm_handle_hva(kvm, hva, (u64)&pte, kvm_set_pte_rmapp); | ||
| 850 | } | ||
| 851 | |||
| 852 | static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data) | ||
| 809 | { | 853 | { |
| 810 | u64 *spte; | 854 | u64 *spte; |
| 811 | int young = 0; | 855 | int young = 0; |
| @@ -841,13 +885,13 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) | |||
| 841 | gfn = unalias_gfn(vcpu->kvm, gfn); | 885 | gfn = unalias_gfn(vcpu->kvm, gfn); |
| 842 | rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); | 886 | rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); |
| 843 | 887 | ||
| 844 | kvm_unmap_rmapp(vcpu->kvm, rmapp); | 888 | kvm_unmap_rmapp(vcpu->kvm, rmapp, 0); |
| 845 | kvm_flush_remote_tlbs(vcpu->kvm); | 889 | kvm_flush_remote_tlbs(vcpu->kvm); |
| 846 | } | 890 | } |
| 847 | 891 | ||
| 848 | int kvm_age_hva(struct kvm *kvm, unsigned long hva) | 892 | int kvm_age_hva(struct kvm *kvm, unsigned long hva) |
| 849 | { | 893 | { |
| 850 | return kvm_handle_hva(kvm, hva, kvm_age_rmapp); | 894 | return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp); |
| 851 | } | 895 | } |
| 852 | 896 | ||
| 853 | #ifdef MMU_DEBUG | 897 | #ifdef MMU_DEBUG |
| @@ -1756,7 +1800,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, | |||
| 1756 | unsigned pte_access, int user_fault, | 1800 | unsigned pte_access, int user_fault, |
| 1757 | int write_fault, int dirty, int level, | 1801 | int write_fault, int dirty, int level, |
| 1758 | gfn_t gfn, pfn_t pfn, bool speculative, | 1802 | gfn_t gfn, pfn_t pfn, bool speculative, |
| 1759 | bool can_unsync) | 1803 | bool can_unsync, bool reset_host_protection) |
| 1760 | { | 1804 | { |
| 1761 | u64 spte; | 1805 | u64 spte; |
| 1762 | int ret = 0; | 1806 | int ret = 0; |
| @@ -1783,6 +1827,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, | |||
| 1783 | spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, | 1827 | spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, |
| 1784 | kvm_is_mmio_pfn(pfn)); | 1828 | kvm_is_mmio_pfn(pfn)); |
| 1785 | 1829 | ||
| 1830 | if (reset_host_protection) | ||
| 1831 | spte |= SPTE_HOST_WRITEABLE; | ||
| 1832 | |||
| 1786 | spte |= (u64)pfn << PAGE_SHIFT; | 1833 | spte |= (u64)pfn << PAGE_SHIFT; |
| 1787 | 1834 | ||
| 1788 | if ((pte_access & ACC_WRITE_MASK) | 1835 | if ((pte_access & ACC_WRITE_MASK) |
| @@ -1828,7 +1875,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, | |||
| 1828 | unsigned pt_access, unsigned pte_access, | 1875 | unsigned pt_access, unsigned pte_access, |
| 1829 | int user_fault, int write_fault, int dirty, | 1876 | int user_fault, int write_fault, int dirty, |
| 1830 | int *ptwrite, int level, gfn_t gfn, | 1877 | int *ptwrite, int level, gfn_t gfn, |
| 1831 | pfn_t pfn, bool speculative) | 1878 | pfn_t pfn, bool speculative, |
| 1879 | bool reset_host_protection) | ||
| 1832 | { | 1880 | { |
| 1833 | int was_rmapped = 0; | 1881 | int was_rmapped = 0; |
| 1834 | int was_writeble = is_writeble_pte(*sptep); | 1882 | int was_writeble = is_writeble_pte(*sptep); |
| @@ -1860,7 +1908,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, | |||
| 1860 | } | 1908 | } |
| 1861 | 1909 | ||
| 1862 | if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault, | 1910 | if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault, |
| 1863 | dirty, level, gfn, pfn, speculative, true)) { | 1911 | dirty, level, gfn, pfn, speculative, true, |
| 1912 | reset_host_protection)) { | ||
| 1864 | if (write_fault) | 1913 | if (write_fault) |
| 1865 | *ptwrite = 1; | 1914 | *ptwrite = 1; |
| 1866 | kvm_x86_ops->tlb_flush(vcpu); | 1915 | kvm_x86_ops->tlb_flush(vcpu); |
| @@ -1877,8 +1926,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, | |||
| 1877 | page_header_update_slot(vcpu->kvm, sptep, gfn); | 1926 | page_header_update_slot(vcpu->kvm, sptep, gfn); |
| 1878 | if (!was_rmapped) { | 1927 | if (!was_rmapped) { |
| 1879 | rmap_count = rmap_add(vcpu, sptep, gfn); | 1928 | rmap_count = rmap_add(vcpu, sptep, gfn); |
| 1880 | if (!is_rmap_spte(*sptep)) | 1929 | kvm_release_pfn_clean(pfn); |
| 1881 | kvm_release_pfn_clean(pfn); | ||
| 1882 | if (rmap_count > RMAP_RECYCLE_THRESHOLD) | 1930 | if (rmap_count > RMAP_RECYCLE_THRESHOLD) |
| 1883 | rmap_recycle(vcpu, sptep, gfn); | 1931 | rmap_recycle(vcpu, sptep, gfn); |
| 1884 | } else { | 1932 | } else { |
| @@ -1909,7 +1957,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, | |||
| 1909 | if (iterator.level == level) { | 1957 | if (iterator.level == level) { |
| 1910 | mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL, | 1958 | mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL, |
| 1911 | 0, write, 1, &pt_write, | 1959 | 0, write, 1, &pt_write, |
| 1912 | level, gfn, pfn, false); | 1960 | level, gfn, pfn, false, true); |
| 1913 | ++vcpu->stat.pf_fixed; | 1961 | ++vcpu->stat.pf_fixed; |
| 1914 | break; | 1962 | break; |
| 1915 | } | 1963 | } |
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index d2fec9c12d22..72558f8ff3f5 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h | |||
| @@ -273,9 +273,13 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page, | |||
| 273 | if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq)) | 273 | if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq)) |
| 274 | return; | 274 | return; |
| 275 | kvm_get_pfn(pfn); | 275 | kvm_get_pfn(pfn); |
| 276 | /* | ||
| 277 | * we call mmu_set_spte() with reset_host_protection = true because | ||
| 278 | * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1). | ||
| 279 | */ | ||
| 276 | mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0, | 280 | mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0, |
| 277 | gpte & PT_DIRTY_MASK, NULL, PT_PAGE_TABLE_LEVEL, | 281 | gpte & PT_DIRTY_MASK, NULL, PT_PAGE_TABLE_LEVEL, |
| 278 | gpte_to_gfn(gpte), pfn, true); | 282 | gpte_to_gfn(gpte), pfn, true, true); |
| 279 | } | 283 | } |
| 280 | 284 | ||
| 281 | /* | 285 | /* |
| @@ -308,7 +312,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, | |||
| 308 | user_fault, write_fault, | 312 | user_fault, write_fault, |
| 309 | gw->ptes[gw->level-1] & PT_DIRTY_MASK, | 313 | gw->ptes[gw->level-1] & PT_DIRTY_MASK, |
| 310 | ptwrite, level, | 314 | ptwrite, level, |
| 311 | gw->gfn, pfn, false); | 315 | gw->gfn, pfn, false, true); |
| 312 | break; | 316 | break; |
| 313 | } | 317 | } |
| 314 | 318 | ||
| @@ -558,6 +562,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu, | |||
| 558 | static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) | 562 | static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) |
| 559 | { | 563 | { |
| 560 | int i, offset, nr_present; | 564 | int i, offset, nr_present; |
| 565 | bool reset_host_protection; | ||
| 561 | 566 | ||
| 562 | offset = nr_present = 0; | 567 | offset = nr_present = 0; |
| 563 | 568 | ||
| @@ -595,9 +600,16 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) | |||
| 595 | 600 | ||
| 596 | nr_present++; | 601 | nr_present++; |
| 597 | pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); | 602 | pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); |
| 603 | if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) { | ||
| 604 | pte_access &= ~ACC_WRITE_MASK; | ||
| 605 | reset_host_protection = 0; | ||
| 606 | } else { | ||
| 607 | reset_host_protection = 1; | ||
| 608 | } | ||
| 598 | set_spte(vcpu, &sp->spt[i], pte_access, 0, 0, | 609 | set_spte(vcpu, &sp->spt[i], pte_access, 0, 0, |
| 599 | is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn, | 610 | is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn, |
| 600 | spte_to_pfn(sp->spt[i]), true, false); | 611 | spte_to_pfn(sp->spt[i]), true, false, |
| 612 | reset_host_protection); | ||
| 601 | } | 613 | } |
| 602 | 614 | ||
| 603 | return !nr_present; | 615 | return !nr_present; |
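The two hunks above add the SPTE_HOST_WRITEABLE software bit and a kvm_set_spte_hva() entry point: the bit records whether the host mapping was writable when the spte was installed, and kvm_set_pte_rmapp() can then rewrite sptes in place when the host changes a pte, instead of zapping the whole range. A minimal sketch of how an mmu-notifier ->change_pte callback could feed the new hook (the callback body here is an assumption for illustration, not the exact notifier code):

static void example_change_pte(struct mmu_notifier *mn,
			       struct mm_struct *mm,
			       unsigned long address,
			       pte_t pte)
{
	struct kvm *kvm = container_of(mn, struct kvm, mmu_notifier);

	/* Push the new host pte into the shadow MMU; a read-only host pte
	 * ends up as a read-only spte with SPTE_HOST_WRITEABLE cleared,
	 * per kvm_set_pte_rmapp() above. */
	spin_lock(&kvm->mmu_lock);
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
}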
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 944cc9c04b3c..c17404add91f 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
| @@ -767,6 +767,8 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
| 767 | rdtscll(tsc_this); | 767 | rdtscll(tsc_this); |
| 768 | delta = vcpu->arch.host_tsc - tsc_this; | 768 | delta = vcpu->arch.host_tsc - tsc_this; |
| 769 | svm->vmcb->control.tsc_offset += delta; | 769 | svm->vmcb->control.tsc_offset += delta; |
| 770 | if (is_nested(svm)) | ||
| 771 | svm->nested.hsave->control.tsc_offset += delta; | ||
| 770 | vcpu->cpu = cpu; | 772 | vcpu->cpu = cpu; |
| 771 | kvm_migrate_timers(vcpu); | 773 | kvm_migrate_timers(vcpu); |
| 772 | svm->asid_generation = 0; | 774 | svm->asid_generation = 0; |
| @@ -2057,10 +2059,14 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data) | |||
| 2057 | 2059 | ||
| 2058 | switch (ecx) { | 2060 | switch (ecx) { |
| 2059 | case MSR_IA32_TSC: { | 2061 | case MSR_IA32_TSC: { |
| 2060 | u64 tsc; | 2062 | u64 tsc_offset; |
| 2063 | |||
| 2064 | if (is_nested(svm)) | ||
| 2065 | tsc_offset = svm->nested.hsave->control.tsc_offset; | ||
| 2066 | else | ||
| 2067 | tsc_offset = svm->vmcb->control.tsc_offset; | ||
| 2061 | 2068 | ||
| 2062 | rdtscll(tsc); | 2069 | *data = tsc_offset + native_read_tsc(); |
| 2063 | *data = svm->vmcb->control.tsc_offset + tsc; | ||
| 2064 | break; | 2070 | break; |
| 2065 | } | 2071 | } |
| 2066 | case MSR_K6_STAR: | 2072 | case MSR_K6_STAR: |
| @@ -2146,10 +2152,17 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) | |||
| 2146 | 2152 | ||
| 2147 | switch (ecx) { | 2153 | switch (ecx) { |
| 2148 | case MSR_IA32_TSC: { | 2154 | case MSR_IA32_TSC: { |
| 2149 | u64 tsc; | 2155 | u64 tsc_offset = data - native_read_tsc(); |
| 2156 | u64 g_tsc_offset = 0; | ||
| 2157 | |||
| 2158 | if (is_nested(svm)) { | ||
| 2159 | g_tsc_offset = svm->vmcb->control.tsc_offset - | ||
| 2160 | svm->nested.hsave->control.tsc_offset; | ||
| 2161 | svm->nested.hsave->control.tsc_offset = tsc_offset; | ||
| 2162 | } | ||
| 2163 | |||
| 2164 | svm->vmcb->control.tsc_offset = tsc_offset + g_tsc_offset; | ||
| 2150 | 2165 | ||
| 2151 | rdtscll(tsc); | ||
| 2152 | svm->vmcb->control.tsc_offset = data - tsc; | ||
| 2153 | break; | 2166 | break; |
| 2154 | } | 2167 | } |
| 2155 | case MSR_K6_STAR: | 2168 | case MSR_K6_STAR: |
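The svm.c changes keep the nested (hsave) TSC offset in step with the active vmcb: the vcpu-migration delta is applied to both, and MSR_IA32_TSC reads and writes go through the L1 offset while a nested guest runs, preserving the L1-to-L2 offset gap. A small sketch of the arithmetic behind the MSR write path (plain helper functions for illustration, not kernel code):

/* What the hardware does: the guest observes host TSC plus the offset. */
static u64 guest_tsc(u64 host_tsc, u64 tsc_offset)
{
	return host_tsc + tsc_offset;
}

/* What the wrmsr path above computes while a nested guest is active. */
static u64 new_vmcb_offset(u64 data, u64 host_tsc,
			   u64 old_vmcb_offset, u64 old_hsave_offset)
{
	u64 new_l1_offset = data - host_tsc;                   /* offset L1 asked for */
	u64 g_tsc_offset = old_vmcb_offset - old_hsave_offset; /* preserved L1->L2 gap */

	return new_l1_offset + g_tsc_offset;
}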
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f3812014bd0b..ed53b42caba1 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -709,7 +709,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
| 709 | if (vcpu->cpu != cpu) { | 709 | if (vcpu->cpu != cpu) { |
| 710 | vcpu_clear(vmx); | 710 | vcpu_clear(vmx); |
| 711 | kvm_migrate_timers(vcpu); | 711 | kvm_migrate_timers(vcpu); |
| 712 | vpid_sync_vcpu_all(vmx); | 712 | set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests); |
| 713 | local_irq_disable(); | 713 | local_irq_disable(); |
| 714 | list_add(&vmx->local_vcpus_link, | 714 | list_add(&vmx->local_vcpus_link, |
| 715 | &per_cpu(vcpus_on_cpu, cpu)); | 715 | &per_cpu(vcpus_on_cpu, cpu)); |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index be451ee44249..9b9695322f56 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -1591,6 +1591,8 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, | |||
| 1591 | 1591 | ||
| 1592 | if (cpuid->nent < 1) | 1592 | if (cpuid->nent < 1) |
| 1593 | goto out; | 1593 | goto out; |
| 1594 | if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) | ||
| 1595 | cpuid->nent = KVM_MAX_CPUID_ENTRIES; | ||
| 1594 | r = -ENOMEM; | 1596 | r = -ENOMEM; |
| 1595 | cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent); | 1597 | cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent); |
| 1596 | if (!cpuid_entries) | 1598 | if (!cpuid_entries) |
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 3e549b8ec8c9..85f5db95c60f 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile | |||
| @@ -15,8 +15,10 @@ ifeq ($(CONFIG_X86_32),y) | |||
| 15 | obj-y += atomic64_32.o | 15 | obj-y += atomic64_32.o |
| 16 | lib-y += checksum_32.o | 16 | lib-y += checksum_32.o |
| 17 | lib-y += strstr_32.o | 17 | lib-y += strstr_32.o |
| 18 | lib-y += semaphore_32.o string_32.o cmpxchg8b_emu.o | 18 | lib-y += semaphore_32.o string_32.o |
| 19 | 19 | ifneq ($(CONFIG_X86_CMPXCHG64),y) | |
| 20 | lib-y += cmpxchg8b_emu.o | ||
| 21 | endif | ||
| 20 | lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o | 22 | lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o |
| 21 | else | 23 | else |
| 22 | obj-y += io_64.o iomap_copy_64.o | 24 | obj-y += io_64.o iomap_copy_64.o |
diff --git a/block/blk-barrier.c b/block/blk-barrier.c index 6593ab39cfe9..8873b9b439ff 100644 --- a/block/blk-barrier.c +++ b/block/blk-barrier.c | |||
| @@ -350,6 +350,7 @@ static void blkdev_discard_end_io(struct bio *bio, int err) | |||
| 350 | 350 | ||
| 351 | if (bio->bi_private) | 351 | if (bio->bi_private) |
| 352 | complete(bio->bi_private); | 352 | complete(bio->bi_private); |
| 353 | __free_page(bio_page(bio)); | ||
| 353 | 354 | ||
| 354 | bio_put(bio); | 355 | bio_put(bio); |
| 355 | } | 356 | } |
| @@ -372,30 +373,50 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, | |||
| 372 | struct request_queue *q = bdev_get_queue(bdev); | 373 | struct request_queue *q = bdev_get_queue(bdev); |
| 373 | int type = flags & DISCARD_FL_BARRIER ? | 374 | int type = flags & DISCARD_FL_BARRIER ? |
| 374 | DISCARD_BARRIER : DISCARD_NOBARRIER; | 375 | DISCARD_BARRIER : DISCARD_NOBARRIER; |
| 376 | struct bio *bio; | ||
| 377 | struct page *page; | ||
| 375 | int ret = 0; | 378 | int ret = 0; |
| 376 | 379 | ||
| 377 | if (!q) | 380 | if (!q) |
| 378 | return -ENXIO; | 381 | return -ENXIO; |
| 379 | 382 | ||
| 380 | if (!q->prepare_discard_fn) | 383 | if (!blk_queue_discard(q)) |
| 381 | return -EOPNOTSUPP; | 384 | return -EOPNOTSUPP; |
| 382 | 385 | ||
| 383 | while (nr_sects && !ret) { | 386 | while (nr_sects && !ret) { |
| 384 | struct bio *bio = bio_alloc(gfp_mask, 0); | 387 | unsigned int sector_size = q->limits.logical_block_size; |
| 385 | if (!bio) | 388 | unsigned int max_discard_sectors = |
| 386 | return -ENOMEM; | 389 | min(q->limits.max_discard_sectors, UINT_MAX >> 9); |
| 387 | 390 | ||
| 391 | bio = bio_alloc(gfp_mask, 1); | ||
| 392 | if (!bio) | ||
| 393 | goto out; | ||
| 394 | bio->bi_sector = sector; | ||
| 388 | bio->bi_end_io = blkdev_discard_end_io; | 395 | bio->bi_end_io = blkdev_discard_end_io; |
| 389 | bio->bi_bdev = bdev; | 396 | bio->bi_bdev = bdev; |
| 390 | if (flags & DISCARD_FL_WAIT) | 397 | if (flags & DISCARD_FL_WAIT) |
| 391 | bio->bi_private = &wait; | 398 | bio->bi_private = &wait; |
| 392 | 399 | ||
| 393 | bio->bi_sector = sector; | 400 | /* |
| 401 | * Add a zeroed one-sector payload as that's what | ||
| 402 | * our current implementations need. If we ever need | ||
| 403 | * more, the interface will need revisiting. | ||
| 404 | */ | ||
| 405 | page = alloc_page(GFP_KERNEL | __GFP_ZERO); | ||
| 406 | if (!page) | ||
| 407 | goto out_free_bio; | ||
| 408 | if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size) | ||
| 409 | goto out_free_page; | ||
| 394 | 410 | ||
| 395 | if (nr_sects > queue_max_hw_sectors(q)) { | 411 | /* |
| 396 | bio->bi_size = queue_max_hw_sectors(q) << 9; | 412 | * And override the bio size - the way discard works we |
| 397 | nr_sects -= queue_max_hw_sectors(q); | 413 | * touch many more blocks on disk than the actual payload |
| 398 | sector += queue_max_hw_sectors(q); | 414 | * length. |
| 415 | */ | ||
| 416 | if (nr_sects > max_discard_sectors) { | ||
| 417 | bio->bi_size = max_discard_sectors << 9; | ||
| 418 | nr_sects -= max_discard_sectors; | ||
| 419 | sector += max_discard_sectors; | ||
| 399 | } else { | 420 | } else { |
| 400 | bio->bi_size = nr_sects << 9; | 421 | bio->bi_size = nr_sects << 9; |
| 401 | nr_sects = 0; | 422 | nr_sects = 0; |
| @@ -414,5 +435,11 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, | |||
| 414 | bio_put(bio); | 435 | bio_put(bio); |
| 415 | } | 436 | } |
| 416 | return ret; | 437 | return ret; |
| 438 | out_free_page: | ||
| 439 | __free_page(page); | ||
| 440 | out_free_bio: | ||
| 441 | bio_put(bio); | ||
| 442 | out: | ||
| 443 | return -ENOMEM; | ||
| 417 | } | 444 | } |
| 418 | EXPORT_SYMBOL(blkdev_issue_discard); | 445 | EXPORT_SYMBOL(blkdev_issue_discard); |
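With the rework above, blkdev_issue_discard() attaches a zeroed one-sector payload page to every discard bio and splits the range by q->limits.max_discard_sectors rather than by the hardware sector limit. A hedged usage sketch from a caller's point of view (the extent values are made up):

static int example_discard_extent(struct block_device *bdev)
{
	sector_t start = 2048;  /* first sector of a freed extent (illustrative) */
	sector_t len = 8192;    /* extent length in 512-byte sectors (illustrative) */

	/* DISCARD_FL_WAIT blocks until the last bio completes; the helper
	 * frees the payload page in blkdev_discard_end_io(). */
	return blkdev_issue_discard(bdev, start, len, GFP_KERNEL,
				    DISCARD_FL_WAIT);
}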
diff --git a/block/blk-core.c b/block/blk-core.c index 8135228e4b29..81f34311659a 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | #include "blk.h" | 34 | #include "blk.h" |
| 35 | 35 | ||
| 36 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap); | 36 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap); |
| 37 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); | ||
| 37 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); | 38 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); |
| 38 | 39 | ||
| 39 | static int __make_request(struct request_queue *q, struct bio *bio); | 40 | static int __make_request(struct request_queue *q, struct bio *bio); |
| @@ -69,7 +70,7 @@ static void drive_stat_acct(struct request *rq, int new_io) | |||
| 69 | part_stat_inc(cpu, part, merges[rw]); | 70 | part_stat_inc(cpu, part, merges[rw]); |
| 70 | else { | 71 | else { |
| 71 | part_round_stats(cpu, part); | 72 | part_round_stats(cpu, part); |
| 72 | part_inc_in_flight(part, rw); | 73 | part_inc_in_flight(part); |
| 73 | } | 74 | } |
| 74 | 75 | ||
| 75 | part_stat_unlock(); | 76 | part_stat_unlock(); |
| @@ -1031,7 +1032,7 @@ static void part_round_stats_single(int cpu, struct hd_struct *part, | |||
| 1031 | 1032 | ||
| 1032 | if (part->in_flight) { | 1033 | if (part->in_flight) { |
| 1033 | __part_stat_add(cpu, part, time_in_queue, | 1034 | __part_stat_add(cpu, part, time_in_queue, |
| 1034 | part_in_flight(part) * (now - part->stamp)); | 1035 | part->in_flight * (now - part->stamp)); |
| 1035 | __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); | 1036 | __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); |
| 1036 | } | 1037 | } |
| 1037 | part->stamp = now; | 1038 | part->stamp = now; |
| @@ -1124,7 +1125,6 @@ void init_request_from_bio(struct request *req, struct bio *bio) | |||
| 1124 | req->cmd_flags |= REQ_DISCARD; | 1125 | req->cmd_flags |= REQ_DISCARD; |
| 1125 | if (bio_rw_flagged(bio, BIO_RW_BARRIER)) | 1126 | if (bio_rw_flagged(bio, BIO_RW_BARRIER)) |
| 1126 | req->cmd_flags |= REQ_SOFTBARRIER; | 1127 | req->cmd_flags |= REQ_SOFTBARRIER; |
| 1127 | req->q->prepare_discard_fn(req->q, req); | ||
| 1128 | } else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) | 1128 | } else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) |
| 1129 | req->cmd_flags |= REQ_HARDBARRIER; | 1129 | req->cmd_flags |= REQ_HARDBARRIER; |
| 1130 | 1130 | ||
| @@ -1437,7 +1437,8 @@ static inline void __generic_make_request(struct bio *bio) | |||
| 1437 | goto end_io; | 1437 | goto end_io; |
| 1438 | } | 1438 | } |
| 1439 | 1439 | ||
| 1440 | if (unlikely(nr_sectors > queue_max_hw_sectors(q))) { | 1440 | if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) && |
| 1441 | nr_sectors > queue_max_hw_sectors(q))) { | ||
| 1441 | printk(KERN_ERR "bio too big device %s (%u > %u)\n", | 1442 | printk(KERN_ERR "bio too big device %s (%u > %u)\n", |
| 1442 | bdevname(bio->bi_bdev, b), | 1443 | bdevname(bio->bi_bdev, b), |
| 1443 | bio_sectors(bio), | 1444 | bio_sectors(bio), |
| @@ -1470,7 +1471,7 @@ static inline void __generic_make_request(struct bio *bio) | |||
| 1470 | goto end_io; | 1471 | goto end_io; |
| 1471 | 1472 | ||
| 1472 | if (bio_rw_flagged(bio, BIO_RW_DISCARD) && | 1473 | if (bio_rw_flagged(bio, BIO_RW_DISCARD) && |
| 1473 | !q->prepare_discard_fn) { | 1474 | !blk_queue_discard(q)) { |
| 1474 | err = -EOPNOTSUPP; | 1475 | err = -EOPNOTSUPP; |
| 1475 | goto end_io; | 1476 | goto end_io; |
| 1476 | } | 1477 | } |
| @@ -1738,7 +1739,7 @@ static void blk_account_io_done(struct request *req) | |||
| 1738 | part_stat_inc(cpu, part, ios[rw]); | 1739 | part_stat_inc(cpu, part, ios[rw]); |
| 1739 | part_stat_add(cpu, part, ticks[rw], duration); | 1740 | part_stat_add(cpu, part, ticks[rw], duration); |
| 1740 | part_round_stats(cpu, part); | 1741 | part_round_stats(cpu, part); |
| 1741 | part_dec_in_flight(part, rw); | 1742 | part_dec_in_flight(part); |
| 1742 | 1743 | ||
| 1743 | part_stat_unlock(); | 1744 | part_stat_unlock(); |
| 1744 | } | 1745 | } |
| @@ -2491,6 +2492,14 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) | |||
| 2491 | } | 2492 | } |
| 2492 | EXPORT_SYMBOL(kblockd_schedule_work); | 2493 | EXPORT_SYMBOL(kblockd_schedule_work); |
| 2493 | 2494 | ||
| 2495 | int kblockd_schedule_delayed_work(struct request_queue *q, | ||
| 2496 | struct delayed_work *work, | ||
| 2497 | unsigned long delay) | ||
| 2498 | { | ||
| 2499 | return queue_delayed_work(kblockd_workqueue, work, delay); | ||
| 2500 | } | ||
| 2501 | EXPORT_SYMBOL(kblockd_schedule_delayed_work); | ||
| 2502 | |||
| 2494 | int __init blk_dev_init(void) | 2503 | int __init blk_dev_init(void) |
| 2495 | { | 2504 | { |
| 2496 | BUILD_BUG_ON(__REQ_NR_BITS > 8 * | 2505 | BUILD_BUG_ON(__REQ_NR_BITS > 8 * |
diff --git a/block/blk-merge.c b/block/blk-merge.c index 99cb5cf1f447..b0de8574fdc8 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c | |||
| @@ -351,7 +351,7 @@ static void blk_account_io_merge(struct request *req) | |||
| 351 | part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); | 351 | part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); |
| 352 | 352 | ||
| 353 | part_round_stats(cpu, part); | 353 | part_round_stats(cpu, part); |
| 354 | part_dec_in_flight(part, rq_data_dir(req)); | 354 | part_dec_in_flight(part); |
| 355 | 355 | ||
| 356 | part_stat_unlock(); | 356 | part_stat_unlock(); |
| 357 | } | 357 | } |
diff --git a/block/blk-settings.c b/block/blk-settings.c index 83413ff83739..e0695bca7027 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c | |||
| @@ -34,23 +34,6 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn) | |||
| 34 | EXPORT_SYMBOL(blk_queue_prep_rq); | 34 | EXPORT_SYMBOL(blk_queue_prep_rq); |
| 35 | 35 | ||
| 36 | /** | 36 | /** |
| 37 | * blk_queue_set_discard - set a discard_sectors function for queue | ||
| 38 | * @q: queue | ||
| 39 | * @dfn: prepare_discard function | ||
| 40 | * | ||
| 41 | * It's possible for a queue to register a discard callback which is used | ||
| 42 | * to transform a discard request into the appropriate type for the | ||
| 43 | * hardware. If none is registered, then discard requests are failed | ||
| 44 | * with %EOPNOTSUPP. | ||
| 45 | * | ||
| 46 | */ | ||
| 47 | void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn) | ||
| 48 | { | ||
| 49 | q->prepare_discard_fn = dfn; | ||
| 50 | } | ||
| 51 | EXPORT_SYMBOL(blk_queue_set_discard); | ||
| 52 | |||
| 53 | /** | ||
| 54 | * blk_queue_merge_bvec - set a merge_bvec function for queue | 37 | * blk_queue_merge_bvec - set a merge_bvec function for queue |
| 55 | * @q: queue | 38 | * @q: queue |
| 56 | * @mbfn: merge_bvec_fn | 39 | * @mbfn: merge_bvec_fn |
| @@ -111,7 +94,9 @@ void blk_set_default_limits(struct queue_limits *lim) | |||
| 111 | lim->max_hw_segments = MAX_HW_SEGMENTS; | 94 | lim->max_hw_segments = MAX_HW_SEGMENTS; |
| 112 | lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; | 95 | lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; |
| 113 | lim->max_segment_size = MAX_SEGMENT_SIZE; | 96 | lim->max_segment_size = MAX_SEGMENT_SIZE; |
| 114 | lim->max_sectors = lim->max_hw_sectors = SAFE_MAX_SECTORS; | 97 | lim->max_sectors = BLK_DEF_MAX_SECTORS; |
| 98 | lim->max_hw_sectors = INT_MAX; | ||
| 99 | lim->max_discard_sectors = SAFE_MAX_SECTORS; | ||
| 115 | lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; | 100 | lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; |
| 116 | lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); | 101 | lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); |
| 117 | lim->alignment_offset = 0; | 102 | lim->alignment_offset = 0; |
| @@ -164,6 +149,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) | |||
| 164 | q->unplug_timer.data = (unsigned long)q; | 149 | q->unplug_timer.data = (unsigned long)q; |
| 165 | 150 | ||
| 166 | blk_set_default_limits(&q->limits); | 151 | blk_set_default_limits(&q->limits); |
| 152 | blk_queue_max_sectors(q, SAFE_MAX_SECTORS); | ||
| 167 | 153 | ||
| 168 | /* | 154 | /* |
| 169 | * If the caller didn't supply a lock, fall back to our embedded | 155 | * If the caller didn't supply a lock, fall back to our embedded |
| @@ -254,6 +240,18 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors) | |||
| 254 | EXPORT_SYMBOL(blk_queue_max_hw_sectors); | 240 | EXPORT_SYMBOL(blk_queue_max_hw_sectors); |
| 255 | 241 | ||
| 256 | /** | 242 | /** |
| 243 | * blk_queue_max_discard_sectors - set max sectors for a single discard | ||
| 244 | * @q: the request queue for the device | ||
| 245 | * @max_discard: maximum number of sectors to discard | ||
| 246 | **/ | ||
| 247 | void blk_queue_max_discard_sectors(struct request_queue *q, | ||
| 248 | unsigned int max_discard_sectors) | ||
| 249 | { | ||
| 250 | q->limits.max_discard_sectors = max_discard_sectors; | ||
| 251 | } | ||
| 252 | EXPORT_SYMBOL(blk_queue_max_discard_sectors); | ||
| 253 | |||
| 254 | /** | ||
| 257 | * blk_queue_max_phys_segments - set max phys segments for a request for this queue | 255 | * blk_queue_max_phys_segments - set max phys segments for a request for this queue |
| 258 | * @q: the request queue for the device | 256 | * @q: the request queue for the device |
| 259 | * @max_segments: max number of segments | 257 | * @max_segments: max number of segments |
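Since blk_queue_set_discard() and prepare_discard_fn are gone, a driver advertises discard support by setting the new per-queue limit and the discard queue flag that blk_queue_discard() tests in the callers above. A minimal sketch (the limit value and the queue_flag_set_unlocked()/QUEUE_FLAG_DISCARD names are assumptions inferred from those callers):

static void example_enable_discard(struct request_queue *q)
{
	/* Cap each discard bio at 8192 sectors (4 MB); illustrative value. */
	blk_queue_max_discard_sectors(q, 8192);

	/* Assumed flag/helper names: mark the queue discard-capable so
	 * blk_queue_discard(q) returns true for incoming discard bios. */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}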
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index b78c9c3e2670..8a6d81afb284 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
| @@ -452,6 +452,7 @@ int blk_register_queue(struct gendisk *disk) | |||
| 452 | if (ret) { | 452 | if (ret) { |
| 453 | kobject_uevent(&q->kobj, KOBJ_REMOVE); | 453 | kobject_uevent(&q->kobj, KOBJ_REMOVE); |
| 454 | kobject_del(&q->kobj); | 454 | kobject_del(&q->kobj); |
| 455 | blk_trace_remove_sysfs(disk_to_dev(disk)); | ||
| 455 | return ret; | 456 | return ret; |
| 456 | } | 457 | } |
| 457 | 458 | ||
| @@ -465,11 +466,11 @@ void blk_unregister_queue(struct gendisk *disk) | |||
| 465 | if (WARN_ON(!q)) | 466 | if (WARN_ON(!q)) |
| 466 | return; | 467 | return; |
| 467 | 468 | ||
| 468 | if (q->request_fn) { | 469 | if (q->request_fn) |
| 469 | elv_unregister_queue(q); | 470 | elv_unregister_queue(q); |
| 470 | 471 | ||
| 471 | kobject_uevent(&q->kobj, KOBJ_REMOVE); | 472 | kobject_uevent(&q->kobj, KOBJ_REMOVE); |
| 472 | kobject_del(&q->kobj); | 473 | kobject_del(&q->kobj); |
| 473 | kobject_put(&disk_to_dev(disk)->kobj); | 474 | blk_trace_remove_sysfs(disk_to_dev(disk)); |
| 474 | } | 475 | kobject_put(&disk_to_dev(disk)->kobj); |
| 475 | } | 476 | } |
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 1ca813b16e78..9c4b679908f4 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
| @@ -150,7 +150,7 @@ struct cfq_data { | |||
| 150 | * idle window management | 150 | * idle window management |
| 151 | */ | 151 | */ |
| 152 | struct timer_list idle_slice_timer; | 152 | struct timer_list idle_slice_timer; |
| 153 | struct work_struct unplug_work; | 153 | struct delayed_work unplug_work; |
| 154 | 154 | ||
| 155 | struct cfq_queue *active_queue; | 155 | struct cfq_queue *active_queue; |
| 156 | struct cfq_io_context *active_cic; | 156 | struct cfq_io_context *active_cic; |
| @@ -173,6 +173,7 @@ struct cfq_data { | |||
| 173 | unsigned int cfq_slice[2]; | 173 | unsigned int cfq_slice[2]; |
| 174 | unsigned int cfq_slice_async_rq; | 174 | unsigned int cfq_slice_async_rq; |
| 175 | unsigned int cfq_slice_idle; | 175 | unsigned int cfq_slice_idle; |
| 176 | unsigned int cfq_latency; | ||
| 176 | 177 | ||
| 177 | struct list_head cic_list; | 178 | struct list_head cic_list; |
| 178 | 179 | ||
| @@ -180,6 +181,8 @@ struct cfq_data { | |||
| 180 | * Fallback dummy cfqq for extreme OOM conditions | 181 | * Fallback dummy cfqq for extreme OOM conditions |
| 181 | */ | 182 | */ |
| 182 | struct cfq_queue oom_cfqq; | 183 | struct cfq_queue oom_cfqq; |
| 184 | |||
| 185 | unsigned long last_end_sync_rq; | ||
| 183 | }; | 186 | }; |
| 184 | 187 | ||
| 185 | enum cfqq_state_flags { | 188 | enum cfqq_state_flags { |
| @@ -265,11 +268,13 @@ static inline int cfq_bio_sync(struct bio *bio) | |||
| 265 | * scheduler run of queue, if there are requests pending and no one in the | 268 | * scheduler run of queue, if there are requests pending and no one in the |
| 266 | * driver that will restart queueing | 269 | * driver that will restart queueing |
| 267 | */ | 270 | */ |
| 268 | static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) | 271 | static inline void cfq_schedule_dispatch(struct cfq_data *cfqd, |
| 272 | unsigned long delay) | ||
| 269 | { | 273 | { |
| 270 | if (cfqd->busy_queues) { | 274 | if (cfqd->busy_queues) { |
| 271 | cfq_log(cfqd, "schedule dispatch"); | 275 | cfq_log(cfqd, "schedule dispatch"); |
| 272 | kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work); | 276 | kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work, |
| 277 | delay); | ||
| 273 | } | 278 | } |
| 274 | } | 279 | } |
| 275 | 280 | ||
| @@ -1326,12 +1331,30 @@ static int cfq_dispatch_requests(struct request_queue *q, int force) | |||
| 1326 | return 0; | 1331 | return 0; |
| 1327 | 1332 | ||
| 1328 | /* | 1333 | /* |
| 1329 | * we are the only queue, allow up to 4 times of 'quantum' | 1334 | * Sole queue user, allow bigger slice |
| 1330 | */ | 1335 | */ |
| 1331 | if (cfqq->dispatched >= 4 * max_dispatch) | 1336 | max_dispatch *= 4; |
| 1332 | return 0; | 1337 | } |
| 1338 | |||
| 1339 | /* | ||
| 1340 | * Async queues must wait a bit before being allowed dispatch. | ||
| 1341 | * We also ramp up the dispatch depth gradually for async IO, | ||
| 1342 | * based on the last sync IO we serviced | ||
| 1343 | */ | ||
| 1344 | if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) { | ||
| 1345 | unsigned long last_sync = jiffies - cfqd->last_end_sync_rq; | ||
| 1346 | unsigned int depth; | ||
| 1347 | |||
| 1348 | depth = last_sync / cfqd->cfq_slice[1]; | ||
| 1349 | if (!depth && !cfqq->dispatched) | ||
| 1350 | depth = 1; | ||
| 1351 | if (depth < max_dispatch) | ||
| 1352 | max_dispatch = depth; | ||
| 1333 | } | 1353 | } |
| 1334 | 1354 | ||
| 1355 | if (cfqq->dispatched >= max_dispatch) | ||
| 1356 | return 0; | ||
| 1357 | |||
| 1335 | /* | 1358 | /* |
| 1336 | * Dispatch a request from this cfqq | 1359 | * Dispatch a request from this cfqq |
| 1337 | */ | 1360 | */ |
| @@ -1376,7 +1399,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq) | |||
| 1376 | 1399 | ||
| 1377 | if (unlikely(cfqd->active_queue == cfqq)) { | 1400 | if (unlikely(cfqd->active_queue == cfqq)) { |
| 1378 | __cfq_slice_expired(cfqd, cfqq, 0); | 1401 | __cfq_slice_expired(cfqd, cfqq, 0); |
| 1379 | cfq_schedule_dispatch(cfqd); | 1402 | cfq_schedule_dispatch(cfqd, 0); |
| 1380 | } | 1403 | } |
| 1381 | 1404 | ||
| 1382 | kmem_cache_free(cfq_pool, cfqq); | 1405 | kmem_cache_free(cfq_pool, cfqq); |
| @@ -1471,7 +1494,7 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
| 1471 | { | 1494 | { |
| 1472 | if (unlikely(cfqq == cfqd->active_queue)) { | 1495 | if (unlikely(cfqq == cfqd->active_queue)) { |
| 1473 | __cfq_slice_expired(cfqd, cfqq, 0); | 1496 | __cfq_slice_expired(cfqd, cfqq, 0); |
| 1474 | cfq_schedule_dispatch(cfqd); | 1497 | cfq_schedule_dispatch(cfqd, 0); |
| 1475 | } | 1498 | } |
| 1476 | 1499 | ||
| 1477 | cfq_put_queue(cfqq); | 1500 | cfq_put_queue(cfqq); |
| @@ -1951,7 +1974,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
| 1951 | enable_idle = old_idle = cfq_cfqq_idle_window(cfqq); | 1974 | enable_idle = old_idle = cfq_cfqq_idle_window(cfqq); |
| 1952 | 1975 | ||
| 1953 | if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle || | 1976 | if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle || |
| 1954 | (cfqd->hw_tag && CIC_SEEKY(cic))) | 1977 | (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic))) |
| 1955 | enable_idle = 0; | 1978 | enable_idle = 0; |
| 1956 | else if (sample_valid(cic->ttime_samples)) { | 1979 | else if (sample_valid(cic->ttime_samples)) { |
| 1957 | if (cic->ttime_mean > cfqd->cfq_slice_idle) | 1980 | if (cic->ttime_mean > cfqd->cfq_slice_idle) |
| @@ -2157,8 +2180,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) | |||
| 2157 | if (cfq_cfqq_sync(cfqq)) | 2180 | if (cfq_cfqq_sync(cfqq)) |
| 2158 | cfqd->sync_flight--; | 2181 | cfqd->sync_flight--; |
| 2159 | 2182 | ||
| 2160 | if (sync) | 2183 | if (sync) { |
| 2161 | RQ_CIC(rq)->last_end_request = now; | 2184 | RQ_CIC(rq)->last_end_request = now; |
| 2185 | cfqd->last_end_sync_rq = now; | ||
| 2186 | } | ||
| 2162 | 2187 | ||
| 2163 | /* | 2188 | /* |
| 2164 | * If this is the active queue, check if it needs to be expired, | 2189 | * If this is the active queue, check if it needs to be expired, |
| @@ -2186,7 +2211,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) | |||
| 2186 | } | 2211 | } |
| 2187 | 2212 | ||
| 2188 | if (!rq_in_driver(cfqd)) | 2213 | if (!rq_in_driver(cfqd)) |
| 2189 | cfq_schedule_dispatch(cfqd); | 2214 | cfq_schedule_dispatch(cfqd, 0); |
| 2190 | } | 2215 | } |
| 2191 | 2216 | ||
| 2192 | /* | 2217 | /* |
| @@ -2316,7 +2341,7 @@ queue_fail: | |||
| 2316 | if (cic) | 2341 | if (cic) |
| 2317 | put_io_context(cic->ioc); | 2342 | put_io_context(cic->ioc); |
| 2318 | 2343 | ||
| 2319 | cfq_schedule_dispatch(cfqd); | 2344 | cfq_schedule_dispatch(cfqd, 0); |
| 2320 | spin_unlock_irqrestore(q->queue_lock, flags); | 2345 | spin_unlock_irqrestore(q->queue_lock, flags); |
| 2321 | cfq_log(cfqd, "set_request fail"); | 2346 | cfq_log(cfqd, "set_request fail"); |
| 2322 | return 1; | 2347 | return 1; |
| @@ -2325,7 +2350,7 @@ queue_fail: | |||
| 2325 | static void cfq_kick_queue(struct work_struct *work) | 2350 | static void cfq_kick_queue(struct work_struct *work) |
| 2326 | { | 2351 | { |
| 2327 | struct cfq_data *cfqd = | 2352 | struct cfq_data *cfqd = |
| 2328 | container_of(work, struct cfq_data, unplug_work); | 2353 | container_of(work, struct cfq_data, unplug_work.work); |
| 2329 | struct request_queue *q = cfqd->queue; | 2354 | struct request_queue *q = cfqd->queue; |
| 2330 | 2355 | ||
| 2331 | spin_lock_irq(q->queue_lock); | 2356 | spin_lock_irq(q->queue_lock); |
| @@ -2379,7 +2404,7 @@ static void cfq_idle_slice_timer(unsigned long data) | |||
| 2379 | expire: | 2404 | expire: |
| 2380 | cfq_slice_expired(cfqd, timed_out); | 2405 | cfq_slice_expired(cfqd, timed_out); |
| 2381 | out_kick: | 2406 | out_kick: |
| 2382 | cfq_schedule_dispatch(cfqd); | 2407 | cfq_schedule_dispatch(cfqd, 0); |
| 2383 | out_cont: | 2408 | out_cont: |
| 2384 | spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); | 2409 | spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); |
| 2385 | } | 2410 | } |
| @@ -2387,7 +2412,7 @@ out_cont: | |||
| 2387 | static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) | 2412 | static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) |
| 2388 | { | 2413 | { |
| 2389 | del_timer_sync(&cfqd->idle_slice_timer); | 2414 | del_timer_sync(&cfqd->idle_slice_timer); |
| 2390 | cancel_work_sync(&cfqd->unplug_work); | 2415 | cancel_delayed_work_sync(&cfqd->unplug_work); |
| 2391 | } | 2416 | } |
| 2392 | 2417 | ||
| 2393 | static void cfq_put_async_queues(struct cfq_data *cfqd) | 2418 | static void cfq_put_async_queues(struct cfq_data *cfqd) |
| @@ -2469,7 +2494,7 @@ static void *cfq_init_queue(struct request_queue *q) | |||
| 2469 | cfqd->idle_slice_timer.function = cfq_idle_slice_timer; | 2494 | cfqd->idle_slice_timer.function = cfq_idle_slice_timer; |
| 2470 | cfqd->idle_slice_timer.data = (unsigned long) cfqd; | 2495 | cfqd->idle_slice_timer.data = (unsigned long) cfqd; |
| 2471 | 2496 | ||
| 2472 | INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); | 2497 | INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue); |
| 2473 | 2498 | ||
| 2474 | cfqd->cfq_quantum = cfq_quantum; | 2499 | cfqd->cfq_quantum = cfq_quantum; |
| 2475 | cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; | 2500 | cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; |
| @@ -2480,8 +2505,9 @@ static void *cfq_init_queue(struct request_queue *q) | |||
| 2480 | cfqd->cfq_slice[1] = cfq_slice_sync; | 2505 | cfqd->cfq_slice[1] = cfq_slice_sync; |
| 2481 | cfqd->cfq_slice_async_rq = cfq_slice_async_rq; | 2506 | cfqd->cfq_slice_async_rq = cfq_slice_async_rq; |
| 2482 | cfqd->cfq_slice_idle = cfq_slice_idle; | 2507 | cfqd->cfq_slice_idle = cfq_slice_idle; |
| 2508 | cfqd->cfq_latency = 1; | ||
| 2483 | cfqd->hw_tag = 1; | 2509 | cfqd->hw_tag = 1; |
| 2484 | 2510 | cfqd->last_end_sync_rq = jiffies; | |
| 2485 | return cfqd; | 2511 | return cfqd; |
| 2486 | } | 2512 | } |
| 2487 | 2513 | ||
| @@ -2549,6 +2575,7 @@ SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); | |||
| 2549 | SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); | 2575 | SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); |
| 2550 | SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); | 2576 | SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); |
| 2551 | SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); | 2577 | SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); |
| 2578 | SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0); | ||
| 2552 | #undef SHOW_FUNCTION | 2579 | #undef SHOW_FUNCTION |
| 2553 | 2580 | ||
| 2554 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ | 2581 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ |
| @@ -2580,6 +2607,7 @@ STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); | |||
| 2580 | STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); | 2607 | STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); |
| 2581 | STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, | 2608 | STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, |
| 2582 | UINT_MAX, 0); | 2609 | UINT_MAX, 0); |
| 2610 | STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0); | ||
| 2583 | #undef STORE_FUNCTION | 2611 | #undef STORE_FUNCTION |
| 2584 | 2612 | ||
| 2585 | #define CFQ_ATTR(name) \ | 2613 | #define CFQ_ATTR(name) \ |
| @@ -2595,6 +2623,7 @@ static struct elv_fs_entry cfq_attrs[] = { | |||
| 2595 | CFQ_ATTR(slice_async), | 2623 | CFQ_ATTR(slice_async), |
| 2596 | CFQ_ATTR(slice_async_rq), | 2624 | CFQ_ATTR(slice_async_rq), |
| 2597 | CFQ_ATTR(slice_idle), | 2625 | CFQ_ATTR(slice_idle), |
| 2626 | CFQ_ATTR(low_latency), | ||
| 2598 | __ATTR_NULL | 2627 | __ATTR_NULL |
| 2599 | }; | 2628 | }; |
| 2600 | 2629 | ||
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c index 7865a34e0faa..9bd086c1a4d5 100644 --- a/block/compat_ioctl.c +++ b/block/compat_ioctl.c | |||
| @@ -21,6 +21,11 @@ static int compat_put_int(unsigned long arg, int val) | |||
| 21 | return put_user(val, (compat_int_t __user *)compat_ptr(arg)); | 21 | return put_user(val, (compat_int_t __user *)compat_ptr(arg)); |
| 22 | } | 22 | } |
| 23 | 23 | ||
| 24 | static int compat_put_uint(unsigned long arg, unsigned int val) | ||
| 25 | { | ||
| 26 | return put_user(val, (compat_uint_t __user *)compat_ptr(arg)); | ||
| 27 | } | ||
| 28 | |||
| 24 | static int compat_put_long(unsigned long arg, long val) | 29 | static int compat_put_long(unsigned long arg, long val) |
| 25 | { | 30 | { |
| 26 | return put_user(val, (compat_long_t __user *)compat_ptr(arg)); | 31 | return put_user(val, (compat_long_t __user *)compat_ptr(arg)); |
| @@ -734,6 +739,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) | |||
| 734 | switch (cmd) { | 739 | switch (cmd) { |
| 735 | case HDIO_GETGEO: | 740 | case HDIO_GETGEO: |
| 736 | return compat_hdio_getgeo(disk, bdev, compat_ptr(arg)); | 741 | return compat_hdio_getgeo(disk, bdev, compat_ptr(arg)); |
| 742 | case BLKPBSZGET: | ||
| 743 | return compat_put_uint(arg, bdev_physical_block_size(bdev)); | ||
| 744 | case BLKIOMIN: | ||
| 745 | return compat_put_uint(arg, bdev_io_min(bdev)); | ||
| 746 | case BLKIOOPT: | ||
| 747 | return compat_put_uint(arg, bdev_io_opt(bdev)); | ||
| 748 | case BLKALIGNOFF: | ||
| 749 | return compat_put_int(arg, bdev_alignment_offset(bdev)); | ||
| 737 | case BLKFLSBUF: | 750 | case BLKFLSBUF: |
| 738 | case BLKROSET: | 751 | case BLKROSET: |
| 739 | case BLKDISCARD: | 752 | case BLKDISCARD: |
diff --git a/block/genhd.c b/block/genhd.c index 517e4332cb37..5a0861da324d 100644 --- a/block/genhd.c +++ b/block/genhd.c | |||
| @@ -869,7 +869,6 @@ static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); | |||
| 869 | static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL); | 869 | static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL); |
| 870 | static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL); | 870 | static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL); |
| 871 | static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); | 871 | static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); |
| 872 | static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL); | ||
| 873 | #ifdef CONFIG_FAIL_MAKE_REQUEST | 872 | #ifdef CONFIG_FAIL_MAKE_REQUEST |
| 874 | static struct device_attribute dev_attr_fail = | 873 | static struct device_attribute dev_attr_fail = |
| 875 | __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); | 874 | __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); |
| @@ -889,7 +888,6 @@ static struct attribute *disk_attrs[] = { | |||
| 889 | &dev_attr_alignment_offset.attr, | 888 | &dev_attr_alignment_offset.attr, |
| 890 | &dev_attr_capability.attr, | 889 | &dev_attr_capability.attr, |
| 891 | &dev_attr_stat.attr, | 890 | &dev_attr_stat.attr, |
| 892 | &dev_attr_inflight.attr, | ||
| 893 | #ifdef CONFIG_FAIL_MAKE_REQUEST | 891 | #ifdef CONFIG_FAIL_MAKE_REQUEST |
| 894 | &dev_attr_fail.attr, | 892 | &dev_attr_fail.attr, |
| 895 | #endif | 893 | #endif |
| @@ -1055,7 +1053,7 @@ static int diskstats_show(struct seq_file *seqf, void *v) | |||
| 1055 | part_stat_read(hd, merges[1]), | 1053 | part_stat_read(hd, merges[1]), |
| 1056 | (unsigned long long)part_stat_read(hd, sectors[1]), | 1054 | (unsigned long long)part_stat_read(hd, sectors[1]), |
| 1057 | jiffies_to_msecs(part_stat_read(hd, ticks[1])), | 1055 | jiffies_to_msecs(part_stat_read(hd, ticks[1])), |
| 1058 | part_in_flight(hd), | 1056 | hd->in_flight, |
| 1059 | jiffies_to_msecs(part_stat_read(hd, io_ticks)), | 1057 | jiffies_to_msecs(part_stat_read(hd, io_ticks)), |
| 1060 | jiffies_to_msecs(part_stat_read(hd, time_in_queue)) | 1058 | jiffies_to_msecs(part_stat_read(hd, time_in_queue)) |
| 1061 | ); | 1059 | ); |
diff --git a/block/ioctl.c b/block/ioctl.c index d3e6b5827a34..1f4d1de12b09 100644 --- a/block/ioctl.c +++ b/block/ioctl.c | |||
| @@ -138,6 +138,11 @@ static int put_int(unsigned long arg, int val) | |||
| 138 | return put_user(val, (int __user *)arg); | 138 | return put_user(val, (int __user *)arg); |
| 139 | } | 139 | } |
| 140 | 140 | ||
| 141 | static int put_uint(unsigned long arg, unsigned int val) | ||
| 142 | { | ||
| 143 | return put_user(val, (unsigned int __user *)arg); | ||
| 144 | } | ||
| 145 | |||
| 141 | static int put_long(unsigned long arg, long val) | 146 | static int put_long(unsigned long arg, long val) |
| 142 | { | 147 | { |
| 143 | return put_user(val, (long __user *)arg); | 148 | return put_user(val, (long __user *)arg); |
| @@ -263,10 +268,18 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, | |||
| 263 | return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512); | 268 | return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512); |
| 264 | case BLKROGET: | 269 | case BLKROGET: |
| 265 | return put_int(arg, bdev_read_only(bdev) != 0); | 270 | return put_int(arg, bdev_read_only(bdev) != 0); |
| 266 | case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */ | 271 | case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */ |
| 267 | return put_int(arg, block_size(bdev)); | 272 | return put_int(arg, block_size(bdev)); |
| 268 | case BLKSSZGET: /* get block device hardware sector size */ | 273 | case BLKSSZGET: /* get block device logical block size */ |
| 269 | return put_int(arg, bdev_logical_block_size(bdev)); | 274 | return put_int(arg, bdev_logical_block_size(bdev)); |
| 275 | case BLKPBSZGET: /* get block device physical block size */ | ||
| 276 | return put_uint(arg, bdev_physical_block_size(bdev)); | ||
| 277 | case BLKIOMIN: | ||
| 278 | return put_uint(arg, bdev_io_min(bdev)); | ||
| 279 | case BLKIOOPT: | ||
| 280 | return put_uint(arg, bdev_io_opt(bdev)); | ||
| 281 | case BLKALIGNOFF: | ||
| 282 | return put_int(arg, bdev_alignment_offset(bdev)); | ||
| 270 | case BLKSECTGET: | 283 | case BLKSECTGET: |
| 271 | return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev))); | 284 | return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev))); |
| 272 | case BLKRASET: | 285 | case BLKRASET: |
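The four new ioctls (together with their compat counterparts earlier in this series) expose the queue topology values to user space. A hedged user-space sketch querying them, assuming the request codes are available via <linux/fs.h>; the device path and error handling are illustrative only:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	unsigned int pbs = 0, io_min = 0, io_opt = 0;
	int align = 0;
	int fd = open("/dev/sda", O_RDONLY);    /* illustrative device */

	if (fd < 0)
		return 1;
	ioctl(fd, BLKPBSZGET, &pbs);    /* physical block size */
	ioctl(fd, BLKIOMIN, &io_min);   /* minimum I/O size    */
	ioctl(fd, BLKIOOPT, &io_opt);   /* optimal I/O size    */
	ioctl(fd, BLKALIGNOFF, &align); /* alignment offset    */
	printf("pbs=%u min=%u opt=%u align=%d\n", pbs, io_min, io_opt, align);
	close(fd);
	return 0;
}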
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index dd8729d674e5..0ed42d8870c7 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig | |||
| @@ -211,6 +211,18 @@ config ACPI_HOTPLUG_CPU | |||
| 211 | select ACPI_CONTAINER | 211 | select ACPI_CONTAINER |
| 212 | default y | 212 | default y |
| 213 | 213 | ||
| 214 | config ACPI_PROCESSOR_AGGREGATOR | ||
| 215 | tristate "Processor Aggregator" | ||
| 216 | depends on ACPI_PROCESSOR | ||
| 217 | depends on EXPERIMENTAL | ||
| 218 | depends on X86 | ||
| 219 | help | ||
| 220 | ACPI 4.0 defines the Processor Aggregator device, which enables the | ||
| 221 | OS to perform specific processor configuration and control that | ||
| 222 | applies to all processors in the platform. Currently only logical | ||
| 223 | processor idling is defined, which is used to reduce power | ||
| 224 | consumption. This driver supports the new device. | ||
| 225 | |||
| 214 | config ACPI_THERMAL | 226 | config ACPI_THERMAL |
| 215 | tristate "Thermal Zone" | 227 | tristate "Thermal Zone" |
| 216 | depends on ACPI_PROCESSOR | 228 | depends on ACPI_PROCESSOR |
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 82cd49dc603b..7702118509a0 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile | |||
| @@ -62,3 +62,5 @@ obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o | |||
| 62 | processor-y := processor_core.o processor_throttling.o | 62 | processor-y := processor_core.o processor_throttling.o |
| 63 | processor-y += processor_idle.o processor_thermal.o | 63 | processor-y += processor_idle.o processor_thermal.o |
| 64 | processor-$(CONFIG_CPU_FREQ) += processor_perflib.o | 64 | processor-$(CONFIG_CPU_FREQ) += processor_perflib.o |
| 65 | |||
| 66 | obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o | ||
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c new file mode 100644 index 000000000000..0d2cdb86158b --- /dev/null +++ b/drivers/acpi/acpi_pad.c | |||
| @@ -0,0 +1,514 @@ | |||
| 1 | /* | ||
| 2 | * acpi_pad.c ACPI Processor Aggregator Driver | ||
| 3 | * | ||
| 4 | * Copyright (c) 2009, Intel Corporation. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms and conditions of the GNU General Public License, | ||
| 8 | * version 2, as published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 13 | * more details. | ||
| 14 | * | ||
| 15 | * You should have received a copy of the GNU General Public License along with | ||
| 16 | * this program; if not, write to the Free Software Foundation, Inc., | ||
| 17 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 18 | * | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include <linux/kernel.h> | ||
| 22 | #include <linux/cpumask.h> | ||
| 23 | #include <linux/module.h> | ||
| 24 | #include <linux/init.h> | ||
| 25 | #include <linux/types.h> | ||
| 26 | #include <linux/kthread.h> | ||
| 27 | #include <linux/freezer.h> | ||
| 28 | #include <linux/cpu.h> | ||
| 29 | #include <linux/clockchips.h> | ||
| 30 | #include <acpi/acpi_bus.h> | ||
| 31 | #include <acpi/acpi_drivers.h> | ||
| 32 | |||
| 33 | #define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator" | ||
| 34 | #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator" | ||
| 35 | #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80 | ||
| 36 | static DEFINE_MUTEX(isolated_cpus_lock); | ||
| 37 | |||
| 38 | #define MWAIT_SUBSTATE_MASK (0xf) | ||
| 39 | #define MWAIT_CSTATE_MASK (0xf) | ||
| 40 | #define MWAIT_SUBSTATE_SIZE (4) | ||
| 41 | #define CPUID_MWAIT_LEAF (5) | ||
| 42 | #define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1) | ||
| 43 | #define CPUID5_ECX_INTERRUPT_BREAK (0x2) | ||
| 44 | static unsigned long power_saving_mwait_eax; | ||
| 45 | static void power_saving_mwait_init(void) | ||
| 46 | { | ||
| 47 | unsigned int eax, ebx, ecx, edx; | ||
| 48 | unsigned int highest_cstate = 0; | ||
| 49 | unsigned int highest_subcstate = 0; | ||
| 50 | int i; | ||
| 51 | |||
| 52 | if (!boot_cpu_has(X86_FEATURE_MWAIT)) | ||
| 53 | return; | ||
| 54 | if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) | ||
| 55 | return; | ||
| 56 | |||
| 57 | cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); | ||
| 58 | |||
| 59 | if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || | ||
| 60 | !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) | ||
| 61 | return; | ||
| 62 | |||
| 63 | edx >>= MWAIT_SUBSTATE_SIZE; | ||
| 64 | for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) { | ||
| 65 | if (edx & MWAIT_SUBSTATE_MASK) { | ||
| 66 | highest_cstate = i; | ||
| 67 | highest_subcstate = edx & MWAIT_SUBSTATE_MASK; | ||
| 68 | } | ||
| 69 | } | ||
| 70 | power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) | | ||
| 71 | (highest_subcstate - 1); | ||
| 72 | |||
| 73 | for_each_online_cpu(i) | ||
| 74 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &i); | ||
| 75 | |||
| 76 | #if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86) | ||
| 77 | switch (boot_cpu_data.x86_vendor) { | ||
| 78 | case X86_VENDOR_AMD: | ||
| 79 | case X86_VENDOR_INTEL: | ||
| 80 | /* | ||
| 81 | * AMD Fam10h TSC will tick in all | ||
| 82 | * C/P/S0/S1 states when this bit is set. | ||
| 83 | */ | ||
| 84 | if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) | ||
| 85 | return; | ||
| 86 | |||
| 87 | /*FALL THROUGH*/ | ||
| 88 | default: | ||
| 89 | /* TSC could halt in idle, so notify users */ | ||
| 90 | mark_tsc_unstable("TSC halts in idle"); | ||
| 91 | } | ||
| 92 | #endif | ||
| 93 | } | ||
| 94 | |||
| 95 | static unsigned long cpu_weight[NR_CPUS]; | ||
| 96 | static int tsk_in_cpu[NR_CPUS] = {[0 ... NR_CPUS-1] = -1}; | ||
| 97 | static DECLARE_BITMAP(pad_busy_cpus_bits, NR_CPUS); | ||
| 98 | static void round_robin_cpu(unsigned int tsk_index) | ||
| 99 | { | ||
| 100 | struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits); | ||
| 101 | cpumask_var_t tmp; | ||
| 102 | int cpu; | ||
| 103 | unsigned long min_weight = -1, preferred_cpu; | ||
| 104 | |||
| 105 | if (!alloc_cpumask_var(&tmp, GFP_KERNEL)) | ||
| 106 | return; | ||
| 107 | |||
| 108 | mutex_lock(&isolated_cpus_lock); | ||
| 109 | cpumask_clear(tmp); | ||
| 110 | for_each_cpu(cpu, pad_busy_cpus) | ||
| 111 | cpumask_or(tmp, tmp, topology_thread_cpumask(cpu)); | ||
| 112 | cpumask_andnot(tmp, cpu_online_mask, tmp); | ||
| 113 | /* avoid HT siblings if possible */ | ||
| 114 | if (cpumask_empty(tmp)) | ||
| 115 | cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus); | ||
| 116 | if (cpumask_empty(tmp)) { | ||
| 117 | mutex_unlock(&isolated_cpus_lock); | ||
| 118 | return; | ||
| 119 | } | ||
| 120 | for_each_cpu(cpu, tmp) { | ||
| 121 | if (cpu_weight[cpu] < min_weight) { | ||
| 122 | min_weight = cpu_weight[cpu]; | ||
| 123 | preferred_cpu = cpu; | ||
| 124 | } | ||
| 125 | } | ||
| 126 | |||
| 127 | if (tsk_in_cpu[tsk_index] != -1) | ||
| 128 | cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus); | ||
| 129 | tsk_in_cpu[tsk_index] = preferred_cpu; | ||
| 130 | cpumask_set_cpu(preferred_cpu, pad_busy_cpus); | ||
| 131 | cpu_weight[preferred_cpu]++; | ||
| 132 | mutex_unlock(&isolated_cpus_lock); | ||
| 133 | |||
| 134 | set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu)); | ||
| 135 | } | ||
| 136 | |||
| 137 | static void exit_round_robin(unsigned int tsk_index) | ||
| 138 | { | ||
| 139 | struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits); | ||
| 140 | cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus); | ||
| 141 | tsk_in_cpu[tsk_index] = -1; | ||
| 142 | } | ||
| 143 | |||
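round_robin_cpu() above boils down to a weighted least-recently-used choice: among online CPUs that are not already hosting a power-saving thread (preferring ones whose hyperthread siblings are also free), pick the CPU with the smallest cpu_weight, mark it busy and bump its weight. The stand-alone sketch below shows just that selection rule; the weights and busy flags are invented and the sibling-avoidance pass is left out for brevity.

/* User-space illustration of the selection rule in round_robin_cpu(). */
#include <stdio.h>
#include <stdbool.h>

#define NCPUS 4

int main(void)
{
	unsigned long cpu_weight[NCPUS] = { 3, 1, 2, 1 };  /* times each CPU was picked */
	bool busy[NCPUS] = { false, true, false, false };  /* cpu1 already hosts a thread */
	unsigned long min_weight = (unsigned long)-1;
	int cpu, preferred_cpu = -1;

	for (cpu = 0; cpu < NCPUS; cpu++) {
		if (busy[cpu])
			continue;
		if (cpu_weight[cpu] < min_weight) {
			min_weight = cpu_weight[cpu];
			preferred_cpu = cpu;
		}
	}
	/* cpu3 wins: lowest weight among the non-busy CPUs */
	printf("preferred cpu = %d\n", preferred_cpu);
	return 0;
}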
| 144 | static unsigned int idle_pct = 5; /* percentage */ | ||
| 145 | static unsigned int round_robin_time = 10; /* seconds */ | ||
| 146 | static int power_saving_thread(void *data) | ||
| 147 | { | ||
| 148 | struct sched_param param = {.sched_priority = 1}; | ||
| 149 | int do_sleep; | ||
| 150 | unsigned int tsk_index = (unsigned long)data; | ||
| 151 | u64 last_jiffies = 0; | ||
| 152 | |||
| 153 | sched_setscheduler(current, SCHED_RR, ¶m); | ||
| 154 | |||
| 155 | while (!kthread_should_stop()) { | ||
| 156 | int cpu; | ||
| 157 | u64 expire_time; | ||
| 158 | |||
| 159 | try_to_freeze(); | ||
| 160 | |||
| 161 | /* round robin to cpus */ | ||
| 162 | if (last_jiffies + round_robin_time * HZ < jiffies) { | ||
| 163 | last_jiffies = jiffies; | ||
| 164 | round_robin_cpu(tsk_index); | ||
| 165 | } | ||
| 166 | |||
| 167 | do_sleep = 0; | ||
| 168 | |||
| 169 | current_thread_info()->status &= ~TS_POLLING; | ||
| 170 | /* | ||
| 171 | * TS_POLLING-cleared state must be visible before we test | ||
| 172 | * NEED_RESCHED: | ||
| 173 | */ | ||
| 174 | smp_mb(); | ||
| 175 | |||
| 176 | expire_time = jiffies + HZ * (100 - idle_pct) / 100; | ||
| 177 | |||
| 178 | while (!need_resched()) { | ||
| 179 | local_irq_disable(); | ||
| 180 | cpu = smp_processor_id(); | ||
| 181 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, | ||
| 182 | &cpu); | ||
| 183 | stop_critical_timings(); | ||
| 184 | |||
| 185 | __monitor((void *)¤t_thread_info()->flags, 0, 0); | ||
| 186 | smp_mb(); | ||
| 187 | if (!need_resched()) | ||
| 188 | __mwait(power_saving_mwait_eax, 1); | ||
| 189 | |||
| 190 | start_critical_timings(); | ||
| 191 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, | ||
| 192 | &cpu); | ||
| 193 | local_irq_enable(); | ||
| 194 | |||
| 195 | if (jiffies > expire_time) { | ||
| 196 | do_sleep = 1; | ||
| 197 | break; | ||
| 198 | } | ||
| 199 | } | ||
| 200 | |||
| 201 | current_thread_info()->status |= TS_POLLING; | ||
| 202 | |||
| 203 | /* | ||
| 204 | * The RT scheduler throttles RT task runtime: once an RT task has | ||
| 205 | * used 95% of the CPU time, it is scheduled out for the remaining | ||
| 206 | * 5% so that other tasks are not starved. That mechanism only | ||
| 207 | * works when every CPU is running an RT task; if one CPU has no | ||
| 208 | * RT task, RT tasks from other CPUs borrow its CPU time and can | ||
| 209 | * end up using more than 95%. To keep the starvation avoidance | ||
| 210 | * working, take a nap here. | ||
| 211 | */ | ||
| 212 | if (do_sleep) | ||
| 213 | schedule_timeout_killable(HZ * idle_pct / 100); | ||
| 214 | } | ||
| 215 | |||
| 216 | exit_round_robin(tsk_index); | ||
| 217 | return 0; | ||
| 218 | } | ||
| 219 | |||
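The two module knobs above set the thread's duty cycle: each iteration spends up to (100 - idle_pct)% of a second's worth of jiffies forcing the CPU into MWAIT and then naps for idle_pct% so the RT throttling described in the comment is satisfied, and every round_robin_time seconds the thread hops to the least-used CPU. A quick worked example with the defaults (the HZ value is an assumption for the illustration):

/* Worked example of the timing arithmetic in power_saving_thread(). */
#include <stdio.h>

int main(void)
{
	unsigned int hz = 1000;			/* assumed CONFIG_HZ */
	unsigned int idle_pct = 5;		/* default above */
	unsigned int round_robin_time = 10;	/* seconds, default above */

	unsigned int mwait_jiffies = hz * (100 - idle_pct) / 100;	/* 950 */
	unsigned int nap_jiffies = hz * idle_pct / 100;			/* 50 */

	printf("per cycle: up to %u jiffies in MWAIT, %u jiffies napping\n",
	       mwait_jiffies, nap_jiffies);
	printf("migrate to a new CPU every %u jiffies\n",
	       round_robin_time * hz);
	return 0;
}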
| 220 | static struct task_struct *ps_tsks[NR_CPUS]; | ||
| 221 | static unsigned int ps_tsk_num; | ||
| 222 | static int create_power_saving_task(void) | ||
| 223 | { | ||
| 224 | ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread, | ||
| 225 | (void *)(unsigned long)ps_tsk_num, | ||
| 226 | "power_saving/%d", ps_tsk_num); | ||
| 227 | if (!IS_ERR(ps_tsks[ps_tsk_num])) { | ||
| 228 | ps_tsk_num++; | ||
| 229 | return 0; | ||
| 230 | } | ||
| 231 | return -EINVAL; | ||
| 232 | } | ||
| 233 | |||
| 234 | static void destroy_power_saving_task(void) | ||
| 235 | { | ||
| 236 | if (ps_tsk_num > 0) { | ||
| 237 | ps_tsk_num--; | ||
| 238 | kthread_stop(ps_tsks[ps_tsk_num]); | ||
| 239 | } | ||
| 240 | } | ||
| 241 | |||
| 242 | static void set_power_saving_task_num(unsigned int num) | ||
| 243 | { | ||
| 244 | if (num > ps_tsk_num) { | ||
| 245 | while (ps_tsk_num < num) { | ||
| 246 | if (create_power_saving_task()) | ||
| 247 | return; | ||
| 248 | } | ||
| 249 | } else if (num < ps_tsk_num) { | ||
| 250 | while (ps_tsk_num > num) | ||
| 251 | destroy_power_saving_task(); | ||
| 252 | } | ||
| 253 | } | ||
| 254 | |||
| 255 | static int acpi_pad_idle_cpus(unsigned int num_cpus) | ||
| 256 | { | ||
| 257 | get_online_cpus(); | ||
| 258 | |||
| 259 | num_cpus = min_t(unsigned int, num_cpus, num_online_cpus()); | ||
| 260 | set_power_saving_task_num(num_cpus); | ||
| 261 | |||
| 262 | put_online_cpus(); | ||
| 263 | return 0; | ||
| 264 | } | ||
| 265 | |||
| 266 | static uint32_t acpi_pad_idle_cpus_num(void) | ||
| 267 | { | ||
| 268 | return ps_tsk_num; | ||
| 269 | } | ||
| 270 | |||
| 271 | static ssize_t acpi_pad_rrtime_store(struct device *dev, | ||
| 272 | struct device_attribute *attr, const char *buf, size_t count) | ||
| 273 | { | ||
| 274 | unsigned long num; | ||
| 275 | if (strict_strtoul(buf, 0, &num)) | ||
| 276 | return -EINVAL; | ||
| 277 | if (num < 1 || num >= 100) | ||
| 278 | return -EINVAL; | ||
| 279 | mutex_lock(&isolated_cpus_lock); | ||
| 280 | round_robin_time = num; | ||
| 281 | mutex_unlock(&isolated_cpus_lock); | ||
| 282 | return count; | ||
| 283 | } | ||
| 284 | |||
| 285 | static ssize_t acpi_pad_rrtime_show(struct device *dev, | ||
| 286 | struct device_attribute *attr, char *buf) | ||
| 287 | { | ||
| 288 | return scnprintf(buf, PAGE_SIZE, "%d", round_robin_time); | ||
| 289 | } | ||
| 290 | static DEVICE_ATTR(rrtime, S_IRUGO|S_IWUSR, | ||
| 291 | acpi_pad_rrtime_show, | ||
| 292 | acpi_pad_rrtime_store); | ||
| 293 | |||
| 294 | static ssize_t acpi_pad_idlepct_store(struct device *dev, | ||
| 295 | struct device_attribute *attr, const char *buf, size_t count) | ||
| 296 | { | ||
| 297 | unsigned long num; | ||
| 298 | if (strict_strtoul(buf, 0, &num)) | ||
| 299 | return -EINVAL; | ||
| 300 | if (num < 1 || num >= 100) | ||
| 301 | return -EINVAL; | ||
| 302 | mutex_lock(&isolated_cpus_lock); | ||
| 303 | idle_pct = num; | ||
| 304 | mutex_unlock(&isolated_cpus_lock); | ||
| 305 | return count; | ||
| 306 | } | ||
| 307 | |||
| 308 | static ssize_t acpi_pad_idlepct_show(struct device *dev, | ||
| 309 | struct device_attribute *attr, char *buf) | ||
| 310 | { | ||
| 311 | return scnprintf(buf, PAGE_SIZE, "%d", idle_pct); | ||
| 312 | } | ||
| 313 | static DEVICE_ATTR(idlepct, S_IRUGO|S_IWUSR, | ||
| 314 | acpi_pad_idlepct_show, | ||
| 315 | acpi_pad_idlepct_store); | ||
| 316 | |||
| 317 | static ssize_t acpi_pad_idlecpus_store(struct device *dev, | ||
| 318 | struct device_attribute *attr, const char *buf, size_t count) | ||
| 319 | { | ||
| 320 | unsigned long num; | ||
| 321 | if (strict_strtoul(buf, 0, &num)) | ||
| 322 | return -EINVAL; | ||
| 323 | mutex_lock(&isolated_cpus_lock); | ||
| 324 | acpi_pad_idle_cpus(num); | ||
| 325 | mutex_unlock(&isolated_cpus_lock); | ||
| 326 | return count; | ||
| 327 | } | ||
| 328 | |||
| 329 | static ssize_t acpi_pad_idlecpus_show(struct device *dev, | ||
| 330 | struct device_attribute *attr, char *buf) | ||
| 331 | { | ||
| 332 | return cpumask_scnprintf(buf, PAGE_SIZE, | ||
| 333 | to_cpumask(pad_busy_cpus_bits)); | ||
| 334 | } | ||
| 335 | static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR, | ||
| 336 | acpi_pad_idlecpus_show, | ||
| 337 | acpi_pad_idlecpus_store); | ||
| 338 | |||
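The three attributes above are the driver's whole user interface: writing N to idlecpus asks for N forced-idle CPUs (reading idlecpus returns the mask of CPUs currently occupied), while idlepct and rrtime tune the duty cycle and migration period used by the threads. A minimal user-space sketch follows; the sysfs path is an assumption, since the real location depends on where the ACPI000C device lands in the platform's namespace.

/* Hypothetical example: request two forced-idle CPUs via sysfs.
 * The device path is illustrative only. */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/LNXSYSTM:00/ACPI000C:00/idlecpus";  /* assumed path */
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("2\n", f);	/* idle two CPUs */
	fclose(f);
	return 0;
}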
| 339 | static int acpi_pad_add_sysfs(struct acpi_device *device) | ||
| 340 | { | ||
| 341 | int result; | ||
| 342 | |||
| 343 | result = device_create_file(&device->dev, &dev_attr_idlecpus); | ||
| 344 | if (result) | ||
| 345 | return -ENODEV; | ||
| 346 | result = device_create_file(&device->dev, &dev_attr_idlepct); | ||
| 347 | if (result) { | ||
| 348 | device_remove_file(&device->dev, &dev_attr_idlecpus); | ||
| 349 | return -ENODEV; | ||
| 350 | } | ||
| 351 | result = device_create_file(&device->dev, &dev_attr_rrtime); | ||
| 352 | if (result) { | ||
| 353 | device_remove_file(&device->dev, &dev_attr_idlecpus); | ||
| 354 | device_remove_file(&device->dev, &dev_attr_idlepct); | ||
| 355 | return -ENODEV; | ||
| 356 | } | ||
| 357 | return 0; | ||
| 358 | } | ||
| 359 | |||
| 360 | static void acpi_pad_remove_sysfs(struct acpi_device *device) | ||
| 361 | { | ||
| 362 | device_remove_file(&device->dev, &dev_attr_idlecpus); | ||
| 363 | device_remove_file(&device->dev, &dev_attr_idlepct); | ||
| 364 | device_remove_file(&device->dev, &dev_attr_rrtime); | ||
| 365 | } | ||
| 366 | |||
| 367 | /* Query firmware how many CPUs should be idle */ | ||
| 368 | static int acpi_pad_pur(acpi_handle handle, int *num_cpus) | ||
| 369 | { | ||
| 370 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; | ||
| 371 | acpi_status status; | ||
| 372 | union acpi_object *package; | ||
| 373 | int rev, num, ret = -EINVAL; | ||
| 374 | |||
| 375 | status = acpi_evaluate_object(handle, "_PUR", NULL, &buffer); | ||
| 376 | if (ACPI_FAILURE(status)) | ||
| 377 | return -EINVAL; | ||
| 378 | package = buffer.pointer; | ||
| 379 | if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2) | ||
| 380 | goto out; | ||
| 381 | rev = package->package.elements[0].integer.value; | ||
| 382 | num = package->package.elements[1].integer.value; | ||
| 383 | if (rev != 1) | ||
| 384 | goto out; | ||
| 385 | *num_cpus = num; | ||
| 386 | ret = 0; | ||
| 387 | out: | ||
| 388 | kfree(buffer.pointer); | ||
| 389 | return ret; | ||
| 390 | } | ||
| 391 | |||
| 392 | /* Notify firmware how many CPUs are idle */ | ||
| 393 | static void acpi_pad_ost(acpi_handle handle, int stat, | ||
| 394 | uint32_t idle_cpus) | ||
| 395 | { | ||
| 396 | union acpi_object params[3] = { | ||
| 397 | {.type = ACPI_TYPE_INTEGER,}, | ||
| 398 | {.type = ACPI_TYPE_INTEGER,}, | ||
| 399 | {.type = ACPI_TYPE_BUFFER,}, | ||
| 400 | }; | ||
| 401 | struct acpi_object_list arg_list = {3, params}; | ||
| 402 | |||
| 403 | params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY; | ||
| 404 | params[1].integer.value = stat; | ||
| 405 | params[2].buffer.length = 4; | ||
| 406 | params[2].buffer.pointer = (void *)&idle_cpus; | ||
| 407 | acpi_evaluate_object(handle, "_OST", &arg_list, NULL); | ||
| 408 | } | ||
| 409 | |||
| 410 | static void acpi_pad_handle_notify(acpi_handle handle) | ||
| 411 | { | ||
| 412 | int num_cpus, ret; | ||
| 413 | uint32_t idle_cpus; | ||
| 414 | |||
| 415 | mutex_lock(&isolated_cpus_lock); | ||
| 416 | if (acpi_pad_pur(handle, &num_cpus)) { | ||
| 417 | mutex_unlock(&isolated_cpus_lock); | ||
| 418 | return; | ||
| 419 | } | ||
| 420 | ret = acpi_pad_idle_cpus(num_cpus); | ||
| 421 | idle_cpus = acpi_pad_idle_cpus_num(); | ||
| 422 | if (!ret) | ||
| 423 | acpi_pad_ost(handle, 0, idle_cpus); | ||
| 424 | else | ||
| 425 | acpi_pad_ost(handle, 1, 0); | ||
| 426 | mutex_unlock(&isolated_cpus_lock); | ||
| 427 | } | ||
| 428 | |||
| 429 | static void acpi_pad_notify(acpi_handle handle, u32 event, | ||
| 430 | void *data) | ||
| 431 | { | ||
| 432 | struct acpi_device *device = data; | ||
| 433 | |||
| 434 | switch (event) { | ||
| 435 | case ACPI_PROCESSOR_AGGREGATOR_NOTIFY: | ||
| 436 | acpi_pad_handle_notify(handle); | ||
| 437 | acpi_bus_generate_proc_event(device, event, 0); | ||
| 438 | acpi_bus_generate_netlink_event(device->pnp.device_class, | ||
| 439 | dev_name(&device->dev), event, 0); | ||
| 440 | break; | ||
| 441 | default: | ||
| 442 | printk(KERN_WARNING "Unsupported event [0x%x]\n", event); | ||
| 443 | break; | ||
| 444 | } | ||
| 445 | } | ||
| 446 | |||
| 447 | static int acpi_pad_add(struct acpi_device *device) | ||
| 448 | { | ||
| 449 | acpi_status status; | ||
| 450 | |||
| 451 | strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME); | ||
| 452 | strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS); | ||
| 453 | |||
| 454 | if (acpi_pad_add_sysfs(device)) | ||
| 455 | return -ENODEV; | ||
| 456 | |||
| 457 | status = acpi_install_notify_handler(device->handle, | ||
| 458 | ACPI_DEVICE_NOTIFY, acpi_pad_notify, device); | ||
| 459 | if (ACPI_FAILURE(status)) { | ||
| 460 | acpi_pad_remove_sysfs(device); | ||
| 461 | return -ENODEV; | ||
| 462 | } | ||
| 463 | |||
| 464 | return 0; | ||
| 465 | } | ||
| 466 | |||
| 467 | static int acpi_pad_remove(struct acpi_device *device, | ||
| 468 | int type) | ||
| 469 | { | ||
| 470 | mutex_lock(&isolated_cpus_lock); | ||
| 471 | acpi_pad_idle_cpus(0); | ||
| 472 | mutex_unlock(&isolated_cpus_lock); | ||
| 473 | |||
| 474 | acpi_remove_notify_handler(device->handle, | ||
| 475 | ACPI_DEVICE_NOTIFY, acpi_pad_notify); | ||
| 476 | acpi_pad_remove_sysfs(device); | ||
| 477 | return 0; | ||
| 478 | } | ||
| 479 | |||
| 480 | static const struct acpi_device_id pad_device_ids[] = { | ||
| 481 | {"ACPI000C", 0}, | ||
| 482 | {"", 0}, | ||
| 483 | }; | ||
| 484 | MODULE_DEVICE_TABLE(acpi, pad_device_ids); | ||
| 485 | |||
| 486 | static struct acpi_driver acpi_pad_driver = { | ||
| 487 | .name = "processor_aggregator", | ||
| 488 | .class = ACPI_PROCESSOR_AGGREGATOR_CLASS, | ||
| 489 | .ids = pad_device_ids, | ||
| 490 | .ops = { | ||
| 491 | .add = acpi_pad_add, | ||
| 492 | .remove = acpi_pad_remove, | ||
| 493 | }, | ||
| 494 | }; | ||
| 495 | |||
| 496 | static int __init acpi_pad_init(void) | ||
| 497 | { | ||
| 498 | power_saving_mwait_init(); | ||
| 499 | if (power_saving_mwait_eax == 0) | ||
| 500 | return -EINVAL; | ||
| 501 | |||
| 502 | return acpi_bus_register_driver(&acpi_pad_driver); | ||
| 503 | } | ||
| 504 | |||
| 505 | static void __exit acpi_pad_exit(void) | ||
| 506 | { | ||
| 507 | acpi_bus_unregister_driver(&acpi_pad_driver); | ||
| 508 | } | ||
| 509 | |||
| 510 | module_init(acpi_pad_init); | ||
| 511 | module_exit(acpi_pad_exit); | ||
| 512 | MODULE_AUTHOR("Shaohua Li<shaohua.li@intel.com>"); | ||
| 513 | MODULE_DESCRIPTION("ACPI Processor Aggregator Driver"); | ||
| 514 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index 3a2cfefc71ab..7338b6a3e049 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c | |||
| @@ -67,7 +67,7 @@ struct dock_station { | |||
| 67 | struct list_head dependent_devices; | 67 | struct list_head dependent_devices; |
| 68 | struct list_head hotplug_devices; | 68 | struct list_head hotplug_devices; |
| 69 | 69 | ||
| 70 | struct list_head sibiling; | 70 | struct list_head sibling; |
| 71 | struct platform_device *dock_device; | 71 | struct platform_device *dock_device; |
| 72 | }; | 72 | }; |
| 73 | static LIST_HEAD(dock_stations); | 73 | static LIST_HEAD(dock_stations); |
| @@ -275,7 +275,7 @@ int is_dock_device(acpi_handle handle) | |||
| 275 | 275 | ||
| 276 | if (is_dock(handle)) | 276 | if (is_dock(handle)) |
| 277 | return 1; | 277 | return 1; |
| 278 | list_for_each_entry(dock_station, &dock_stations, sibiling) { | 278 | list_for_each_entry(dock_station, &dock_stations, sibling) { |
| 279 | if (find_dock_dependent_device(dock_station, handle)) | 279 | if (find_dock_dependent_device(dock_station, handle)) |
| 280 | return 1; | 280 | return 1; |
| 281 | } | 281 | } |
| @@ -619,7 +619,7 @@ register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops, | |||
| 619 | * make sure this handle is for a device dependent on the dock, | 619 | * make sure this handle is for a device dependent on the dock, |
| 620 | * this would include the dock station itself | 620 | * this would include the dock station itself |
| 621 | */ | 621 | */ |
| 622 | list_for_each_entry(dock_station, &dock_stations, sibiling) { | 622 | list_for_each_entry(dock_station, &dock_stations, sibling) { |
| 623 | /* | 623 | /* |
| 624 | * An ATA bay can be in a dock and itself can be ejected | 624 | * An ATA bay can be in a dock and itself can be ejected |
| 625 | * separately, so there are two 'dock stations' which need the | 625 | * separately, so there are two 'dock stations' which need the |
| @@ -651,7 +651,7 @@ void unregister_hotplug_dock_device(acpi_handle handle) | |||
| 651 | if (!dock_station_count) | 651 | if (!dock_station_count) |
| 652 | return; | 652 | return; |
| 653 | 653 | ||
| 654 | list_for_each_entry(dock_station, &dock_stations, sibiling) { | 654 | list_for_each_entry(dock_station, &dock_stations, sibling) { |
| 655 | dd = find_dock_dependent_device(dock_station, handle); | 655 | dd = find_dock_dependent_device(dock_station, handle); |
| 656 | if (dd) | 656 | if (dd) |
| 657 | dock_del_hotplug_device(dock_station, dd); | 657 | dock_del_hotplug_device(dock_station, dd); |
| @@ -787,7 +787,7 @@ static int acpi_dock_notifier_call(struct notifier_block *this, | |||
| 787 | if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK | 787 | if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK |
| 788 | && event != ACPI_NOTIFY_EJECT_REQUEST) | 788 | && event != ACPI_NOTIFY_EJECT_REQUEST) |
| 789 | return 0; | 789 | return 0; |
| 790 | list_for_each_entry(dock_station, &dock_stations, sibiling) { | 790 | list_for_each_entry(dock_station, &dock_stations, sibling) { |
| 791 | if (dock_station->handle == handle) { | 791 | if (dock_station->handle == handle) { |
| 792 | struct dock_data *dock_data; | 792 | struct dock_data *dock_data; |
| 793 | 793 | ||
| @@ -958,7 +958,7 @@ static int dock_add(acpi_handle handle) | |||
| 958 | dock_station->last_dock_time = jiffies - HZ; | 958 | dock_station->last_dock_time = jiffies - HZ; |
| 959 | INIT_LIST_HEAD(&dock_station->dependent_devices); | 959 | INIT_LIST_HEAD(&dock_station->dependent_devices); |
| 960 | INIT_LIST_HEAD(&dock_station->hotplug_devices); | 960 | INIT_LIST_HEAD(&dock_station->hotplug_devices); |
| 961 | INIT_LIST_HEAD(&dock_station->sibiling); | 961 | INIT_LIST_HEAD(&dock_station->sibling); |
| 962 | spin_lock_init(&dock_station->dd_lock); | 962 | spin_lock_init(&dock_station->dd_lock); |
| 963 | mutex_init(&dock_station->hp_lock); | 963 | mutex_init(&dock_station->hp_lock); |
| 964 | ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list); | 964 | ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list); |
| @@ -1044,7 +1044,7 @@ static int dock_add(acpi_handle handle) | |||
| 1044 | add_dock_dependent_device(dock_station, dd); | 1044 | add_dock_dependent_device(dock_station, dd); |
| 1045 | 1045 | ||
| 1046 | dock_station_count++; | 1046 | dock_station_count++; |
| 1047 | list_add(&dock_station->sibiling, &dock_stations); | 1047 | list_add(&dock_station->sibling, &dock_stations); |
| 1048 | return 0; | 1048 | return 0; |
| 1049 | 1049 | ||
| 1050 | dock_add_err_unregister: | 1050 | dock_add_err_unregister: |
| @@ -1149,7 +1149,7 @@ static void __exit dock_exit(void) | |||
| 1149 | struct dock_station *tmp; | 1149 | struct dock_station *tmp; |
| 1150 | 1150 | ||
| 1151 | unregister_acpi_bus_notifier(&dock_acpi_notifier); | 1151 | unregister_acpi_bus_notifier(&dock_acpi_notifier); |
| 1152 | list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibiling) | 1152 | list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibling) |
| 1153 | dock_remove(dock_station); | 1153 | dock_remove(dock_station); |
| 1154 | } | 1154 | } |
| 1155 | 1155 | ||
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index f70796081c4c..baef28c1e630 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
| @@ -119,6 +119,8 @@ static struct acpi_ec { | |||
| 119 | } *boot_ec, *first_ec; | 119 | } *boot_ec, *first_ec; |
| 120 | 120 | ||
| 121 | static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */ | 121 | static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */ |
| 122 | static int EC_FLAGS_VALIDATE_ECDT; /* ASUSTek ECDTs need to be validated */ | ||
| 123 | static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */ | ||
| 122 | 124 | ||
| 123 | /* -------------------------------------------------------------------------- | 125 | /* -------------------------------------------------------------------------- |
| 124 | Transaction Management | 126 | Transaction Management |
| @@ -232,10 +234,8 @@ static int ec_poll(struct acpi_ec *ec) | |||
| 232 | } | 234 | } |
| 233 | advance_transaction(ec, acpi_ec_read_status(ec)); | 235 | advance_transaction(ec, acpi_ec_read_status(ec)); |
| 234 | } while (time_before(jiffies, delay)); | 236 | } while (time_before(jiffies, delay)); |
| 235 | if (!ec->curr->irq_count || | 237 | if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) |
| 236 | (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)) | ||
| 237 | break; | 238 | break; |
| 238 | /* try restart command if we get any false interrupts */ | ||
| 239 | pr_debug(PREFIX "controller reset, restart transaction\n"); | 239 | pr_debug(PREFIX "controller reset, restart transaction\n"); |
| 240 | spin_lock_irqsave(&ec->curr_lock, flags); | 240 | spin_lock_irqsave(&ec->curr_lock, flags); |
| 241 | start_transaction(ec); | 241 | start_transaction(ec); |
| @@ -899,6 +899,44 @@ static const struct acpi_device_id ec_device_ids[] = { | |||
| 899 | {"", 0}, | 899 | {"", 0}, |
| 900 | }; | 900 | }; |
| 901 | 901 | ||
| 902 | /* Some BIOS do not survive early DSDT scan, skip it */ | ||
| 903 | static int ec_skip_dsdt_scan(const struct dmi_system_id *id) | ||
| 904 | { | ||
| 905 | EC_FLAGS_SKIP_DSDT_SCAN = 1; | ||
| 906 | return 0; | ||
| 907 | } | ||
| 908 | |||
| 909 | /* ASUStek often supplies us with broken ECDT, validate it */ | ||
| 910 | static int ec_validate_ecdt(const struct dmi_system_id *id) | ||
| 911 | { | ||
| 912 | EC_FLAGS_VALIDATE_ECDT = 1; | ||
| 913 | return 0; | ||
| 914 | } | ||
| 915 | |||
| 916 | /* MSI EC needs special treatment, enable it */ | ||
| 917 | static int ec_flag_msi(const struct dmi_system_id *id) | ||
| 918 | { | ||
| 919 | EC_FLAGS_MSI = 1; | ||
| 920 | EC_FLAGS_VALIDATE_ECDT = 1; | ||
| 921 | return 0; | ||
| 922 | } | ||
| 923 | |||
| 924 | static struct dmi_system_id __initdata ec_dmi_table[] = { | ||
| 925 | { | ||
| 926 | ec_skip_dsdt_scan, "Compal JFL92", { | ||
| 927 | DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"), | ||
| 928 | DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL}, | ||
| 929 | { | ||
| 930 | ec_flag_msi, "MSI hardware", { | ||
| 931 | DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star"), | ||
| 932 | DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star") }, NULL}, | ||
| 933 | { | ||
| 934 | ec_validate_ecdt, "ASUS hardware", { | ||
| 935 | DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL}, | ||
| 936 | {}, | ||
| 937 | }; | ||
| 938 | |||
| 939 | |||
| 902 | int __init acpi_ec_ecdt_probe(void) | 940 | int __init acpi_ec_ecdt_probe(void) |
| 903 | { | 941 | { |
| 904 | acpi_status status; | 942 | acpi_status status; |
| @@ -911,11 +949,7 @@ int __init acpi_ec_ecdt_probe(void) | |||
| 911 | /* | 949 | /* |
| 912 | * Generate a boot ec context | 950 | * Generate a boot ec context |
| 913 | */ | 951 | */ |
| 914 | if (dmi_name_in_vendors("Micro-Star") || | 952 | dmi_check_system(ec_dmi_table); |
| 915 | dmi_name_in_vendors("Notebook")) { | ||
| 916 | pr_info(PREFIX "Enabling special treatment for EC from MSI.\n"); | ||
| 917 | EC_FLAGS_MSI = 1; | ||
| 918 | } | ||
| 919 | status = acpi_get_table(ACPI_SIG_ECDT, 1, | 953 | status = acpi_get_table(ACPI_SIG_ECDT, 1, |
| 920 | (struct acpi_table_header **)&ecdt_ptr); | 954 | (struct acpi_table_header **)&ecdt_ptr); |
| 921 | if (ACPI_SUCCESS(status)) { | 955 | if (ACPI_SUCCESS(status)) { |
| @@ -926,7 +960,7 @@ int __init acpi_ec_ecdt_probe(void) | |||
| 926 | boot_ec->handle = ACPI_ROOT_OBJECT; | 960 | boot_ec->handle = ACPI_ROOT_OBJECT; |
| 927 | acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle); | 961 | acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle); |
| 928 | /* Don't trust ECDT, which comes from ASUSTek */ | 962 | /* Don't trust ECDT, which comes from ASUSTek */ |
| 929 | if (!dmi_name_in_vendors("ASUS") && EC_FLAGS_MSI == 0) | 963 | if (!EC_FLAGS_VALIDATE_ECDT) |
| 930 | goto install; | 964 | goto install; |
| 931 | saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); | 965 | saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); |
| 932 | if (!saved_ec) | 966 | if (!saved_ec) |
| @@ -934,6 +968,10 @@ int __init acpi_ec_ecdt_probe(void) | |||
| 934 | memcpy(saved_ec, boot_ec, sizeof(struct acpi_ec)); | 968 | memcpy(saved_ec, boot_ec, sizeof(struct acpi_ec)); |
| 935 | /* fall through */ | 969 | /* fall through */ |
| 936 | } | 970 | } |
| 971 | |||
| 972 | if (EC_FLAGS_SKIP_DSDT_SCAN) | ||
| 973 | return -ENODEV; | ||
| 974 | |||
| 937 | /* This workaround is needed only on some broken machines, | 975 | /* This workaround is needed only on some broken machines, |
| 938 | * which require early EC, but fail to provide ECDT */ | 976 | * which require early EC, but fail to provide ECDT */ |
| 939 | printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n"); | 977 | printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n"); |
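The new ec_dmi_table replaces the open-coded vendor checks: at boot dmi_check_system() walks the table and calls the callback of every entry whose DMI_MATCH strings all appear in the machine's DMI data, which sets the corresponding EC_FLAGS_* quirk. Adding a quirk for another machine is one more entry placed before the terminating {} of the table; the fragment below shows the shape of such an addition, with an entirely invented vendor and board name.

	/* Hypothetical extra entry for ec_dmi_table, in the same
	 * positional-initializer style as the entries above;
	 * "Example Corp" and "EX1234" are made-up strings, not a real quirk. */
	{
	ec_validate_ecdt, "Example Corp EX1234 hardware", {
	DMI_MATCH(DMI_BIOS_VENDOR, "Example Corp"),
	DMI_MATCH(DMI_BOARD_NAME, "EX1234") }, NULL},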
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c index d0d550d22a6d..f8b6f555ba52 100644 --- a/drivers/acpi/proc.c +++ b/drivers/acpi/proc.c | |||
| @@ -398,6 +398,8 @@ acpi_system_write_wakeup_device(struct file *file, | |||
| 398 | 398 | ||
| 399 | if (len > 4) | 399 | if (len > 4) |
| 400 | len = 4; | 400 | len = 4; |
| 401 | if (len < 0) | ||
| 402 | return -EFAULT; | ||
| 401 | 403 | ||
| 402 | if (copy_from_user(strbuf, buffer, len)) | 404 | if (copy_from_user(strbuf, buffer, len)) |
| 403 | return -EFAULT; | 405 | return -EFAULT; |
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index c2d4d6e09364..c567b46dfa0f 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c | |||
| @@ -863,13 +863,6 @@ static int acpi_processor_add(struct acpi_device *device) | |||
| 863 | goto err_remove_sysfs; | 863 | goto err_remove_sysfs; |
| 864 | } | 864 | } |
| 865 | 865 | ||
| 866 | if (pr->flags.throttling) { | ||
| 867 | printk(KERN_INFO PREFIX "%s [%s] (supports", | ||
| 868 | acpi_device_name(device), acpi_device_bid(device)); | ||
| 869 | printk(" %d throttling states", pr->throttling.state_count); | ||
| 870 | printk(")\n"); | ||
| 871 | } | ||
| 872 | |||
| 873 | return 0; | 866 | return 0; |
| 874 | 867 | ||
| 875 | err_remove_sysfs: | 868 | err_remove_sysfs: |
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 468921bed22f..14a7481c97d7 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
| @@ -1052,6 +1052,8 @@ static void acpi_device_set_id(struct acpi_device *device) | |||
| 1052 | device->flags.bus_address = 1; | 1052 | device->flags.bus_address = 1; |
| 1053 | } | 1053 | } |
| 1054 | 1054 | ||
| 1055 | kfree(info); | ||
| 1056 | |||
| 1055 | /* | 1057 | /* |
| 1056 | * Some devices don't reliably have _HIDs & _CIDs, so add | 1058 | * Some devices don't reliably have _HIDs & _CIDs, so add |
| 1057 | * synthetic HIDs to make sure drivers can find them. | 1059 | * synthetic HIDs to make sure drivers can find them. |
| @@ -1325,13 +1327,8 @@ static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops, | |||
| 1325 | struct acpi_device **child) | 1327 | struct acpi_device **child) |
| 1326 | { | 1328 | { |
| 1327 | acpi_status status; | 1329 | acpi_status status; |
| 1328 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
| 1329 | void *device = NULL; | 1330 | void *device = NULL; |
| 1330 | 1331 | ||
| 1331 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); | ||
| 1332 | printk(KERN_INFO PREFIX "Enumerating devices from [%s]\n", | ||
| 1333 | (char *) buffer.pointer); | ||
| 1334 | |||
| 1335 | status = acpi_bus_check_add(handle, 0, ops, &device); | 1332 | status = acpi_bus_check_add(handle, 0, ops, &device); |
| 1336 | if (ACPI_SUCCESS(status)) | 1333 | if (ACPI_SUCCESS(status)) |
| 1337 | acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, | 1334 | acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, |
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index 6fa7b0fdbdfd..eb4fa1943944 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c | |||
| @@ -38,6 +38,7 @@ | |||
| 38 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
| 39 | #include <linux/smp_lock.h> | 39 | #include <linux/smp_lock.h> |
| 40 | #include <linux/proc_fs.h> | 40 | #include <linux/proc_fs.h> |
| 41 | #include <linux/seq_file.h> | ||
| 41 | #include <linux/reboot.h> | 42 | #include <linux/reboot.h> |
| 42 | #include <linux/spinlock.h> | 43 | #include <linux/spinlock.h> |
| 43 | #include <linux/timer.h> | 44 | #include <linux/timer.h> |
| @@ -6422,16 +6423,10 @@ static bool DAC960_V2_ExecuteUserCommand(DAC960_Controller_T *Controller, | |||
| 6422 | return true; | 6423 | return true; |
| 6423 | } | 6424 | } |
| 6424 | 6425 | ||
| 6425 | 6426 | static int dac960_proc_show(struct seq_file *m, void *v) | |
| 6426 | /* | ||
| 6427 | DAC960_ProcReadStatus implements reading /proc/rd/status. | ||
| 6428 | */ | ||
| 6429 | |||
| 6430 | static int DAC960_ProcReadStatus(char *Page, char **Start, off_t Offset, | ||
| 6431 | int Count, int *EOF, void *Data) | ||
| 6432 | { | 6427 | { |
| 6433 | unsigned char *StatusMessage = "OK\n"; | 6428 | unsigned char *StatusMessage = "OK\n"; |
| 6434 | int ControllerNumber, BytesAvailable; | 6429 | int ControllerNumber; |
| 6435 | for (ControllerNumber = 0; | 6430 | for (ControllerNumber = 0; |
| 6436 | ControllerNumber < DAC960_ControllerCount; | 6431 | ControllerNumber < DAC960_ControllerCount; |
| 6437 | ControllerNumber++) | 6432 | ControllerNumber++) |
| @@ -6444,52 +6439,49 @@ static int DAC960_ProcReadStatus(char *Page, char **Start, off_t Offset, | |||
| 6444 | break; | 6439 | break; |
| 6445 | } | 6440 | } |
| 6446 | } | 6441 | } |
| 6447 | BytesAvailable = strlen(StatusMessage) - Offset; | 6442 | seq_puts(m, StatusMessage); |
| 6448 | if (Count >= BytesAvailable) | 6443 | return 0; |
| 6449 | { | ||
| 6450 | Count = BytesAvailable; | ||
| 6451 | *EOF = true; | ||
| 6452 | } | ||
| 6453 | if (Count <= 0) return 0; | ||
| 6454 | *Start = Page; | ||
| 6455 | memcpy(Page, &StatusMessage[Offset], Count); | ||
| 6456 | return Count; | ||
| 6457 | } | 6444 | } |
| 6458 | 6445 | ||
| 6446 | static int dac960_proc_open(struct inode *inode, struct file *file) | ||
| 6447 | { | ||
| 6448 | return single_open(file, dac960_proc_show, NULL); | ||
| 6449 | } | ||
| 6459 | 6450 | ||
| 6460 | /* | 6451 | static const struct file_operations dac960_proc_fops = { |
| 6461 | DAC960_ProcReadInitialStatus implements reading /proc/rd/cN/initial_status. | 6452 | .owner = THIS_MODULE, |
| 6462 | */ | 6453 | .open = dac960_proc_open, |
| 6454 | .read = seq_read, | ||
| 6455 | .llseek = seq_lseek, | ||
| 6456 | .release = single_release, | ||
| 6457 | }; | ||
| 6463 | 6458 | ||
| 6464 | static int DAC960_ProcReadInitialStatus(char *Page, char **Start, off_t Offset, | 6459 | static int dac960_initial_status_proc_show(struct seq_file *m, void *v) |
| 6465 | int Count, int *EOF, void *Data) | ||
| 6466 | { | 6460 | { |
| 6467 | DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; | 6461 | DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private; |
| 6468 | int BytesAvailable = Controller->InitialStatusLength - Offset; | 6462 | seq_printf(m, "%.*s", Controller->InitialStatusLength, Controller->CombinedStatusBuffer); |
| 6469 | if (Count >= BytesAvailable) | 6463 | return 0; |
| 6470 | { | ||
| 6471 | Count = BytesAvailable; | ||
| 6472 | *EOF = true; | ||
| 6473 | } | ||
| 6474 | if (Count <= 0) return 0; | ||
| 6475 | *Start = Page; | ||
| 6476 | memcpy(Page, &Controller->CombinedStatusBuffer[Offset], Count); | ||
| 6477 | return Count; | ||
| 6478 | } | 6464 | } |
| 6479 | 6465 | ||
| 6466 | static int dac960_initial_status_proc_open(struct inode *inode, struct file *file) | ||
| 6467 | { | ||
| 6468 | return single_open(file, dac960_initial_status_proc_show, PDE(inode)->data); | ||
| 6469 | } | ||
| 6480 | 6470 | ||
| 6481 | /* | 6471 | static const struct file_operations dac960_initial_status_proc_fops = { |
| 6482 | DAC960_ProcReadCurrentStatus implements reading /proc/rd/cN/current_status. | 6472 | .owner = THIS_MODULE, |
| 6483 | */ | 6473 | .open = dac960_initial_status_proc_open, |
| 6474 | .read = seq_read, | ||
| 6475 | .llseek = seq_lseek, | ||
| 6476 | .release = single_release, | ||
| 6477 | }; | ||
| 6484 | 6478 | ||
| 6485 | static int DAC960_ProcReadCurrentStatus(char *Page, char **Start, off_t Offset, | 6479 | static int dac960_current_status_proc_show(struct seq_file *m, void *v) |
| 6486 | int Count, int *EOF, void *Data) | ||
| 6487 | { | 6480 | { |
| 6488 | DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; | 6481 | DAC960_Controller_T *Controller = (DAC960_Controller_T *) m->private; |
| 6489 | unsigned char *StatusMessage = | 6482 | unsigned char *StatusMessage = |
| 6490 | "No Rebuild or Consistency Check in Progress\n"; | 6483 | "No Rebuild or Consistency Check in Progress\n"; |
| 6491 | int ProgressMessageLength = strlen(StatusMessage); | 6484 | int ProgressMessageLength = strlen(StatusMessage); |
| 6492 | int BytesAvailable; | ||
| 6493 | if (jiffies != Controller->LastCurrentStatusTime) | 6485 | if (jiffies != Controller->LastCurrentStatusTime) |
| 6494 | { | 6486 | { |
| 6495 | Controller->CurrentStatusLength = 0; | 6487 | Controller->CurrentStatusLength = 0; |
| @@ -6513,49 +6505,41 @@ static int DAC960_ProcReadCurrentStatus(char *Page, char **Start, off_t Offset, | |||
| 6513 | } | 6505 | } |
| 6514 | Controller->LastCurrentStatusTime = jiffies; | 6506 | Controller->LastCurrentStatusTime = jiffies; |
| 6515 | } | 6507 | } |
| 6516 | BytesAvailable = Controller->CurrentStatusLength - Offset; | 6508 | seq_printf(m, "%.*s", Controller->CurrentStatusLength, Controller->CurrentStatusBuffer); |
| 6517 | if (Count >= BytesAvailable) | 6509 | return 0; |
| 6518 | { | ||
| 6519 | Count = BytesAvailable; | ||
| 6520 | *EOF = true; | ||
| 6521 | } | ||
| 6522 | if (Count <= 0) return 0; | ||
| 6523 | *Start = Page; | ||
| 6524 | memcpy(Page, &Controller->CurrentStatusBuffer[Offset], Count); | ||
| 6525 | return Count; | ||
| 6526 | } | 6510 | } |
| 6527 | 6511 | ||
| 6512 | static int dac960_current_status_proc_open(struct inode *inode, struct file *file) | ||
| 6513 | { | ||
| 6514 | return single_open(file, dac960_current_status_proc_show, PDE(inode)->data); | ||
| 6515 | } | ||
| 6528 | 6516 | ||
| 6529 | /* | 6517 | static const struct file_operations dac960_current_status_proc_fops = { |
| 6530 | DAC960_ProcReadUserCommand implements reading /proc/rd/cN/user_command. | 6518 | .owner = THIS_MODULE, |
| 6531 | */ | 6519 | .open = dac960_current_status_proc_open, |
| 6520 | .read = seq_read, | ||
| 6521 | .llseek = seq_lseek, | ||
| 6522 | .release = single_release, | ||
| 6523 | }; | ||
| 6532 | 6524 | ||
| 6533 | static int DAC960_ProcReadUserCommand(char *Page, char **Start, off_t Offset, | 6525 | static int dac960_user_command_proc_show(struct seq_file *m, void *v) |
| 6534 | int Count, int *EOF, void *Data) | ||
| 6535 | { | 6526 | { |
| 6536 | DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; | 6527 | DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private; |
| 6537 | int BytesAvailable = Controller->UserStatusLength - Offset; | ||
| 6538 | if (Count >= BytesAvailable) | ||
| 6539 | { | ||
| 6540 | Count = BytesAvailable; | ||
| 6541 | *EOF = true; | ||
| 6542 | } | ||
| 6543 | if (Count <= 0) return 0; | ||
| 6544 | *Start = Page; | ||
| 6545 | memcpy(Page, &Controller->UserStatusBuffer[Offset], Count); | ||
| 6546 | return Count; | ||
| 6547 | } | ||
| 6548 | 6528 | ||
| 6529 | seq_printf(m, "%.*s", Controller->UserStatusLength, Controller->UserStatusBuffer); | ||
| 6530 | return 0; | ||
| 6531 | } | ||
| 6549 | 6532 | ||
| 6550 | /* | 6533 | static int dac960_user_command_proc_open(struct inode *inode, struct file *file) |
| 6551 | DAC960_ProcWriteUserCommand implements writing /proc/rd/cN/user_command. | 6534 | { |
| 6552 | */ | 6535 | return single_open(file, dac960_user_command_proc_show, PDE(inode)->data); |
| 6536 | } | ||
| 6553 | 6537 | ||
| 6554 | static int DAC960_ProcWriteUserCommand(struct file *file, | 6538 | static ssize_t dac960_user_command_proc_write(struct file *file, |
| 6555 | const char __user *Buffer, | 6539 | const char __user *Buffer, |
| 6556 | unsigned long Count, void *Data) | 6540 | size_t Count, loff_t *pos) |
| 6557 | { | 6541 | { |
| 6558 | DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; | 6542 | DAC960_Controller_T *Controller = (DAC960_Controller_T *) PDE(file->f_path.dentry->d_inode)->data; |
| 6559 | unsigned char CommandBuffer[80]; | 6543 | unsigned char CommandBuffer[80]; |
| 6560 | int Length; | 6544 | int Length; |
| 6561 | if (Count > sizeof(CommandBuffer)-1) return -EINVAL; | 6545 | if (Count > sizeof(CommandBuffer)-1) return -EINVAL; |
| @@ -6572,6 +6556,14 @@ static int DAC960_ProcWriteUserCommand(struct file *file, | |||
| 6572 | ? Count : -EBUSY); | 6556 | ? Count : -EBUSY); |
| 6573 | } | 6557 | } |
| 6574 | 6558 | ||
| 6559 | static const struct file_operations dac960_user_command_proc_fops = { | ||
| 6560 | .owner = THIS_MODULE, | ||
| 6561 | .open = dac960_user_command_proc_open, | ||
| 6562 | .read = seq_read, | ||
| 6563 | .llseek = seq_lseek, | ||
| 6564 | .release = single_release, | ||
| 6565 | .write = dac960_user_command_proc_write, | ||
| 6566 | }; | ||
| 6575 | 6567 | ||
| 6576 | /* | 6568 | /* |
| 6577 | DAC960_CreateProcEntries creates the /proc/rd/... entries for the | 6569 | DAC960_CreateProcEntries creates the /proc/rd/... entries for the |
| @@ -6586,23 +6578,17 @@ static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller) | |||
| 6586 | 6578 | ||
| 6587 | if (DAC960_ProcDirectoryEntry == NULL) { | 6579 | if (DAC960_ProcDirectoryEntry == NULL) { |
| 6588 | DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL); | 6580 | DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL); |
| 6589 | StatusProcEntry = create_proc_read_entry("status", 0, | 6581 | StatusProcEntry = proc_create("status", 0, |
| 6590 | DAC960_ProcDirectoryEntry, | 6582 | DAC960_ProcDirectoryEntry, |
| 6591 | DAC960_ProcReadStatus, NULL); | 6583 | &dac960_proc_fops); |
| 6592 | } | 6584 | } |
| 6593 | 6585 | ||
| 6594 | sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber); | 6586 | sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber); |
| 6595 | ControllerProcEntry = proc_mkdir(Controller->ControllerName, | 6587 | ControllerProcEntry = proc_mkdir(Controller->ControllerName, |
| 6596 | DAC960_ProcDirectoryEntry); | 6588 | DAC960_ProcDirectoryEntry); |
| 6597 | create_proc_read_entry("initial_status", 0, ControllerProcEntry, | 6589 | proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller); |
| 6598 | DAC960_ProcReadInitialStatus, Controller); | 6590 | proc_create_data("current_status", 0, ControllerProcEntry, &dac960_current_status_proc_fops, Controller); |
| 6599 | create_proc_read_entry("current_status", 0, ControllerProcEntry, | 6591 | UserCommandProcEntry = proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller); |
| 6600 | DAC960_ProcReadCurrentStatus, Controller); | ||
| 6601 | UserCommandProcEntry = | ||
| 6602 | create_proc_read_entry("user_command", S_IWUSR | S_IRUSR, | ||
| 6603 | ControllerProcEntry, DAC960_ProcReadUserCommand, | ||
| 6604 | Controller); | ||
| 6605 | UserCommandProcEntry->write_proc = DAC960_ProcWriteUserCommand; | ||
| 6606 | Controller->ControllerProcEntry = ControllerProcEntry; | 6592 | Controller->ControllerProcEntry = ControllerProcEntry; |
| 6607 | } | 6593 | } |
| 6608 | 6594 | ||
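The DAC960 hunks above are a mechanical conversion from the old create_proc_read_entry()/read_proc interface to seq_file: each read handler becomes a *_show() routine that emits its whole output through seq_printf()/seq_puts(), single_open() wires it up with the per-entry private pointer, and proc_create_data() registers an ordinary file_operations. A stripped-down sketch of the same pattern follows; the example_* names are invented and it targets the same kernel era as the patch (hence PDE(inode)->data).

/* Minimal sketch of the seq_file + proc_create_data() pattern used in the
 * conversion above; all example_* names are illustrative. */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct example_ctrl {
	const char *status;
};

static struct example_ctrl example = { .status = "OK\n" };

static int example_proc_show(struct seq_file *m, void *v)
{
	struct example_ctrl *ctrl = m->private;	/* set by single_open() */

	seq_puts(m, ctrl->status);
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	/* PDE(inode)->data is the pointer handed to proc_create_data() */
	return single_open(file, example_proc_show, PDE(inode)->data);
}

static const struct file_operations example_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= example_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init example_init(void)
{
	if (!proc_create_data("example_status", 0, NULL,
			      &example_proc_fops, &example))
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	remove_proc_entry("example_status", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");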
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 1ece0b47b581..fb5be2d95d52 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c | |||
| @@ -36,9 +36,11 @@ | |||
| 36 | #include <linux/proc_fs.h> | 36 | #include <linux/proc_fs.h> |
| 37 | #include <linux/seq_file.h> | 37 | #include <linux/seq_file.h> |
| 38 | #include <linux/init.h> | 38 | #include <linux/init.h> |
| 39 | #include <linux/jiffies.h> | ||
| 39 | #include <linux/hdreg.h> | 40 | #include <linux/hdreg.h> |
| 40 | #include <linux/spinlock.h> | 41 | #include <linux/spinlock.h> |
| 41 | #include <linux/compat.h> | 42 | #include <linux/compat.h> |
| 43 | #include <linux/mutex.h> | ||
| 42 | #include <asm/uaccess.h> | 44 | #include <asm/uaccess.h> |
| 43 | #include <asm/io.h> | 45 | #include <asm/io.h> |
| 44 | 46 | ||
| @@ -155,6 +157,10 @@ static struct board_type products[] = { | |||
| 155 | 157 | ||
| 156 | static ctlr_info_t *hba[MAX_CTLR]; | 158 | static ctlr_info_t *hba[MAX_CTLR]; |
| 157 | 159 | ||
| 160 | static struct task_struct *cciss_scan_thread; | ||
| 161 | static DEFINE_MUTEX(scan_mutex); | ||
| 162 | static LIST_HEAD(scan_q); | ||
| 163 | |||
| 158 | static void do_cciss_request(struct request_queue *q); | 164 | static void do_cciss_request(struct request_queue *q); |
| 159 | static irqreturn_t do_cciss_intr(int irq, void *dev_id); | 165 | static irqreturn_t do_cciss_intr(int irq, void *dev_id); |
| 160 | static int cciss_open(struct block_device *bdev, fmode_t mode); | 166 | static int cciss_open(struct block_device *bdev, fmode_t mode); |
| @@ -164,9 +170,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
| 164 | static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); | 170 | static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); |
| 165 | 171 | ||
| 166 | static int cciss_revalidate(struct gendisk *disk); | 172 | static int cciss_revalidate(struct gendisk *disk); |
| 167 | static int rebuild_lun_table(ctlr_info_t *h, int first_time); | 173 | static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl); |
| 168 | static int deregister_disk(ctlr_info_t *h, int drv_index, | 174 | static int deregister_disk(ctlr_info_t *h, int drv_index, |
| 169 | int clear_all); | 175 | int clear_all, int via_ioctl); |
| 170 | 176 | ||
| 171 | static void cciss_read_capacity(int ctlr, int logvol, int withirq, | 177 | static void cciss_read_capacity(int ctlr, int logvol, int withirq, |
| 172 | sector_t *total_size, unsigned int *block_size); | 178 | sector_t *total_size, unsigned int *block_size); |
| @@ -189,8 +195,13 @@ static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c, | |||
| 189 | static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c); | 195 | static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c); |
| 190 | 196 | ||
| 191 | static void fail_all_cmds(unsigned long ctlr); | 197 | static void fail_all_cmds(unsigned long ctlr); |
| 198 | static int add_to_scan_list(struct ctlr_info *h); | ||
| 192 | static int scan_thread(void *data); | 199 | static int scan_thread(void *data); |
| 193 | static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c); | 200 | static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c); |
| 201 | static void cciss_hba_release(struct device *dev); | ||
| 202 | static void cciss_device_release(struct device *dev); | ||
| 203 | static void cciss_free_gendisk(ctlr_info_t *h, int drv_index); | ||
| 204 | static void cciss_free_drive_info(ctlr_info_t *h, int drv_index); | ||
| 194 | 205 | ||
| 195 | #ifdef CONFIG_PROC_FS | 206 | #ifdef CONFIG_PROC_FS |
| 196 | static void cciss_procinit(int i); | 207 | static void cciss_procinit(int i); |
| @@ -245,7 +256,10 @@ static inline void removeQ(CommandList_struct *c) | |||
| 245 | 256 | ||
| 246 | #include "cciss_scsi.c" /* For SCSI tape support */ | 257 | #include "cciss_scsi.c" /* For SCSI tape support */ |
| 247 | 258 | ||
| 248 | #define RAID_UNKNOWN 6 | 259 | static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", |
| 260 | "UNKNOWN" | ||
| 261 | }; | ||
| 262 | #define RAID_UNKNOWN (sizeof(raid_label) / sizeof(raid_label[0])-1) | ||
| 249 | 263 | ||
| 250 | #ifdef CONFIG_PROC_FS | 264 | #ifdef CONFIG_PROC_FS |
| 251 | 265 | ||
| @@ -255,9 +269,6 @@ static inline void removeQ(CommandList_struct *c) | |||
| 255 | #define ENG_GIG 1000000000 | 269 | #define ENG_GIG 1000000000 |
| 256 | #define ENG_GIG_FACTOR (ENG_GIG/512) | 270 | #define ENG_GIG_FACTOR (ENG_GIG/512) |
| 257 | #define ENGAGE_SCSI "engage scsi" | 271 | #define ENGAGE_SCSI "engage scsi" |
| 258 | static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", | ||
| 259 | "UNKNOWN" | ||
| 260 | }; | ||
| 261 | 272 | ||
| 262 | static struct proc_dir_entry *proc_cciss; | 273 | static struct proc_dir_entry *proc_cciss; |
| 263 | 274 | ||
| @@ -318,7 +329,7 @@ static int cciss_seq_show(struct seq_file *seq, void *v) | |||
| 318 | ctlr_info_t *h = seq->private; | 329 | ctlr_info_t *h = seq->private; |
| 319 | unsigned ctlr = h->ctlr; | 330 | unsigned ctlr = h->ctlr; |
| 320 | loff_t *pos = v; | 331 | loff_t *pos = v; |
| 321 | drive_info_struct *drv = &h->drv[*pos]; | 332 | drive_info_struct *drv = h->drv[*pos]; |
| 322 | 333 | ||
| 323 | if (*pos > h->highest_lun) | 334 | if (*pos > h->highest_lun) |
| 324 | return 0; | 335 | return 0; |
| @@ -331,7 +342,7 @@ static int cciss_seq_show(struct seq_file *seq, void *v) | |||
| 331 | vol_sz_frac *= 100; | 342 | vol_sz_frac *= 100; |
| 332 | sector_div(vol_sz_frac, ENG_GIG_FACTOR); | 343 | sector_div(vol_sz_frac, ENG_GIG_FACTOR); |
| 333 | 344 | ||
| 334 | if (drv->raid_level > 5) | 345 | if (drv->raid_level < 0 || drv->raid_level > RAID_UNKNOWN) |
| 335 | drv->raid_level = RAID_UNKNOWN; | 346 | drv->raid_level = RAID_UNKNOWN; |
| 336 | seq_printf(seq, "cciss/c%dd%d:" | 347 | seq_printf(seq, "cciss/c%dd%d:" |
| 337 | "\t%4u.%02uGB\tRAID %s\n", | 348 | "\t%4u.%02uGB\tRAID %s\n", |
| @@ -454,9 +465,19 @@ static void __devinit cciss_procinit(int i) | |||
| 454 | #define to_hba(n) container_of(n, struct ctlr_info, dev) | 465 | #define to_hba(n) container_of(n, struct ctlr_info, dev) |
| 455 | #define to_drv(n) container_of(n, drive_info_struct, dev) | 466 | #define to_drv(n) container_of(n, drive_info_struct, dev) |
| 456 | 467 | ||
| 457 | static struct device_type cciss_host_type = { | 468 | static ssize_t host_store_rescan(struct device *dev, |
| 458 | .name = "cciss_host", | 469 | struct device_attribute *attr, |
| 459 | }; | 470 | const char *buf, size_t count) |
| 471 | { | ||
| 472 | struct ctlr_info *h = to_hba(dev); | ||
| 473 | |||
| 474 | add_to_scan_list(h); | ||
| 475 | wake_up_process(cciss_scan_thread); | ||
| 476 | wait_for_completion_interruptible(&h->scan_wait); | ||
| 477 | |||
| 478 | return count; | ||
| 479 | } | ||
| 480 | DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); | ||
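host_store_rescan() above is the sysfs half of a wake-and-wait handshake: the store queues the controller on scan_q under scan_mutex, wakes the single cciss_scan_thread, and blocks on h->scan_wait until the scanner has re-read the logical-drive topology, so the write only returns once the rescan is done. The sketch below shows the general shape of that handshake under simplifying assumptions: demo_* names are invented, a single pending flag stands in for the controller queue, and a waitqueue is used instead of waking the scanner task directly.

/* Simplified wake-and-wait handshake between a sysfs store and a
 * worker kthread; not the driver's actual scan_thread. */
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/module.h>

static DEFINE_MUTEX(demo_lock);
static bool demo_pending;
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static DECLARE_COMPLETION(demo_done);
static struct task_struct *demo_thread;

static int demo_worker(void *unused)
{
	while (!kthread_should_stop()) {
		wait_event_interruptible(demo_wq,
				demo_pending || kthread_should_stop());
		if (kthread_should_stop())
			break;

		mutex_lock(&demo_lock);
		demo_pending = false;
		mutex_unlock(&demo_lock);

		/* ... re-read the topology here ... */

		complete(&demo_done);		/* unblock the requester */
	}
	return 0;
}

/* What a store method would do: queue the request, wake the worker,
 * and return only after the work has been carried out. */
void demo_request_and_wait(void)
{
	mutex_lock(&demo_lock);
	demo_pending = true;
	mutex_unlock(&demo_lock);
	wake_up(&demo_wq);
	wait_for_completion_interruptible(&demo_done);
}

static int __init demo_init(void)
{
	demo_thread = kthread_run(demo_worker, NULL, "demo_scan");
	return IS_ERR(demo_thread) ? PTR_ERR(demo_thread) : 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_thread);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");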
| 460 | 481 | ||
| 461 | static ssize_t dev_show_unique_id(struct device *dev, | 482 | static ssize_t dev_show_unique_id(struct device *dev, |
| 462 | struct device_attribute *attr, | 483 | struct device_attribute *attr, |
| @@ -560,11 +581,101 @@ static ssize_t dev_show_rev(struct device *dev, | |||
| 560 | } | 581 | } |
| 561 | DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL); | 582 | DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL); |
| 562 | 583 | ||
| 584 | static ssize_t cciss_show_lunid(struct device *dev, | ||
| 585 | struct device_attribute *attr, char *buf) | ||
| 586 | { | ||
| 587 | drive_info_struct *drv = to_drv(dev); | ||
| 588 | struct ctlr_info *h = to_hba(drv->dev.parent); | ||
| 589 | unsigned long flags; | ||
| 590 | unsigned char lunid[8]; | ||
| 591 | |||
| 592 | spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); | ||
| 593 | if (h->busy_configuring) { | ||
| 594 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | ||
| 595 | return -EBUSY; | ||
| 596 | } | ||
| 597 | if (!drv->heads) { | ||
| 598 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | ||
| 599 | return -ENOTTY; | ||
| 600 | } | ||
| 601 | memcpy(lunid, drv->LunID, sizeof(lunid)); | ||
| 602 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | ||
| 603 | return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", | ||
| 604 | lunid[0], lunid[1], lunid[2], lunid[3], | ||
| 605 | lunid[4], lunid[5], lunid[6], lunid[7]); | ||
| 606 | } | ||
| 607 | DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL); | ||
| 608 | |||
| 609 | static ssize_t cciss_show_raid_level(struct device *dev, | ||
| 610 | struct device_attribute *attr, char *buf) | ||
| 611 | { | ||
| 612 | drive_info_struct *drv = to_drv(dev); | ||
| 613 | struct ctlr_info *h = to_hba(drv->dev.parent); | ||
| 614 | int raid; | ||
| 615 | unsigned long flags; | ||
| 616 | |||
| 617 | spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); | ||
| 618 | if (h->busy_configuring) { | ||
| 619 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | ||
| 620 | return -EBUSY; | ||
| 621 | } | ||
| 622 | raid = drv->raid_level; | ||
| 623 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | ||
| 624 | if (raid < 0 || raid > RAID_UNKNOWN) | ||
| 625 | raid = RAID_UNKNOWN; | ||
| 626 | |||
| 627 | return snprintf(buf, strlen(raid_label[raid]) + 7, "RAID %s\n", | ||
| 628 | raid_label[raid]); | ||
| 629 | } | ||
| 630 | DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL); | ||
| 631 | |||
| 632 | static ssize_t cciss_show_usage_count(struct device *dev, | ||
| 633 | struct device_attribute *attr, char *buf) | ||
| 634 | { | ||
| 635 | drive_info_struct *drv = to_drv(dev); | ||
| 636 | struct ctlr_info *h = to_hba(drv->dev.parent); | ||
| 637 | unsigned long flags; | ||
| 638 | int count; | ||
| 639 | |||
| 640 | spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); | ||
| 641 | if (h->busy_configuring) { | ||
| 642 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | ||
| 643 | return -EBUSY; | ||
| 644 | } | ||
| 645 | count = drv->usage_count; | ||
| 646 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | ||
| 647 | return snprintf(buf, 20, "%d\n", count); | ||
| 648 | } | ||
| 649 | DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL); | ||
| 650 | |||
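A note on the buffer bounds in the three show routines above: the lunid output is exactly "0x", sixteen hex digits and a newline, i.e. 19 characters, so snprintf() is given a 20-byte limit to leave room for the terminating NUL; raid_level likewise bounds its output at strlen(label) + 7, covering "RAID ", the newline and the NUL. The small host-side check below (with a made-up LUN ID) simply confirms that arithmetic.

/* Illustration of the lunid formatting and its 20-byte bound;
 * the LUN ID bytes are invented. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char lunid[8] = { 0x00, 0x00, 0x00, 0x01,
				   0x00, 0x00, 0x00, 0x40 };
	char buf[20];
	int n;

	n = snprintf(buf, sizeof(buf),
		     "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		     lunid[0], lunid[1], lunid[2], lunid[3],
		     lunid[4], lunid[5], lunid[6], lunid[7]);

	/* 2 ("0x") + 16 hex digits + 1 ('\n') = 19 chars, 20 with the NUL */
	printf("would write %d chars: %s", n, buf);
	return strlen(buf) == 19 ? 0 : 1;
}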
| 651 | static struct attribute *cciss_host_attrs[] = { | ||
| 652 | &dev_attr_rescan.attr, | ||
| 653 | NULL | ||
| 654 | }; | ||
| 655 | |||
| 656 | static struct attribute_group cciss_host_attr_group = { | ||
| 657 | .attrs = cciss_host_attrs, | ||
| 658 | }; | ||
| 659 | |||
| 660 | static const struct attribute_group *cciss_host_attr_groups[] = { | ||
| 661 | &cciss_host_attr_group, | ||
| 662 | NULL | ||
| 663 | }; | ||
| 664 | |||
| 665 | static struct device_type cciss_host_type = { | ||
| 666 | .name = "cciss_host", | ||
| 667 | .groups = cciss_host_attr_groups, | ||
| 668 | .release = cciss_hba_release, | ||
| 669 | }; | ||
| 670 | |||
| 563 | static struct attribute *cciss_dev_attrs[] = { | 671 | static struct attribute *cciss_dev_attrs[] = { |
| 564 | &dev_attr_unique_id.attr, | 672 | &dev_attr_unique_id.attr, |
| 565 | &dev_attr_model.attr, | 673 | &dev_attr_model.attr, |
| 566 | &dev_attr_vendor.attr, | 674 | &dev_attr_vendor.attr, |
| 567 | &dev_attr_rev.attr, | 675 | &dev_attr_rev.attr, |
| 676 | &dev_attr_lunid.attr, | ||
| 677 | &dev_attr_raid_level.attr, | ||
| 678 | &dev_attr_usage_count.attr, | ||
| 568 | NULL | 679 | NULL |
| 569 | }; | 680 | }; |
| 570 | 681 | ||
| @@ -580,12 +691,24 @@ static const struct attribute_group *cciss_dev_attr_groups[] = { | |||
| 580 | static struct device_type cciss_dev_type = { | 691 | static struct device_type cciss_dev_type = { |
| 581 | .name = "cciss_device", | 692 | .name = "cciss_device", |
| 582 | .groups = cciss_dev_attr_groups, | 693 | .groups = cciss_dev_attr_groups, |
| 694 | .release = cciss_device_release, | ||
| 583 | }; | 695 | }; |
| 584 | 696 | ||
| 585 | static struct bus_type cciss_bus_type = { | 697 | static struct bus_type cciss_bus_type = { |
| 586 | .name = "cciss", | 698 | .name = "cciss", |
| 587 | }; | 699 | }; |
| 588 | 700 | ||
| 701 | /* | ||
| 702 | * cciss_hba_release is called when the reference count | ||
| 703 | * of h->dev goes to zero. | ||
| 704 | */ | ||
| 705 | static void cciss_hba_release(struct device *dev) | ||
| 706 | { | ||
| 707 | /* | ||
| 708 | * nothing to do, but need this to avoid a warning | ||
| 709 | * about not having a release handler from lib/kref.c. | ||
| 710 | */ | ||
| 711 | } | ||
| 589 | 712 | ||
| 590 | /* | 713 | /* |
| 591 | * Initialize sysfs entry for each controller. This sets up and registers | 714 | * Initialize sysfs entry for each controller. This sets up and registers |
| @@ -609,6 +732,16 @@ static int cciss_create_hba_sysfs_entry(struct ctlr_info *h) | |||
| 609 | static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h) | 732 | static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h) |
| 610 | { | 733 | { |
| 611 | device_del(&h->dev); | 734 | device_del(&h->dev); |
| 735 | put_device(&h->dev); /* final put. */ | ||
| 736 | } | ||
| 737 | |||
| 738 | /* cciss_device_release is called when the reference count | ||
| 739 | * of h->drv[x]dev goes to zero. | ||
| 740 | */ | ||
| 741 | static void cciss_device_release(struct device *dev) | ||
| 742 | { | ||
| 743 | drive_info_struct *drv = to_drv(dev); | ||
| 744 | kfree(drv); | ||
| 612 | } | 745 | } |
| 613 | 746 | ||
| 614 | /* | 747 | /* |
| @@ -617,24 +750,39 @@ static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h) | |||
| 617 | * /sys/bus/pci/devices/<dev/ccis#/. We also create a link from | 750 | * /sys/bus/pci/devices/<dev/ccis#/. We also create a link from |
| 618 | * /sys/block/cciss!c#d# to this entry. | 751 | * /sys/block/cciss!c#d# to this entry. |
| 619 | */ | 752 | */ |
| 620 | static int cciss_create_ld_sysfs_entry(struct ctlr_info *h, | 753 | static long cciss_create_ld_sysfs_entry(struct ctlr_info *h, |
| 621 | drive_info_struct *drv, | ||
| 622 | int drv_index) | 754 | int drv_index) |
| 623 | { | 755 | { |
| 624 | device_initialize(&drv->dev); | 756 | struct device *dev; |
| 625 | drv->dev.type = &cciss_dev_type; | 757 | |
| 626 | drv->dev.bus = &cciss_bus_type; | 758 | if (h->drv[drv_index]->device_initialized) |
| 627 | dev_set_name(&drv->dev, "c%dd%d", h->ctlr, drv_index); | 759 | return 0; |
| 628 | drv->dev.parent = &h->dev; | 760 | |
| 629 | return device_add(&drv->dev); | 761 | dev = &h->drv[drv_index]->dev; |
| 762 | device_initialize(dev); | ||
| 763 | dev->type = &cciss_dev_type; | ||
| 764 | dev->bus = &cciss_bus_type; | ||
| 765 | dev_set_name(dev, "c%dd%d", h->ctlr, drv_index); | ||
| 766 | dev->parent = &h->dev; | ||
| 767 | h->drv[drv_index]->device_initialized = 1; | ||
| 768 | return device_add(dev); | ||
| 630 | } | 769 | } |
| 631 | 770 | ||
| 632 | /* | 771 | /* |
| 633 | * Remove sysfs entries for a logical drive. | 772 | * Remove sysfs entries for a logical drive. |
| 634 | */ | 773 | */ |
| 635 | static void cciss_destroy_ld_sysfs_entry(drive_info_struct *drv) | 774 | static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index, |
| 775 | int ctlr_exiting) | ||
| 636 | { | 776 | { |
| 637 | device_del(&drv->dev); | 777 | struct device *dev = &h->drv[drv_index]->dev; |
| 778 | |||
| 779 | /* special case for c*d0, we only destroy it on controller exit */ | ||
| 780 | if (drv_index == 0 && !ctlr_exiting) | ||
| 781 | return; | ||
| 782 | |||
| 783 | device_del(dev); | ||
| 784 | put_device(dev); /* the "final" put. */ | ||
| 785 | h->drv[drv_index] = NULL; | ||
| 638 | } | 786 | } |
| 639 | 787 | ||
| 640 | /* | 788 | /* |
| @@ -751,7 +899,7 @@ static int cciss_open(struct block_device *bdev, fmode_t mode) | |||
| 751 | printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name); | 899 | printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name); |
| 752 | #endif /* CCISS_DEBUG */ | 900 | #endif /* CCISS_DEBUG */ |
| 753 | 901 | ||
| 754 | if (host->busy_initializing || drv->busy_configuring) | 902 | if (drv->busy_configuring) |
| 755 | return -EBUSY; | 903 | return -EBUSY; |
| 756 | /* | 904 | /* |
| 757 | * Root is allowed to open raw volume zero even if it's not configured | 905 | * Root is allowed to open raw volume zero even if it's not configured |
| @@ -767,7 +915,8 @@ static int cciss_open(struct block_device *bdev, fmode_t mode) | |||
| 767 | if (MINOR(bdev->bd_dev) & 0x0f) { | 915 | if (MINOR(bdev->bd_dev) & 0x0f) { |
| 768 | return -ENXIO; | 916 | return -ENXIO; |
| 769 | /* if it is, make sure we have a LUN ID */ | 917 | /* if it is, make sure we have a LUN ID */ |
| 770 | } else if (drv->LunID == 0) { | 918 | } else if (memcmp(drv->LunID, CTLR_LUNID, |
| 919 | sizeof(drv->LunID))) { | ||
| 771 | return -ENXIO; | 920 | return -ENXIO; |
| 772 | } | 921 | } |
| 773 | } | 922 | } |
| @@ -1132,12 +1281,13 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
| 1132 | case CCISS_DEREGDISK: | 1281 | case CCISS_DEREGDISK: |
| 1133 | case CCISS_REGNEWD: | 1282 | case CCISS_REGNEWD: |
| 1134 | case CCISS_REVALIDVOLS: | 1283 | case CCISS_REVALIDVOLS: |
| 1135 | return rebuild_lun_table(host, 0); | 1284 | return rebuild_lun_table(host, 0, 1); |
| 1136 | 1285 | ||
| 1137 | case CCISS_GETLUNINFO:{ | 1286 | case CCISS_GETLUNINFO:{ |
| 1138 | LogvolInfo_struct luninfo; | 1287 | LogvolInfo_struct luninfo; |
| 1139 | 1288 | ||
| 1140 | luninfo.LunID = drv->LunID; | 1289 | memcpy(&luninfo.LunID, drv->LunID, |
| 1290 | sizeof(luninfo.LunID)); | ||
| 1141 | luninfo.num_opens = drv->usage_count; | 1291 | luninfo.num_opens = drv->usage_count; |
| 1142 | luninfo.num_parts = 0; | 1292 | luninfo.num_parts = 0; |
| 1143 | if (copy_to_user(argp, &luninfo, | 1293 | if (copy_to_user(argp, &luninfo, |
| @@ -1475,7 +1625,10 @@ static void cciss_check_queues(ctlr_info_t *h) | |||
| 1475 | /* make sure the disk has been added and the drive is real | 1625 | /* make sure the disk has been added and the drive is real |
| 1476 | * because this can be called from the middle of init_one. | 1626 | * because this can be called from the middle of init_one. |
| 1477 | */ | 1627 | */ |
| 1478 | if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads)) | 1628 | if (!h->drv[curr_queue]) |
| 1629 | continue; | ||
| 1630 | if (!(h->drv[curr_queue]->queue) || | ||
| 1631 | !(h->drv[curr_queue]->heads)) | ||
| 1479 | continue; | 1632 | continue; |
| 1480 | blk_start_queue(h->gendisk[curr_queue]->queue); | 1633 | blk_start_queue(h->gendisk[curr_queue]->queue); |
| 1481 | 1634 | ||
| @@ -1532,13 +1685,11 @@ static void cciss_softirq_done(struct request *rq) | |||
| 1532 | spin_unlock_irqrestore(&h->lock, flags); | 1685 | spin_unlock_irqrestore(&h->lock, flags); |
| 1533 | } | 1686 | } |
| 1534 | 1687 | ||
| 1535 | static void log_unit_to_scsi3addr(ctlr_info_t *h, unsigned char scsi3addr[], | 1688 | static inline void log_unit_to_scsi3addr(ctlr_info_t *h, |
| 1536 | uint32_t log_unit) | 1689 | unsigned char scsi3addr[], uint32_t log_unit) |
| 1537 | { | 1690 | { |
| 1538 | log_unit = h->drv[log_unit].LunID & 0x03fff; | 1691 | memcpy(scsi3addr, h->drv[log_unit]->LunID, |
| 1539 | memset(&scsi3addr[4], 0, 4); | 1692 | sizeof(h->drv[log_unit]->LunID)); |
| 1540 | memcpy(&scsi3addr[0], &log_unit, 4); | ||
| 1541 | scsi3addr[3] |= 0x40; | ||
| 1542 | } | 1693 | } |
| 1543 | 1694 | ||
| 1544 | /* This function gets the SCSI vendor, model, and revision of a logical drive | 1695 | /* This function gets the SCSI vendor, model, and revision of a logical drive |
| @@ -1615,16 +1766,23 @@ static void cciss_get_serial_no(int ctlr, int logvol, int withirq, | |||
| 1615 | return; | 1766 | return; |
| 1616 | } | 1767 | } |
| 1617 | 1768 | ||
| 1618 | static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, | 1769 | /* |
| 1770 | * cciss_add_disk sets up the block device queue for a logical drive | ||
| 1771 | */ | ||
| 1772 | static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, | ||
| 1619 | int drv_index) | 1773 | int drv_index) |
| 1620 | { | 1774 | { |
| 1621 | disk->queue = blk_init_queue(do_cciss_request, &h->lock); | 1775 | disk->queue = blk_init_queue(do_cciss_request, &h->lock); |
| 1776 | if (!disk->queue) | ||
| 1777 | goto init_queue_failure; | ||
| 1622 | sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index); | 1778 | sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index); |
| 1623 | disk->major = h->major; | 1779 | disk->major = h->major; |
| 1624 | disk->first_minor = drv_index << NWD_SHIFT; | 1780 | disk->first_minor = drv_index << NWD_SHIFT; |
| 1625 | disk->fops = &cciss_fops; | 1781 | disk->fops = &cciss_fops; |
| 1626 | disk->private_data = &h->drv[drv_index]; | 1782 | if (cciss_create_ld_sysfs_entry(h, drv_index)) |
| 1627 | disk->driverfs_dev = &h->drv[drv_index].dev; | 1783 | goto cleanup_queue; |
| 1784 | disk->private_data = h->drv[drv_index]; | ||
| 1785 | disk->driverfs_dev = &h->drv[drv_index]->dev; | ||
| 1628 | 1786 | ||
| 1629 | /* Set up queue information */ | 1787 | /* Set up queue information */ |
| 1630 | blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); | 1788 | blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); |
| @@ -1642,14 +1800,21 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, | |||
| 1642 | disk->queue->queuedata = h; | 1800 | disk->queue->queuedata = h; |
| 1643 | 1801 | ||
| 1644 | blk_queue_logical_block_size(disk->queue, | 1802 | blk_queue_logical_block_size(disk->queue, |
| 1645 | h->drv[drv_index].block_size); | 1803 | h->drv[drv_index]->block_size); |
| 1646 | 1804 | ||
| 1647 | /* Make sure all queue data is written out before */ | 1805 | /* Make sure all queue data is written out before */ |
| 1648 | /* setting h->drv[drv_index].queue, as setting this */ | 1806 | /* setting h->drv[drv_index]->queue, as setting this */ |
| 1649 | /* allows the interrupt handler to start the queue */ | 1807 | /* allows the interrupt handler to start the queue */ |
| 1650 | wmb(); | 1808 | wmb(); |
| 1651 | h->drv[drv_index].queue = disk->queue; | 1809 | h->drv[drv_index]->queue = disk->queue; |
| 1652 | add_disk(disk); | 1810 | add_disk(disk); |
| 1811 | return 0; | ||
| 1812 | |||
| 1813 | cleanup_queue: | ||
| 1814 | blk_cleanup_queue(disk->queue); | ||
| 1815 | disk->queue = NULL; | ||
| 1816 | init_queue_failure: | ||
| 1817 | return -1; | ||
| 1653 | } | 1818 | } |
| 1654 | 1819 | ||
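The wmb() and the comment above it are a publish pattern: finish every store that initializes the per-drive data, issue a write barrier, and only then store the queue pointer that the interrupt handler tests before calling blk_start_queue(). A simplified sketch of the idea, with illustrative structure and field names:

#include <linux/blkdev.h>

struct ld_ctx {
        struct request_queue *queue;    /* tested from the IRQ path */
        int block_size;
};

static void publish_queue(struct ld_ctx *ctx, struct request_queue *q,
                          int block_size)
{
        ctx->block_size = block_size;   /* complete all init stores first */
        wmb();                          /* order them before the publish */
        ctx->queue = q;                 /* IRQ path may now start the queue */
}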
| 1655 | /* This function will check the usage_count of the drive to be updated/added. | 1820 | /* This function will check the usage_count of the drive to be updated/added. |
| @@ -1662,7 +1827,8 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, | |||
| 1662 | * is also the controller node. Any changes to disk 0 will show up on | 1827 | * is also the controller node. Any changes to disk 0 will show up on |
| 1663 | * the next reboot. | 1828 | * the next reboot. |
| 1664 | */ | 1829 | */ |
| 1665 | static void cciss_update_drive_info(int ctlr, int drv_index, int first_time) | 1830 | static void cciss_update_drive_info(int ctlr, int drv_index, int first_time, |
| 1831 | int via_ioctl) | ||
| 1666 | { | 1832 | { |
| 1667 | ctlr_info_t *h = hba[ctlr]; | 1833 | ctlr_info_t *h = hba[ctlr]; |
| 1668 | struct gendisk *disk; | 1834 | struct gendisk *disk; |
| @@ -1672,21 +1838,13 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time) | |||
| 1672 | unsigned long flags = 0; | 1838 | unsigned long flags = 0; |
| 1673 | int ret = 0; | 1839 | int ret = 0; |
| 1674 | drive_info_struct *drvinfo; | 1840 | drive_info_struct *drvinfo; |
| 1675 | int was_only_controller_node; | ||
| 1676 | 1841 | ||
| 1677 | /* Get information about the disk and modify the driver structure */ | 1842 | /* Get information about the disk and modify the driver structure */ |
| 1678 | inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL); | 1843 | inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL); |
| 1679 | drvinfo = kmalloc(sizeof(*drvinfo), GFP_KERNEL); | 1844 | drvinfo = kzalloc(sizeof(*drvinfo), GFP_KERNEL); |
| 1680 | if (inq_buff == NULL || drvinfo == NULL) | 1845 | if (inq_buff == NULL || drvinfo == NULL) |
| 1681 | goto mem_msg; | 1846 | goto mem_msg; |
| 1682 | 1847 | ||
| 1683 | /* See if we're trying to update the "controller node" | ||
| 1684 | * this will happen the when the first logical drive gets | ||
| 1685 | * created by ACU. | ||
| 1686 | */ | ||
| 1687 | was_only_controller_node = (drv_index == 0 && | ||
| 1688 | h->drv[0].raid_level == -1); | ||
| 1689 | |||
| 1690 | /* testing to see if 16-byte CDBs are already being used */ | 1848 | /* testing to see if 16-byte CDBs are already being used */ |
| 1691 | if (h->cciss_read == CCISS_READ_16) { | 1849 | if (h->cciss_read == CCISS_READ_16) { |
| 1692 | cciss_read_capacity_16(h->ctlr, drv_index, 1, | 1850 | cciss_read_capacity_16(h->ctlr, drv_index, 1, |
| @@ -1719,16 +1877,19 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time) | |||
| 1719 | drvinfo->model, drvinfo->rev); | 1877 | drvinfo->model, drvinfo->rev); |
| 1720 | cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no, | 1878 | cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no, |
| 1721 | sizeof(drvinfo->serial_no)); | 1879 | sizeof(drvinfo->serial_no)); |
| 1880 | /* Save the lunid in case we deregister the disk, below. */ | ||
| 1881 | memcpy(drvinfo->LunID, h->drv[drv_index]->LunID, | ||
| 1882 | sizeof(drvinfo->LunID)); | ||
| 1722 | 1883 | ||
| 1723 | /* Is it the same disk we already know, and nothing's changed? */ | 1884 | /* Is it the same disk we already know, and nothing's changed? */ |
| 1724 | if (h->drv[drv_index].raid_level != -1 && | 1885 | if (h->drv[drv_index]->raid_level != -1 && |
| 1725 | ((memcmp(drvinfo->serial_no, | 1886 | ((memcmp(drvinfo->serial_no, |
| 1726 | h->drv[drv_index].serial_no, 16) == 0) && | 1887 | h->drv[drv_index]->serial_no, 16) == 0) && |
| 1727 | drvinfo->block_size == h->drv[drv_index].block_size && | 1888 | drvinfo->block_size == h->drv[drv_index]->block_size && |
| 1728 | drvinfo->nr_blocks == h->drv[drv_index].nr_blocks && | 1889 | drvinfo->nr_blocks == h->drv[drv_index]->nr_blocks && |
| 1729 | drvinfo->heads == h->drv[drv_index].heads && | 1890 | drvinfo->heads == h->drv[drv_index]->heads && |
| 1730 | drvinfo->sectors == h->drv[drv_index].sectors && | 1891 | drvinfo->sectors == h->drv[drv_index]->sectors && |
| 1731 | drvinfo->cylinders == h->drv[drv_index].cylinders)) | 1892 | drvinfo->cylinders == h->drv[drv_index]->cylinders)) |
| 1732 | /* The disk is unchanged, nothing to update */ | 1893 | /* The disk is unchanged, nothing to update */ |
| 1733 | goto freeret; | 1894 | goto freeret; |
| 1734 | 1895 | ||
| @@ -1738,18 +1899,17 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time) | |||
| 1738 | * If the disk already exists then deregister it before proceeding | 1899 | * If the disk already exists then deregister it before proceeding |
| 1739 | * (unless it's the first disk (for the controller node). | 1900 | * (unless it's the first disk (for the controller node). |
| 1740 | */ | 1901 | */ |
| 1741 | if (h->drv[drv_index].raid_level != -1 && drv_index != 0) { | 1902 | if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) { |
| 1742 | printk(KERN_WARNING "disk %d has changed.\n", drv_index); | 1903 | printk(KERN_WARNING "disk %d has changed.\n", drv_index); |
| 1743 | spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); | 1904 | spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); |
| 1744 | h->drv[drv_index].busy_configuring = 1; | 1905 | h->drv[drv_index]->busy_configuring = 1; |
| 1745 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | 1906 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); |
| 1746 | 1907 | ||
| 1747 | /* deregister_disk sets h->drv[drv_index].queue = NULL | 1908 | /* deregister_disk sets h->drv[drv_index]->queue = NULL |
| 1748 | * which keeps the interrupt handler from starting | 1909 | * which keeps the interrupt handler from starting |
| 1749 | * the queue. | 1910 | * the queue. |
| 1750 | */ | 1911 | */ |
| 1751 | ret = deregister_disk(h, drv_index, 0); | 1912 | ret = deregister_disk(h, drv_index, 0, via_ioctl); |
| 1752 | h->drv[drv_index].busy_configuring = 0; | ||
| 1753 | } | 1913 | } |
| 1754 | 1914 | ||
| 1755 | /* If the disk is in use return */ | 1915 | /* If the disk is in use return */ |
| @@ -1757,22 +1917,31 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time) | |||
| 1757 | goto freeret; | 1917 | goto freeret; |
| 1758 | 1918 | ||
| 1759 | /* Save the new information from cciss_geometry_inquiry | 1919 | /* Save the new information from cciss_geometry_inquiry |
| 1760 | * and serial number inquiry. | 1920 | * and serial number inquiry. If the disk was deregistered |
| 1921 | * above, then h->drv[drv_index] will be NULL. | ||
| 1761 | */ | 1922 | */ |
| 1762 | h->drv[drv_index].block_size = drvinfo->block_size; | 1923 | if (h->drv[drv_index] == NULL) { |
| 1763 | h->drv[drv_index].nr_blocks = drvinfo->nr_blocks; | 1924 | drvinfo->device_initialized = 0; |
| 1764 | h->drv[drv_index].heads = drvinfo->heads; | 1925 | h->drv[drv_index] = drvinfo; |
| 1765 | h->drv[drv_index].sectors = drvinfo->sectors; | 1926 | drvinfo = NULL; /* so it won't be freed below. */ |
| 1766 | h->drv[drv_index].cylinders = drvinfo->cylinders; | 1927 | } else { |
| 1767 | h->drv[drv_index].raid_level = drvinfo->raid_level; | 1928 | /* special case for cxd0 */ |
| 1768 | memcpy(h->drv[drv_index].serial_no, drvinfo->serial_no, 16); | 1929 | h->drv[drv_index]->block_size = drvinfo->block_size; |
| 1769 | memcpy(h->drv[drv_index].vendor, drvinfo->vendor, VENDOR_LEN + 1); | 1930 | h->drv[drv_index]->nr_blocks = drvinfo->nr_blocks; |
| 1770 | memcpy(h->drv[drv_index].model, drvinfo->model, MODEL_LEN + 1); | 1931 | h->drv[drv_index]->heads = drvinfo->heads; |
| 1771 | memcpy(h->drv[drv_index].rev, drvinfo->rev, REV_LEN + 1); | 1932 | h->drv[drv_index]->sectors = drvinfo->sectors; |
| 1933 | h->drv[drv_index]->cylinders = drvinfo->cylinders; | ||
| 1934 | h->drv[drv_index]->raid_level = drvinfo->raid_level; | ||
| 1935 | memcpy(h->drv[drv_index]->serial_no, drvinfo->serial_no, 16); | ||
| 1936 | memcpy(h->drv[drv_index]->vendor, drvinfo->vendor, | ||
| 1937 | VENDOR_LEN + 1); | ||
| 1938 | memcpy(h->drv[drv_index]->model, drvinfo->model, MODEL_LEN + 1); | ||
| 1939 | memcpy(h->drv[drv_index]->rev, drvinfo->rev, REV_LEN + 1); | ||
| 1940 | } | ||
| 1772 | 1941 | ||
| 1773 | ++h->num_luns; | 1942 | ++h->num_luns; |
| 1774 | disk = h->gendisk[drv_index]; | 1943 | disk = h->gendisk[drv_index]; |
| 1775 | set_capacity(disk, h->drv[drv_index].nr_blocks); | 1944 | set_capacity(disk, h->drv[drv_index]->nr_blocks); |
| 1776 | 1945 | ||
| 1777 | /* If it's not disk 0 (drv_index != 0) | 1946 | /* If it's not disk 0 (drv_index != 0) |
| 1778 | * or if it was disk 0, but there was previously | 1947 | * or if it was disk 0, but there was previously |
| @@ -1780,8 +1949,15 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time) | |||
| 1780 | * (raid_level == -1) then we want to update the | 1949 | * (raid_level == -1) then we want to update the |
| 1781 | * logical drive's information. | 1950 | * logical drive's information. |
| 1782 | */ | 1951 | */ |
| 1783 | if (drv_index || first_time) | 1952 | if (drv_index || first_time) { |
| 1784 | cciss_add_disk(h, disk, drv_index); | 1953 | if (cciss_add_disk(h, disk, drv_index) != 0) { |
| 1954 | cciss_free_gendisk(h, drv_index); | ||
| 1955 | cciss_free_drive_info(h, drv_index); | ||
| 1956 | printk(KERN_WARNING "cciss:%d could not update " | ||
| 1957 | "disk %d\n", h->ctlr, drv_index); | ||
| 1958 | --h->num_luns; | ||
| 1959 | } | ||
| 1960 | } | ||
| 1785 | 1961 | ||
| 1786 | freeret: | 1962 | freeret: |
| 1787 | kfree(inq_buff); | 1963 | kfree(inq_buff); |
| @@ -1793,28 +1969,70 @@ mem_msg: | |||
| 1793 | } | 1969 | } |
| 1794 | 1970 | ||
| 1795 | /* This function will find the first index of the controllers drive array | 1971 | /* This function will find the first index of the controllers drive array |
| 1796 | * that has a -1 for the raid_level and will return that index. This is | 1972 | * that has a null drv pointer and allocate the drive info struct and |
| 1797 | * where new drives will be added. If the index to be returned is greater | 1973 | * will return that index. This is where new drives will be added. |
| 1798 | * than the highest_lun index for the controller then highest_lun is set | 1974 | * If the index to be returned is greater than the highest_lun index for |
| 1799 | * to this new index. If there are no available indexes then -1 is returned. | 1975 | * the controller then highest_lun is set to this new index. |
| 1800 | * "controller_node" is used to know if this is a real logical drive, or just | 1976 | * If there are no available indexes or if the allocation fails, then -1 |
| 1801 | * the controller node, which determines if this counts towards highest_lun. | 1977 | * is returned. "controller_node" is used to know if this is a real |
| 1978 | * logical drive, or just the controller node, which determines if this | ||
| 1979 | * counts towards highest_lun. | ||
| 1802 | */ | 1980 | */ |
| 1803 | static int cciss_find_free_drive_index(int ctlr, int controller_node) | 1981 | static int cciss_alloc_drive_info(ctlr_info_t *h, int controller_node) |
| 1804 | { | 1982 | { |
| 1805 | int i; | 1983 | int i; |
| 1984 | drive_info_struct *drv; | ||
| 1806 | 1985 | ||
| 1986 | /* Search for an empty slot for our drive info */ | ||
| 1807 | for (i = 0; i < CISS_MAX_LUN; i++) { | 1987 | for (i = 0; i < CISS_MAX_LUN; i++) { |
| 1808 | if (hba[ctlr]->drv[i].raid_level == -1) { | 1988 | |
| 1809 | if (i > hba[ctlr]->highest_lun) | 1989 | /* if not cxd0 case, and it's occupied, skip it. */ |
| 1810 | if (!controller_node) | 1990 | if (h->drv[i] && i != 0) |
| 1811 | hba[ctlr]->highest_lun = i; | 1991 | continue; |
| 1992 | /* | ||
| 1993 | * If it's cxd0 case, and drv is alloc'ed already, and a | ||
| 1994 | * disk is configured there, skip it. | ||
| 1995 | */ | ||
| 1996 | if (i == 0 && h->drv[i] && h->drv[i]->raid_level != -1) | ||
| 1997 | continue; | ||
| 1998 | |||
| 1999 | /* | ||
| 2000 | * We've found an empty slot. Update highest_lun | ||
| 2001 | * provided this isn't just the fake cxd0 controller node. | ||
| 2002 | */ | ||
| 2003 | if (i > h->highest_lun && !controller_node) | ||
| 2004 | h->highest_lun = i; | ||
| 2005 | |||
| 2006 | /* If adding a real disk at cxd0, and it's already alloc'ed */ | ||
| 2007 | if (i == 0 && h->drv[i] != NULL) | ||
| 1812 | return i; | 2008 | return i; |
| 1813 | } | 2009 | |
| 2010 | /* | ||
| 2011 | * Found an empty slot, not already alloc'ed. Allocate it. | ||
| 2012 | * Mark it with raid_level == -1, so we know it's new later on. | ||
| 2013 | */ | ||
| 2014 | drv = kzalloc(sizeof(*drv), GFP_KERNEL); | ||
| 2015 | if (!drv) | ||
| 2016 | return -1; | ||
| 2017 | drv->raid_level = -1; /* so we know it's new */ | ||
| 2018 | h->drv[i] = drv; | ||
| 2019 | return i; | ||
| 1814 | } | 2020 | } |
| 1815 | return -1; | 2021 | return -1; |
| 1816 | } | 2022 | } |
| 1817 | 2023 | ||
| 2024 | static void cciss_free_drive_info(ctlr_info_t *h, int drv_index) | ||
| 2025 | { | ||
| 2026 | kfree(h->drv[drv_index]); | ||
| 2027 | h->drv[drv_index] = NULL; | ||
| 2028 | } | ||
| 2029 | |||
| 2030 | static void cciss_free_gendisk(ctlr_info_t *h, int drv_index) | ||
| 2031 | { | ||
| 2032 | put_disk(h->gendisk[drv_index]); | ||
| 2033 | h->gendisk[drv_index] = NULL; | ||
| 2034 | } | ||
| 2035 | |||
| 1818 | /* cciss_add_gendisk finds a free hba[]->drv structure | 2036 | /* cciss_add_gendisk finds a free hba[]->drv structure |
| 1819 | * and allocates a gendisk if needed, and sets the lunid | 2037 | * and allocates a gendisk if needed, and sets the lunid |
| 1820 | * in the drvinfo structure. It returns the index into | 2038 | * in the drvinfo structure. It returns the index into |
| @@ -1824,13 +2042,15 @@ static int cciss_find_free_drive_index(int ctlr, int controller_node) | |||
| 1824 | * a means to talk to the controller in case no logical | 2042 | * a means to talk to the controller in case no logical |
| 1825 | * drives have yet been configured. | 2043 | * drives have yet been configured. |
| 1826 | */ | 2044 | */ |
| 1827 | static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node) | 2045 | static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[], |
| 2046 | int controller_node) | ||
| 1828 | { | 2047 | { |
| 1829 | int drv_index; | 2048 | int drv_index; |
| 1830 | 2049 | ||
| 1831 | drv_index = cciss_find_free_drive_index(h->ctlr, controller_node); | 2050 | drv_index = cciss_alloc_drive_info(h, controller_node); |
| 1832 | if (drv_index == -1) | 2051 | if (drv_index == -1) |
| 1833 | return -1; | 2052 | return -1; |
| 2053 | |||
| 1834 | /*Check if the gendisk needs to be allocated */ | 2054 | /*Check if the gendisk needs to be allocated */ |
| 1835 | if (!h->gendisk[drv_index]) { | 2055 | if (!h->gendisk[drv_index]) { |
| 1836 | h->gendisk[drv_index] = | 2056 | h->gendisk[drv_index] = |
| @@ -1839,23 +2059,24 @@ static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node) | |||
| 1839 | printk(KERN_ERR "cciss%d: could not " | 2059 | printk(KERN_ERR "cciss%d: could not " |
| 1840 | "allocate a new disk %d\n", | 2060 | "allocate a new disk %d\n", |
| 1841 | h->ctlr, drv_index); | 2061 | h->ctlr, drv_index); |
| 1842 | return -1; | 2062 | goto err_free_drive_info; |
| 1843 | } | 2063 | } |
| 1844 | } | 2064 | } |
| 1845 | h->drv[drv_index].LunID = lunid; | 2065 | memcpy(h->drv[drv_index]->LunID, lunid, |
| 1846 | if (cciss_create_ld_sysfs_entry(h, &h->drv[drv_index], drv_index)) | 2066 | sizeof(h->drv[drv_index]->LunID)); |
| 2067 | if (cciss_create_ld_sysfs_entry(h, drv_index)) | ||
| 1847 | goto err_free_disk; | 2068 | goto err_free_disk; |
| 1848 | |||
| 1849 | /* Don't need to mark this busy because nobody */ | 2069 | /* Don't need to mark this busy because nobody */ |
| 1850 | /* else knows about this disk yet to contend */ | 2070 | /* else knows about this disk yet to contend */ |
| 1851 | /* for access to it. */ | 2071 | /* for access to it. */ |
| 1852 | h->drv[drv_index].busy_configuring = 0; | 2072 | h->drv[drv_index]->busy_configuring = 0; |
| 1853 | wmb(); | 2073 | wmb(); |
| 1854 | return drv_index; | 2074 | return drv_index; |
| 1855 | 2075 | ||
| 1856 | err_free_disk: | 2076 | err_free_disk: |
| 1857 | put_disk(h->gendisk[drv_index]); | 2077 | cciss_free_gendisk(h, drv_index); |
| 1858 | h->gendisk[drv_index] = NULL; | 2078 | err_free_drive_info: |
| 2079 | cciss_free_drive_info(h, drv_index); | ||
| 1859 | return -1; | 2080 | return -1; |
| 1860 | } | 2081 | } |
| 1861 | 2082 | ||
| @@ -1872,21 +2093,25 @@ static void cciss_add_controller_node(ctlr_info_t *h) | |||
| 1872 | if (h->gendisk[0] != NULL) /* already did this? Then bail. */ | 2093 | if (h->gendisk[0] != NULL) /* already did this? Then bail. */ |
| 1873 | return; | 2094 | return; |
| 1874 | 2095 | ||
| 1875 | drv_index = cciss_add_gendisk(h, 0, 1); | 2096 | drv_index = cciss_add_gendisk(h, CTLR_LUNID, 1); |
| 1876 | if (drv_index == -1) { | 2097 | if (drv_index == -1) |
| 1877 | printk(KERN_WARNING "cciss%d: could not " | 2098 | goto error; |
| 1878 | "add disk 0.\n", h->ctlr); | 2099 | h->drv[drv_index]->block_size = 512; |
| 1879 | return; | 2100 | h->drv[drv_index]->nr_blocks = 0; |
| 1880 | } | 2101 | h->drv[drv_index]->heads = 0; |
| 1881 | h->drv[drv_index].block_size = 512; | 2102 | h->drv[drv_index]->sectors = 0; |
| 1882 | h->drv[drv_index].nr_blocks = 0; | 2103 | h->drv[drv_index]->cylinders = 0; |
| 1883 | h->drv[drv_index].heads = 0; | 2104 | h->drv[drv_index]->raid_level = -1; |
| 1884 | h->drv[drv_index].sectors = 0; | 2105 | memset(h->drv[drv_index]->serial_no, 0, 16); |
| 1885 | h->drv[drv_index].cylinders = 0; | ||
| 1886 | h->drv[drv_index].raid_level = -1; | ||
| 1887 | memset(h->drv[drv_index].serial_no, 0, 16); | ||
| 1888 | disk = h->gendisk[drv_index]; | 2106 | disk = h->gendisk[drv_index]; |
| 1889 | cciss_add_disk(h, disk, drv_index); | 2107 | if (cciss_add_disk(h, disk, drv_index) == 0) |
| 2108 | return; | ||
| 2109 | cciss_free_gendisk(h, drv_index); | ||
| 2110 | cciss_free_drive_info(h, drv_index); | ||
| 2111 | error: | ||
| 2112 | printk(KERN_WARNING "cciss%d: could not " | ||
| 2113 | "add disk 0.\n", h->ctlr); | ||
| 2114 | return; | ||
| 1890 | } | 2115 | } |
| 1891 | 2116 | ||
| 1892 | /* This function will add and remove logical drives from the Logical | 2117 | /* This function will add and remove logical drives from the Logical |
| @@ -1897,7 +2122,8 @@ static void cciss_add_controller_node(ctlr_info_t *h) | |||
| 1897 | * INPUT | 2122 | * INPUT |
| 1898 | * h = The controller to perform the operations on | 2123 | * h = The controller to perform the operations on |
| 1899 | */ | 2124 | */ |
| 1900 | static int rebuild_lun_table(ctlr_info_t *h, int first_time) | 2125 | static int rebuild_lun_table(ctlr_info_t *h, int first_time, |
| 2126 | int via_ioctl) | ||
| 1901 | { | 2127 | { |
| 1902 | int ctlr = h->ctlr; | 2128 | int ctlr = h->ctlr; |
| 1903 | int num_luns; | 2129 | int num_luns; |
| @@ -1907,7 +2133,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time) | |||
| 1907 | int i; | 2133 | int i; |
| 1908 | int drv_found; | 2134 | int drv_found; |
| 1909 | int drv_index = 0; | 2135 | int drv_index = 0; |
| 1910 | __u32 lunid = 0; | 2136 | unsigned char lunid[8] = CTLR_LUNID; |
| 1911 | unsigned long flags; | 2137 | unsigned long flags; |
| 1912 | 2138 | ||
| 1913 | if (!capable(CAP_SYS_RAWIO)) | 2139 | if (!capable(CAP_SYS_RAWIO)) |
| @@ -1960,13 +2186,13 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time) | |||
| 1960 | drv_found = 0; | 2186 | drv_found = 0; |
| 1961 | 2187 | ||
| 1962 | /* skip holes in the array from already deleted drives */ | 2188 | /* skip holes in the array from already deleted drives */ |
| 1963 | if (h->drv[i].raid_level == -1) | 2189 | if (h->drv[i] == NULL) |
| 1964 | continue; | 2190 | continue; |
| 1965 | 2191 | ||
| 1966 | for (j = 0; j < num_luns; j++) { | 2192 | for (j = 0; j < num_luns; j++) { |
| 1967 | memcpy(&lunid, &ld_buff->LUN[j][0], 4); | 2193 | memcpy(lunid, &ld_buff->LUN[j][0], sizeof(lunid)); |
| 1968 | lunid = le32_to_cpu(lunid); | 2194 | if (memcmp(h->drv[i]->LunID, lunid, |
| 1969 | if (h->drv[i].LunID == lunid) { | 2195 | sizeof(lunid)) == 0) { |
| 1970 | drv_found = 1; | 2196 | drv_found = 1; |
| 1971 | break; | 2197 | break; |
| 1972 | } | 2198 | } |
| @@ -1974,11 +2200,11 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time) | |||
| 1974 | if (!drv_found) { | 2200 | if (!drv_found) { |
| 1975 | /* Deregister it from the OS, it's gone. */ | 2201 | /* Deregister it from the OS, it's gone. */ |
| 1976 | spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); | 2202 | spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); |
| 1977 | h->drv[i].busy_configuring = 1; | 2203 | h->drv[i]->busy_configuring = 1; |
| 1978 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | 2204 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); |
| 1979 | return_code = deregister_disk(h, i, 1); | 2205 | return_code = deregister_disk(h, i, 1, via_ioctl); |
| 1980 | cciss_destroy_ld_sysfs_entry(&h->drv[i]); | 2206 | if (h->drv[i] != NULL) |
| 1981 | h->drv[i].busy_configuring = 0; | 2207 | h->drv[i]->busy_configuring = 0; |
| 1982 | } | 2208 | } |
| 1983 | } | 2209 | } |
| 1984 | 2210 | ||
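The deregistration pass above carries the core change of this patch: LunID is no longer a __u32 assembled with le32_to_cpu(), but an opaque 8-byte array that is copied with memcpy() and compared with memcmp(). A small illustration of the new comparison (a hypothetical helper, not part of the patch):

#include <linux/string.h>

#define LUNID_LEN 8     /* matches unsigned char LunID[8] in cciss.h */

static int lunid_equal(const unsigned char a[LUNID_LEN],
                       const unsigned char b[LUNID_LEN])
{
        return memcmp(a, b, LUNID_LEN) == 0;
}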
| @@ -1992,17 +2218,16 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time) | |||
| 1992 | 2218 | ||
| 1993 | drv_found = 0; | 2219 | drv_found = 0; |
| 1994 | 2220 | ||
| 1995 | memcpy(&lunid, &ld_buff->LUN[i][0], 4); | 2221 | memcpy(lunid, &ld_buff->LUN[i][0], sizeof(lunid)); |
| 1996 | lunid = le32_to_cpu(lunid); | ||
| 1997 | |||
| 1998 | /* Find if the LUN is already in the drive array | 2222 | /* Find if the LUN is already in the drive array |
| 1999 | * of the driver. If so then update its info | 2223 | * of the driver. If so then update its info |
| 2000 | * if not in use. If it does not exist then find | 2224 | * if not in use. If it does not exist then find |
| 2001 | * the first free index and add it. | 2225 | * the first free index and add it. |
| 2002 | */ | 2226 | */ |
| 2003 | for (j = 0; j <= h->highest_lun; j++) { | 2227 | for (j = 0; j <= h->highest_lun; j++) { |
| 2004 | if (h->drv[j].raid_level != -1 && | 2228 | if (h->drv[j] != NULL && |
| 2005 | h->drv[j].LunID == lunid) { | 2229 | memcmp(h->drv[j]->LunID, lunid, |
| 2230 | sizeof(h->drv[j]->LunID)) == 0) { | ||
| 2006 | drv_index = j; | 2231 | drv_index = j; |
| 2007 | drv_found = 1; | 2232 | drv_found = 1; |
| 2008 | break; | 2233 | break; |
| @@ -2015,7 +2240,8 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time) | |||
| 2015 | if (drv_index == -1) | 2240 | if (drv_index == -1) |
| 2016 | goto freeret; | 2241 | goto freeret; |
| 2017 | } | 2242 | } |
| 2018 | cciss_update_drive_info(ctlr, drv_index, first_time); | 2243 | cciss_update_drive_info(ctlr, drv_index, first_time, |
| 2244 | via_ioctl); | ||
| 2019 | } /* end for */ | 2245 | } /* end for */ |
| 2020 | 2246 | ||
| 2021 | freeret: | 2247 | freeret: |
| @@ -2032,6 +2258,25 @@ mem_msg: | |||
| 2032 | goto freeret; | 2258 | goto freeret; |
| 2033 | } | 2259 | } |
| 2034 | 2260 | ||
| 2261 | static void cciss_clear_drive_info(drive_info_struct *drive_info) | ||
| 2262 | { | ||
| 2263 | /* zero out the disk size info */ | ||
| 2264 | drive_info->nr_blocks = 0; | ||
| 2265 | drive_info->block_size = 0; | ||
| 2266 | drive_info->heads = 0; | ||
| 2267 | drive_info->sectors = 0; | ||
| 2268 | drive_info->cylinders = 0; | ||
| 2269 | drive_info->raid_level = -1; | ||
| 2270 | memset(drive_info->serial_no, 0, sizeof(drive_info->serial_no)); | ||
| 2271 | memset(drive_info->model, 0, sizeof(drive_info->model)); | ||
| 2272 | memset(drive_info->rev, 0, sizeof(drive_info->rev)); | ||
| 2273 | memset(drive_info->vendor, 0, sizeof(drive_info->vendor)); | ||
| 2274 | /* | ||
| 2275 | * don't clear the LUNID though, we need to remember which | ||
| 2276 | * one this one is. | ||
| 2277 | */ | ||
| 2278 | } | ||
| 2279 | |||
| 2035 | /* This function will deregister the disk and its queue from the | 2280 | /* This function will deregister the disk and its queue from the |
| 2036 | * kernel. It must be called with the controller lock held and the | 2281 | * kernel. It must be called with the controller lock held and the |
| 2037 | * drv structure's busy_configuring flag set. Its parameters are: | 2282 | * drv structure's busy_configuring flag set. Its parameters are: |
| @@ -2046,43 +2291,48 @@ mem_msg: | |||
| 2046 | * the disk in preparation for re-adding it. In this case | 2291 | * the disk in preparation for re-adding it. In this case |
| 2047 | * the highest_lun should be left unchanged and the LunID | 2292 | * the highest_lun should be left unchanged and the LunID |
| 2048 | * should not be cleared. | 2293 | * should not be cleared. |
| 2294 | * via_ioctl | ||
| 2295 | * This indicates whether we've reached this path via ioctl. | ||
| 2296 | * This affects the maximum usage count allowed for c0d0 to be messed with. | ||
| 2297 | * If this path is reached via ioctl(), then the max_usage_count will | ||
| 2298 | * be 1, as the process calling ioctl() has got to have the device open. | ||
| 2299 | * If we get here via sysfs, then the max usage count will be zero. | ||
| 2049 | */ | 2300 | */ |
| 2050 | static int deregister_disk(ctlr_info_t *h, int drv_index, | 2301 | static int deregister_disk(ctlr_info_t *h, int drv_index, |
| 2051 | int clear_all) | 2302 | int clear_all, int via_ioctl) |
| 2052 | { | 2303 | { |
| 2053 | int i; | 2304 | int i; |
| 2054 | struct gendisk *disk; | 2305 | struct gendisk *disk; |
| 2055 | drive_info_struct *drv; | 2306 | drive_info_struct *drv; |
| 2307 | int recalculate_highest_lun; | ||
| 2056 | 2308 | ||
| 2057 | if (!capable(CAP_SYS_RAWIO)) | 2309 | if (!capable(CAP_SYS_RAWIO)) |
| 2058 | return -EPERM; | 2310 | return -EPERM; |
| 2059 | 2311 | ||
| 2060 | drv = &h->drv[drv_index]; | 2312 | drv = h->drv[drv_index]; |
| 2061 | disk = h->gendisk[drv_index]; | 2313 | disk = h->gendisk[drv_index]; |
| 2062 | 2314 | ||
| 2063 | /* make sure logical volume is NOT in use */ | 2315 | /* make sure logical volume is NOT in use */ |
| 2064 | if (clear_all || (h->gendisk[0] == disk)) { | 2316 | if (clear_all || (h->gendisk[0] == disk)) { |
| 2065 | if (drv->usage_count > 1) | 2317 | if (drv->usage_count > via_ioctl) |
| 2066 | return -EBUSY; | 2318 | return -EBUSY; |
| 2067 | } else if (drv->usage_count > 0) | 2319 | } else if (drv->usage_count > 0) |
| 2068 | return -EBUSY; | 2320 | return -EBUSY; |
| 2069 | 2321 | ||
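The check just above encodes the via_ioctl rule from the function comment: a caller that reached this point through ioctl() necessarily holds one open of the device node, so one open is tolerated, while a sysfs-triggered rescan tolerates none. Reduced to a sketch with illustrative names:

/* Illustrative only: may a logical drive be deregistered right now? */
static int may_deregister(int usage_count, int via_ioctl)
{
        int allowed_opens = via_ioctl ? 1 : 0;

        return usage_count <= allowed_opens;    /* false means -EBUSY */
}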
| 2322 | recalculate_highest_lun = (drv == h->drv[h->highest_lun]); | ||
| 2323 | |||
| 2070 | /* invalidate the devices and deregister the disk. If it is disk | 2324 | /* invalidate the devices and deregister the disk. If it is disk |
| 2071 | * zero do not deregister it but just zero out it's values. This | 2325 | * zero do not deregister it but just zero out it's values. This |
| 2072 | * allows us to delete disk zero but keep the controller registered. | 2326 | * allows us to delete disk zero but keep the controller registered. |
| 2073 | */ | 2327 | */ |
| 2074 | if (h->gendisk[0] != disk) { | 2328 | if (h->gendisk[0] != disk) { |
| 2075 | struct request_queue *q = disk->queue; | 2329 | struct request_queue *q = disk->queue; |
| 2076 | if (disk->flags & GENHD_FL_UP) | 2330 | if (disk->flags & GENHD_FL_UP) { |
| 2331 | cciss_destroy_ld_sysfs_entry(h, drv_index, 0); | ||
| 2077 | del_gendisk(disk); | 2332 | del_gendisk(disk); |
| 2078 | if (q) { | ||
| 2079 | blk_cleanup_queue(q); | ||
| 2080 | /* Set drv->queue to NULL so that we do not try | ||
| 2081 | * to call blk_start_queue on this queue in the | ||
| 2082 | * interrupt handler | ||
| 2083 | */ | ||
| 2084 | drv->queue = NULL; | ||
| 2085 | } | 2333 | } |
| 2334 | if (q) | ||
| 2335 | blk_cleanup_queue(q); | ||
| 2086 | /* If clear_all is set then we are deleting the logical | 2336 | /* If clear_all is set then we are deleting the logical |
| 2087 | * drive, not just refreshing its info. For drives | 2337 | * drive, not just refreshing its info. For drives |
| 2088 | * other than disk 0 we will call put_disk. We do not | 2338 | * other than disk 0 we will call put_disk. We do not |
| @@ -2105,34 +2355,20 @@ static int deregister_disk(ctlr_info_t *h, int drv_index, | |||
| 2105 | } | 2355 | } |
| 2106 | } else { | 2356 | } else { |
| 2107 | set_capacity(disk, 0); | 2357 | set_capacity(disk, 0); |
| 2358 | cciss_clear_drive_info(drv); | ||
| 2108 | } | 2359 | } |
| 2109 | 2360 | ||
| 2110 | --h->num_luns; | 2361 | --h->num_luns; |
| 2111 | /* zero out the disk size info */ | ||
| 2112 | drv->nr_blocks = 0; | ||
| 2113 | drv->block_size = 0; | ||
| 2114 | drv->heads = 0; | ||
| 2115 | drv->sectors = 0; | ||
| 2116 | drv->cylinders = 0; | ||
| 2117 | drv->raid_level = -1; /* This can be used as a flag variable to | ||
| 2118 | * indicate that this element of the drive | ||
| 2119 | * array is free. | ||
| 2120 | */ | ||
| 2121 | |||
| 2122 | if (clear_all) { | ||
| 2123 | /* check to see if it was the last disk */ | ||
| 2124 | if (drv == h->drv + h->highest_lun) { | ||
| 2125 | /* if so, find the new hightest lun */ | ||
| 2126 | int i, newhighest = -1; | ||
| 2127 | for (i = 0; i <= h->highest_lun; i++) { | ||
| 2128 | /* if the disk has size > 0, it is available */ | ||
| 2129 | if (h->drv[i].heads) | ||
| 2130 | newhighest = i; | ||
| 2131 | } | ||
| 2132 | h->highest_lun = newhighest; | ||
| 2133 | } | ||
| 2134 | 2362 | ||
| 2135 | drv->LunID = 0; | 2363 | /* if it was the last disk, find the new highest lun */ |
| 2364 | if (clear_all && recalculate_highest_lun) { | ||
| 2365 | int i, newhighest = -1; | ||
| 2366 | for (i = 0; i <= h->highest_lun; i++) { | ||
| 2367 | /* if the disk has size > 0, it is available */ | ||
| 2368 | if (h->drv[i] && h->drv[i]->heads) | ||
| 2369 | newhighest = i; | ||
| 2370 | } | ||
| 2371 | h->highest_lun = newhighest; | ||
| 2136 | } | 2372 | } |
| 2137 | return 0; | 2373 | return 0; |
| 2138 | } | 2374 | } |
| @@ -2479,8 +2715,6 @@ static void cciss_geometry_inquiry(int ctlr, int logvol, | |||
| 2479 | } else { /* Get geometry failed */ | 2715 | } else { /* Get geometry failed */ |
| 2480 | printk(KERN_WARNING "cciss: reading geometry failed\n"); | 2716 | printk(KERN_WARNING "cciss: reading geometry failed\n"); |
| 2481 | } | 2717 | } |
| 2482 | printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n", | ||
| 2483 | drv->heads, drv->sectors, drv->cylinders); | ||
| 2484 | } | 2718 | } |
| 2485 | 2719 | ||
| 2486 | static void | 2720 | static void |
| @@ -2514,9 +2748,6 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size, | |||
| 2514 | *total_size = 0; | 2748 | *total_size = 0; |
| 2515 | *block_size = BLOCK_SIZE; | 2749 | *block_size = BLOCK_SIZE; |
| 2516 | } | 2750 | } |
| 2517 | if (*total_size != 0) | ||
| 2518 | printk(KERN_INFO " blocks= %llu block_size= %d\n", | ||
| 2519 | (unsigned long long)*total_size+1, *block_size); | ||
| 2520 | kfree(buf); | 2751 | kfree(buf); |
| 2521 | } | 2752 | } |
| 2522 | 2753 | ||
| @@ -2568,7 +2799,8 @@ static int cciss_revalidate(struct gendisk *disk) | |||
| 2568 | InquiryData_struct *inq_buff = NULL; | 2799 | InquiryData_struct *inq_buff = NULL; |
| 2569 | 2800 | ||
| 2570 | for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) { | 2801 | for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) { |
| 2571 | if (h->drv[logvol].LunID == drv->LunID) { | 2802 | if (memcmp(h->drv[logvol]->LunID, drv->LunID, |
| 2803 | sizeof(drv->LunID)) == 0) { | ||
| 2572 | FOUND = 1; | 2804 | FOUND = 1; |
| 2573 | break; | 2805 | break; |
| 2574 | } | 2806 | } |
| @@ -3053,8 +3285,7 @@ static void do_cciss_request(struct request_queue *q) | |||
| 3053 | /* The first 2 bits are reserved for controller error reporting. */ | 3285 | /* The first 2 bits are reserved for controller error reporting. */ |
| 3054 | c->Header.Tag.lower = (c->cmdindex << 3); | 3286 | c->Header.Tag.lower = (c->cmdindex << 3); |
| 3055 | c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */ | 3287 | c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */ |
| 3056 | c->Header.LUN.LogDev.VolId = drv->LunID; | 3288 | memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID)); |
| 3057 | c->Header.LUN.LogDev.Mode = 1; | ||
| 3058 | c->Request.CDBLen = 10; // 12 byte commands not in FW yet; | 3289 | c->Request.CDBLen = 10; // 12 byte commands not in FW yet; |
| 3059 | c->Request.Type.Type = TYPE_CMD; // It is a command. | 3290 | c->Request.Type.Type = TYPE_CMD; // It is a command. |
| 3060 | c->Request.Type.Attribute = ATTR_SIMPLE; | 3291 | c->Request.Type.Attribute = ATTR_SIMPLE; |
| @@ -3232,20 +3463,121 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id) | |||
| 3232 | return IRQ_HANDLED; | 3463 | return IRQ_HANDLED; |
| 3233 | } | 3464 | } |
| 3234 | 3465 | ||
| 3466 | /** | ||
| 3467 | * add_to_scan_list() - add controller to rescan queue | ||
| 3468 | * @h: Pointer to the controller. | ||
| 3469 | * | ||
| 3470 | * Adds the controller to the rescan queue if not already on the queue. | ||
| 3471 | * | ||
| 3472 | * returns 1 if added to the queue, 0 if skipped (could be on the | ||
| 3473 | * queue already, or the controller could be initializing or shutting | ||
| 3474 | * down). | ||
| 3475 | **/ | ||
| 3476 | static int add_to_scan_list(struct ctlr_info *h) | ||
| 3477 | { | ||
| 3478 | struct ctlr_info *test_h; | ||
| 3479 | int found = 0; | ||
| 3480 | int ret = 0; | ||
| 3481 | |||
| 3482 | if (h->busy_initializing) | ||
| 3483 | return 0; | ||
| 3484 | |||
| 3485 | if (!mutex_trylock(&h->busy_shutting_down)) | ||
| 3486 | return 0; | ||
| 3487 | |||
| 3488 | mutex_lock(&scan_mutex); | ||
| 3489 | list_for_each_entry(test_h, &scan_q, scan_list) { | ||
| 3490 | if (test_h == h) { | ||
| 3491 | found = 1; | ||
| 3492 | break; | ||
| 3493 | } | ||
| 3494 | } | ||
| 3495 | if (!found && !h->busy_scanning) { | ||
| 3496 | INIT_COMPLETION(h->scan_wait); | ||
| 3497 | list_add_tail(&h->scan_list, &scan_q); | ||
| 3498 | ret = 1; | ||
| 3499 | } | ||
| 3500 | mutex_unlock(&scan_mutex); | ||
| 3501 | mutex_unlock(&h->busy_shutting_down); | ||
| 3502 | |||
| 3503 | return ret; | ||
| 3504 | } | ||
| 3505 | |||
| 3506 | /** | ||
| 3507 | * remove_from_scan_list() - remove controller from rescan queue | ||
| 3508 | * @h: Pointer to the controller. | ||
| 3509 | * | ||
| 3510 | * Removes the controller from the rescan queue if present. Blocks if | ||
| 3511 | * the controller is currently conducting a rescan. | ||
| 3512 | **/ | ||
| 3513 | static void remove_from_scan_list(struct ctlr_info *h) | ||
| 3514 | { | ||
| 3515 | struct ctlr_info *test_h, *tmp_h; | ||
| 3516 | int scanning = 0; | ||
| 3517 | |||
| 3518 | mutex_lock(&scan_mutex); | ||
| 3519 | list_for_each_entry_safe(test_h, tmp_h, &scan_q, scan_list) { | ||
| 3520 | if (test_h == h) { | ||
| 3521 | list_del(&h->scan_list); | ||
| 3522 | complete_all(&h->scan_wait); | ||
| 3523 | mutex_unlock(&scan_mutex); | ||
| 3524 | return; | ||
| 3525 | } | ||
| 3526 | } | ||
| 3527 | if (h->busy_scanning) | ||
| 3528 | scanning = 1; | ||
| 3529 | mutex_unlock(&scan_mutex); | ||
| 3530 | |||
| 3531 | if (scanning) | ||
| 3532 | wait_for_completion(&h->scan_wait); | ||
| 3533 | } | ||
| 3534 | |||
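remove_from_scan_list() above, like add_to_scan_list() and the scan thread, coordinates through h->scan_wait: a struct completion that is re-armed with INIT_COMPLETION() each time the controller is queued and signalled with complete_all() when its rescan finishes. A bare-bones sketch of that reuse pattern, with illustrative names:

#include <linux/completion.h>

struct scan_target {
        struct completion scan_done;
};

static void scan_target_setup(struct scan_target *t)
{
        init_completion(&t->scan_done);         /* one-time initialization */
}

static void scan_target_queued(struct scan_target *t)
{
        INIT_COMPLETION(t->scan_done);          /* re-arm before each new scan */
}

static void scan_target_finished(struct scan_target *t)
{
        complete_all(&t->scan_done);            /* release every waiter */
}

static void scan_target_flush(struct scan_target *t)
{
        wait_for_completion(&t->scan_done);     /* block until the scan ends */
}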
| 3535 | /** | ||
| 3536 | * scan_thread() - kernel thread used to rescan controllers | ||
| 3537 | * @data: Ignored. | ||
| 3538 | * | ||
| 3539 | * A kernel thread used to scan for drive topology changes on | ||
| 3540 | * controllers. The thread processes only one controller at a time | ||
| 3541 | * using a queue. Controllers are added to the queue using | ||
| 3542 | * add_to_scan_list() and removed from the queue either after done | ||
| 3543 | * processing or using remove_from_scan_list(). | ||
| 3544 | * | ||
| 3545 | * returns 0. | ||
| 3546 | **/ | ||
| 3235 | static int scan_thread(void *data) | 3547 | static int scan_thread(void *data) |
| 3236 | { | 3548 | { |
| 3237 | ctlr_info_t *h = data; | 3549 | struct ctlr_info *h; |
| 3238 | int rc; | ||
| 3239 | DECLARE_COMPLETION_ONSTACK(wait); | ||
| 3240 | h->rescan_wait = &wait; | ||
| 3241 | 3550 | ||
| 3242 | for (;;) { | 3551 | while (1) { |
| 3243 | rc = wait_for_completion_interruptible(&wait); | 3552 | set_current_state(TASK_INTERRUPTIBLE); |
| 3553 | schedule(); | ||
| 3244 | if (kthread_should_stop()) | 3554 | if (kthread_should_stop()) |
| 3245 | break; | 3555 | break; |
| 3246 | if (!rc) | 3556 | |
| 3247 | rebuild_lun_table(h, 0); | 3557 | while (1) { |
| 3558 | mutex_lock(&scan_mutex); | ||
| 3559 | if (list_empty(&scan_q)) { | ||
| 3560 | mutex_unlock(&scan_mutex); | ||
| 3561 | break; | ||
| 3562 | } | ||
| 3563 | |||
| 3564 | h = list_entry(scan_q.next, | ||
| 3565 | struct ctlr_info, | ||
| 3566 | scan_list); | ||
| 3567 | list_del(&h->scan_list); | ||
| 3568 | h->busy_scanning = 1; | ||
| 3569 | mutex_unlock(&scan_mutex); | ||
| 3570 | |||
| 3571 | if (h) { | ||
| 3572 | rebuild_lun_table(h, 0, 0); | ||
| 3573 | complete_all(&h->scan_wait); | ||
| 3574 | mutex_lock(&scan_mutex); | ||
| 3575 | h->busy_scanning = 0; | ||
| 3576 | mutex_unlock(&scan_mutex); | ||
| 3577 | } | ||
| 3578 | } | ||
| 3248 | } | 3579 | } |
| 3580 | |||
| 3249 | return 0; | 3581 | return 0; |
| 3250 | } | 3582 | } |
| 3251 | 3583 | ||
| @@ -3268,8 +3600,8 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c) | |||
| 3268 | case REPORT_LUNS_CHANGED: | 3600 | case REPORT_LUNS_CHANGED: |
| 3269 | printk(KERN_WARNING "cciss%d: report LUN data " | 3601 | printk(KERN_WARNING "cciss%d: report LUN data " |
| 3270 | "changed\n", h->ctlr); | 3602 | "changed\n", h->ctlr); |
| 3271 | if (h->rescan_wait) | 3603 | add_to_scan_list(h); |
| 3272 | complete(h->rescan_wait); | 3604 | wake_up_process(cciss_scan_thread); |
| 3273 | return 1; | 3605 | return 1; |
| 3274 | break; | 3606 | break; |
| 3275 | case POWER_OR_RESET: | 3607 | case POWER_OR_RESET: |
| @@ -3489,7 +3821,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) | |||
| 3489 | if (scratchpad == CCISS_FIRMWARE_READY) | 3821 | if (scratchpad == CCISS_FIRMWARE_READY) |
| 3490 | break; | 3822 | break; |
| 3491 | set_current_state(TASK_INTERRUPTIBLE); | 3823 | set_current_state(TASK_INTERRUPTIBLE); |
| 3492 | schedule_timeout(HZ / 10); /* wait 100ms */ | 3824 | schedule_timeout(msecs_to_jiffies(100)); /* wait 100ms */ |
| 3493 | } | 3825 | } |
| 3494 | if (scratchpad != CCISS_FIRMWARE_READY) { | 3826 | if (scratchpad != CCISS_FIRMWARE_READY) { |
| 3495 | printk(KERN_WARNING "cciss: Board not ready. Timed out.\n"); | 3827 | printk(KERN_WARNING "cciss: Board not ready. Timed out.\n"); |
| @@ -3615,7 +3947,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) | |||
| 3615 | break; | 3947 | break; |
| 3616 | /* delay and try again */ | 3948 | /* delay and try again */ |
| 3617 | set_current_state(TASK_INTERRUPTIBLE); | 3949 | set_current_state(TASK_INTERRUPTIBLE); |
| 3618 | schedule_timeout(10); | 3950 | schedule_timeout(msecs_to_jiffies(1)); |
| 3619 | } | 3951 | } |
| 3620 | 3952 | ||
| 3621 | #ifdef CCISS_DEBUG | 3953 | #ifdef CCISS_DEBUG |
| @@ -3669,15 +4001,16 @@ Enomem: | |||
| 3669 | return -1; | 4001 | return -1; |
| 3670 | } | 4002 | } |
| 3671 | 4003 | ||
| 3672 | static void free_hba(int i) | 4004 | static void free_hba(int n) |
| 3673 | { | 4005 | { |
| 3674 | ctlr_info_t *p = hba[i]; | 4006 | ctlr_info_t *h = hba[n]; |
| 3675 | int n; | 4007 | int i; |
| 3676 | 4008 | ||
| 3677 | hba[i] = NULL; | 4009 | hba[n] = NULL; |
| 3678 | for (n = 0; n < CISS_MAX_LUN; n++) | 4010 | for (i = 0; i < h->highest_lun + 1; i++) |
| 3679 | put_disk(p->gendisk[n]); | 4011 | if (h->gendisk[i] != NULL) |
| 3680 | kfree(p); | 4012 | put_disk(h->gendisk[i]); |
| 4013 | kfree(h); | ||
| 3681 | } | 4014 | } |
| 3682 | 4015 | ||
| 3683 | /* Send a message CDB to the firmware. */ | 4016 | /* Send a message CDB to the firmware. */ |
| @@ -3918,6 +4251,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
| 3918 | hba[i]->busy_initializing = 1; | 4251 | hba[i]->busy_initializing = 1; |
| 3919 | INIT_HLIST_HEAD(&hba[i]->cmpQ); | 4252 | INIT_HLIST_HEAD(&hba[i]->cmpQ); |
| 3920 | INIT_HLIST_HEAD(&hba[i]->reqQ); | 4253 | INIT_HLIST_HEAD(&hba[i]->reqQ); |
| 4254 | mutex_init(&hba[i]->busy_shutting_down); | ||
| 3921 | 4255 | ||
| 3922 | if (cciss_pci_init(hba[i], pdev) != 0) | 4256 | if (cciss_pci_init(hba[i], pdev) != 0) |
| 3923 | goto clean0; | 4257 | goto clean0; |
| @@ -3926,6 +4260,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
| 3926 | hba[i]->ctlr = i; | 4260 | hba[i]->ctlr = i; |
| 3927 | hba[i]->pdev = pdev; | 4261 | hba[i]->pdev = pdev; |
| 3928 | 4262 | ||
| 4263 | init_completion(&hba[i]->scan_wait); | ||
| 4264 | |||
| 3929 | if (cciss_create_hba_sysfs_entry(hba[i])) | 4265 | if (cciss_create_hba_sysfs_entry(hba[i])) |
| 3930 | goto clean0; | 4266 | goto clean0; |
| 3931 | 4267 | ||
| @@ -4001,8 +4337,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
| 4001 | hba[i]->num_luns = 0; | 4337 | hba[i]->num_luns = 0; |
| 4002 | hba[i]->highest_lun = -1; | 4338 | hba[i]->highest_lun = -1; |
| 4003 | for (j = 0; j < CISS_MAX_LUN; j++) { | 4339 | for (j = 0; j < CISS_MAX_LUN; j++) { |
| 4004 | hba[i]->drv[j].raid_level = -1; | 4340 | hba[i]->drv[j] = NULL; |
| 4005 | hba[i]->drv[j].queue = NULL; | ||
| 4006 | hba[i]->gendisk[j] = NULL; | 4341 | hba[i]->gendisk[j] = NULL; |
| 4007 | } | 4342 | } |
| 4008 | 4343 | ||
| @@ -4035,14 +4370,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
| 4035 | 4370 | ||
| 4036 | hba[i]->cciss_max_sectors = 2048; | 4371 | hba[i]->cciss_max_sectors = 2048; |
| 4037 | 4372 | ||
| 4373 | rebuild_lun_table(hba[i], 1, 0); | ||
| 4038 | hba[i]->busy_initializing = 0; | 4374 | hba[i]->busy_initializing = 0; |
| 4039 | |||
| 4040 | rebuild_lun_table(hba[i], 1); | ||
| 4041 | hba[i]->cciss_scan_thread = kthread_run(scan_thread, hba[i], | ||
| 4042 | "cciss_scan%02d", i); | ||
| 4043 | if (IS_ERR(hba[i]->cciss_scan_thread)) | ||
| 4044 | return PTR_ERR(hba[i]->cciss_scan_thread); | ||
| 4045 | |||
| 4046 | return 1; | 4375 | return 1; |
| 4047 | 4376 | ||
| 4048 | clean4: | 4377 | clean4: |
| @@ -4063,12 +4392,7 @@ clean1: | |||
| 4063 | cciss_destroy_hba_sysfs_entry(hba[i]); | 4392 | cciss_destroy_hba_sysfs_entry(hba[i]); |
| 4064 | clean0: | 4393 | clean0: |
| 4065 | hba[i]->busy_initializing = 0; | 4394 | hba[i]->busy_initializing = 0; |
| 4066 | /* cleanup any queues that may have been initialized */ | 4395 | |
| 4067 | for (j=0; j <= hba[i]->highest_lun; j++){ | ||
| 4068 | drive_info_struct *drv = &(hba[i]->drv[j]); | ||
| 4069 | if (drv->queue) | ||
| 4070 | blk_cleanup_queue(drv->queue); | ||
| 4071 | } | ||
| 4072 | /* | 4396 | /* |
| 4073 | * Deliberately omit pci_disable_device(): it does something nasty to | 4397 | * Deliberately omit pci_disable_device(): it does something nasty to |
| 4074 | * Smart Array controllers that pci_enable_device does not undo | 4398 | * Smart Array controllers that pci_enable_device does not undo |
| @@ -4125,8 +4449,9 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev) | |||
| 4125 | return; | 4449 | return; |
| 4126 | } | 4450 | } |
| 4127 | 4451 | ||
| 4128 | kthread_stop(hba[i]->cciss_scan_thread); | 4452 | mutex_lock(&hba[i]->busy_shutting_down); |
| 4129 | 4453 | ||
| 4454 | remove_from_scan_list(hba[i]); | ||
| 4130 | remove_proc_entry(hba[i]->devname, proc_cciss); | 4455 | remove_proc_entry(hba[i]->devname, proc_cciss); |
| 4131 | unregister_blkdev(hba[i]->major, hba[i]->devname); | 4456 | unregister_blkdev(hba[i]->major, hba[i]->devname); |
| 4132 | 4457 | ||
| @@ -4136,8 +4461,10 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev) | |||
| 4136 | if (disk) { | 4461 | if (disk) { |
| 4137 | struct request_queue *q = disk->queue; | 4462 | struct request_queue *q = disk->queue; |
| 4138 | 4463 | ||
| 4139 | if (disk->flags & GENHD_FL_UP) | 4464 | if (disk->flags & GENHD_FL_UP) { |
| 4465 | cciss_destroy_ld_sysfs_entry(hba[i], j, 1); | ||
| 4140 | del_gendisk(disk); | 4466 | del_gendisk(disk); |
| 4467 | } | ||
| 4141 | if (q) | 4468 | if (q) |
| 4142 | blk_cleanup_queue(q); | 4469 | blk_cleanup_queue(q); |
| 4143 | } | 4470 | } |
| @@ -4170,6 +4497,7 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev) | |||
| 4170 | pci_release_regions(pdev); | 4497 | pci_release_regions(pdev); |
| 4171 | pci_set_drvdata(pdev, NULL); | 4498 | pci_set_drvdata(pdev, NULL); |
| 4172 | cciss_destroy_hba_sysfs_entry(hba[i]); | 4499 | cciss_destroy_hba_sysfs_entry(hba[i]); |
| 4500 | mutex_unlock(&hba[i]->busy_shutting_down); | ||
| 4173 | free_hba(i); | 4501 | free_hba(i); |
| 4174 | } | 4502 | } |
| 4175 | 4503 | ||
| @@ -4202,15 +4530,25 @@ static int __init cciss_init(void) | |||
| 4202 | if (err) | 4530 | if (err) |
| 4203 | return err; | 4531 | return err; |
| 4204 | 4532 | ||
| 4533 | /* Start the scan thread */ | ||
| 4534 | cciss_scan_thread = kthread_run(scan_thread, NULL, "cciss_scan"); | ||
| 4535 | if (IS_ERR(cciss_scan_thread)) { | ||
| 4536 | err = PTR_ERR(cciss_scan_thread); | ||
| 4537 | goto err_bus_unregister; | ||
| 4538 | } | ||
| 4539 | |||
| 4205 | /* Register for our PCI devices */ | 4540 | /* Register for our PCI devices */ |
| 4206 | err = pci_register_driver(&cciss_pci_driver); | 4541 | err = pci_register_driver(&cciss_pci_driver); |
| 4207 | if (err) | 4542 | if (err) |
| 4208 | goto err_bus_register; | 4543 | goto err_thread_stop; |
| 4209 | 4544 | ||
| 4210 | return 0; | 4545 | return err; |
| 4211 | 4546 | ||
| 4212 | err_bus_register: | 4547 | err_thread_stop: |
| 4548 | kthread_stop(cciss_scan_thread); | ||
| 4549 | err_bus_unregister: | ||
| 4213 | bus_unregister(&cciss_bus_type); | 4550 | bus_unregister(&cciss_bus_type); |
| 4551 | |||
| 4214 | return err; | 4552 | return err; |
| 4215 | } | 4553 | } |
| 4216 | 4554 | ||
| @@ -4227,6 +4565,7 @@ static void __exit cciss_cleanup(void) | |||
| 4227 | cciss_remove_one(hba[i]->pdev); | 4565 | cciss_remove_one(hba[i]->pdev); |
| 4228 | } | 4566 | } |
| 4229 | } | 4567 | } |
| 4568 | kthread_stop(cciss_scan_thread); | ||
| 4230 | remove_proc_entry("driver/cciss", NULL); | 4569 | remove_proc_entry("driver/cciss", NULL); |
| 4231 | bus_unregister(&cciss_bus_type); | 4570 | bus_unregister(&cciss_bus_type); |
| 4232 | } | 4571 | } |
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h index 06a5db25b298..31524cf42c77 100644 --- a/drivers/block/cciss.h +++ b/drivers/block/cciss.h | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | #define CCISS_H | 2 | #define CCISS_H |
| 3 | 3 | ||
| 4 | #include <linux/genhd.h> | 4 | #include <linux/genhd.h> |
| 5 | #include <linux/mutex.h> | ||
| 5 | 6 | ||
| 6 | #include "cciss_cmd.h" | 7 | #include "cciss_cmd.h" |
| 7 | 8 | ||
| @@ -29,7 +30,7 @@ struct access_method { | |||
| 29 | }; | 30 | }; |
| 30 | typedef struct _drive_info_struct | 31 | typedef struct _drive_info_struct |
| 31 | { | 32 | { |
| 32 | __u32 LunID; | 33 | unsigned char LunID[8]; |
| 33 | int usage_count; | 34 | int usage_count; |
| 34 | struct request_queue *queue; | 35 | struct request_queue *queue; |
| 35 | sector_t nr_blocks; | 36 | sector_t nr_blocks; |
| @@ -51,6 +52,7 @@ typedef struct _drive_info_struct | |||
| 51 | char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */ | 52 | char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */ |
| 52 | char model[MODEL_LEN + 1]; /* SCSI model string */ | 53 | char model[MODEL_LEN + 1]; /* SCSI model string */ |
| 53 | char rev[REV_LEN + 1]; /* SCSI revision string */ | 54 | char rev[REV_LEN + 1]; /* SCSI revision string */ |
| 55 | char device_initialized; /* indicates whether dev is initialized */ | ||
| 54 | } drive_info_struct; | 56 | } drive_info_struct; |
| 55 | 57 | ||
| 56 | struct ctlr_info | 58 | struct ctlr_info |
| @@ -86,7 +88,7 @@ struct ctlr_info | |||
| 86 | BYTE cciss_read_capacity; | 88 | BYTE cciss_read_capacity; |
| 87 | 89 | ||
| 88 | // information about each logical volume | 90 | // information about each logical volume |
| 89 | drive_info_struct drv[CISS_MAX_LUN]; | 91 | drive_info_struct *drv[CISS_MAX_LUN]; |
| 90 | 92 | ||
| 91 | struct access_method access; | 93 | struct access_method access; |
| 92 | 94 | ||
| @@ -108,6 +110,8 @@ struct ctlr_info | |||
| 108 | int nr_frees; | 110 | int nr_frees; |
| 109 | int busy_configuring; | 111 | int busy_configuring; |
| 110 | int busy_initializing; | 112 | int busy_initializing; |
| 113 | int busy_scanning; | ||
| 114 | struct mutex busy_shutting_down; | ||
| 111 | 115 | ||
| 112 | /* This element holds the zero based queue number of the last | 116 | /* This element holds the zero based queue number of the last |
| 113 | * queue to be started. It is used for fairness. | 117 | * queue to be started. It is used for fairness. |
| @@ -122,8 +126,8 @@ struct ctlr_info | |||
| 122 | /* and saved for later processing */ | 126 | /* and saved for later processing */ |
| 123 | #endif | 127 | #endif |
| 124 | unsigned char alive; | 128 | unsigned char alive; |
| 125 | struct completion *rescan_wait; | 129 | struct list_head scan_list; |
| 126 | struct task_struct *cciss_scan_thread; | 130 | struct completion scan_wait; |
| 127 | struct device dev; | 131 | struct device dev; |
| 128 | }; | 132 | }; |
| 129 | 133 | ||
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c index b82d438e2607..6422651ec364 100644 --- a/drivers/block/cpqarray.c +++ b/drivers/block/cpqarray.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include <linux/blkpg.h> | 32 | #include <linux/blkpg.h> |
| 33 | #include <linux/timer.h> | 33 | #include <linux/timer.h> |
| 34 | #include <linux/proc_fs.h> | 34 | #include <linux/proc_fs.h> |
| 35 | #include <linux/seq_file.h> | ||
| 35 | #include <linux/init.h> | 36 | #include <linux/init.h> |
| 36 | #include <linux/hdreg.h> | 37 | #include <linux/hdreg.h> |
| 37 | #include <linux/spinlock.h> | 38 | #include <linux/spinlock.h> |
| @@ -177,7 +178,6 @@ static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev); | |||
| 177 | 178 | ||
| 178 | #ifdef CONFIG_PROC_FS | 179 | #ifdef CONFIG_PROC_FS |
| 179 | static void ida_procinit(int i); | 180 | static void ida_procinit(int i); |
| 180 | static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data); | ||
| 181 | #else | 181 | #else |
| 182 | static void ida_procinit(int i) {} | 182 | static void ida_procinit(int i) {} |
| 183 | #endif | 183 | #endif |
| @@ -206,6 +206,7 @@ static const struct block_device_operations ida_fops = { | |||
| 206 | #ifdef CONFIG_PROC_FS | 206 | #ifdef CONFIG_PROC_FS |
| 207 | 207 | ||
| 208 | static struct proc_dir_entry *proc_array; | 208 | static struct proc_dir_entry *proc_array; |
| 209 | static const struct file_operations ida_proc_fops; | ||
| 209 | 210 | ||
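ida_proc_fops is only forward-declared here; its definition falls outside the hunks shown. For a create_proc_read_entry()-to-seq_file conversion like this one it is presumably the usual single_open() boilerplate, roughly:

/* Presumed shape of ida_proc_fops -- not shown in these hunks. */
static int ida_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, ida_proc_show, PDE(inode)->data);
}

static const struct file_operations ida_proc_fops = {
        .owner          = THIS_MODULE,
        .open           = ida_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};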
| 210 | /* | 211 | /* |
| 211 | * Get us a file in /proc/array that says something about each controller. | 212 | * Get us a file in /proc/array that says something about each controller. |
| @@ -218,19 +219,16 @@ static void __init ida_procinit(int i) | |||
| 218 | if (!proc_array) return; | 219 | if (!proc_array) return; |
| 219 | } | 220 | } |
| 220 | 221 | ||
| 221 | create_proc_read_entry(hba[i]->devname, 0, proc_array, | 222 | proc_create_data(hba[i]->devname, 0, proc_array, &ida_proc_fops, hba[i]); |
| 222 | ida_proc_get_info, hba[i]); | ||
| 223 | } | 223 | } |
| 224 | 224 | ||
| 225 | /* | 225 | /* |
| 226 | * Report information about this controller. | 226 | * Report information about this controller. |
| 227 | */ | 227 | */ |
| 228 | static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) | 228 | static int ida_proc_show(struct seq_file *m, void *v) |
| 229 | { | 229 | { |
| 230 | off_t pos = 0; | 230 | int i, ctlr; |
| 231 | off_t len = 0; | 231 | ctlr_info_t *h = (ctlr_info_t*)m->private; |
| 232 | int size, i, ctlr; | ||
| 233 | ctlr_info_t *h = (ctlr_info_t*)data; | ||
| 234 | drv_info_t *drv; | 232 | drv_info_t *drv; |
| 235 | #ifdef CPQ_PROC_PRINT_QUEUES | 233 | #ifdef CPQ_PROC_PRINT_QUEUES |
| 236 | cmdlist_t *c; | 234 | cmdlist_t *c; |
| @@ -238,7 +236,7 @@ static int ida_proc_get_info(char *buffer, char **start, off_t offset, int lengt | |||
| 238 | #endif | 236 | #endif |
| 239 | 237 | ||
| 240 | ctlr = h->ctlr; | 238 | ctlr = h->ctlr; |
| 241 | size = sprintf(buffer, "%s: Compaq %s Controller\n" | 239 | seq_printf(m, "%s: Compaq %s Controller\n" |
| 242 | " Board ID: 0x%08lx\n" | 240 | " Board ID: 0x%08lx\n" |
| 243 | " Firmware Revision: %c%c%c%c\n" | 241 | " Firmware Revision: %c%c%c%c\n" |
| 244 | " Controller Sig: 0x%08lx\n" | 242 | " Controller Sig: 0x%08lx\n" |
| @@ -258,55 +256,54 @@ static int ida_proc_get_info(char *buffer, char **start, off_t offset, int lengt | |||
| 258 | h->log_drives, h->phys_drives, | 256 | h->log_drives, h->phys_drives, |
| 259 | h->Qdepth, h->maxQsinceinit); | 257 | h->Qdepth, h->maxQsinceinit); |
| 260 | 258 | ||
| 261 | pos += size; len += size; | 259 | seq_puts(m, "Logical Drive Info:\n"); |
| 262 | |||
| 263 | size = sprintf(buffer+len, "Logical Drive Info:\n"); | ||
| 264 | pos += size; len += size; | ||
| 265 | 260 | ||
| 266 | for(i=0; i<h->log_drives; i++) { | 261 | for(i=0; i<h->log_drives; i++) { |
| 267 | drv = &h->drv[i]; | 262 | drv = &h->drv[i]; |
| 268 | size = sprintf(buffer+len, "ida/c%dd%d: blksz=%d nr_blks=%d\n", | 263 | seq_printf(m, "ida/c%dd%d: blksz=%d nr_blks=%d\n", |
| 269 | ctlr, i, drv->blk_size, drv->nr_blks); | 264 | ctlr, i, drv->blk_size, drv->nr_blks); |
| 270 | pos += size; len += size; | ||
| 271 | } | 265 | } |
| 272 | 266 | ||
| 273 | #ifdef CPQ_PROC_PRINT_QUEUES | 267 | #ifdef CPQ_PROC_PRINT_QUEUES |
| 274 | spin_lock_irqsave(IDA_LOCK(h->ctlr), flags); | 268 | spin_lock_irqsave(IDA_LOCK(h->ctlr), flags); |
| 275 | size = sprintf(buffer+len, "\nCurrent Queues:\n"); | 269 | seq_puts(m, "\nCurrent Queues:\n"); |
| 276 | pos += size; len += size; | ||
| 277 | 270 | ||
| 278 | c = h->reqQ; | 271 | c = h->reqQ; |
| 279 | size = sprintf(buffer+len, "reqQ = %p", c); pos += size; len += size; | 272 | seq_printf(m, "reqQ = %p", c); |
| 280 | if (c) c=c->next; | 273 | if (c) c=c->next; |
| 281 | while(c && c != h->reqQ) { | 274 | while(c && c != h->reqQ) { |
| 282 | size = sprintf(buffer+len, "->%p", c); | 275 | seq_printf(m, "->%p", c); |
| 283 | pos += size; len += size; | ||
| 284 | c=c->next; | 276 | c=c->next; |
| 285 | } | 277 | } |
| 286 | 278 | ||
| 287 | c = h->cmpQ; | 279 | c = h->cmpQ; |
| 288 | size = sprintf(buffer+len, "\ncmpQ = %p", c); pos += size; len += size; | 280 | seq_printf(m, "\ncmpQ = %p", c); |
| 289 | if (c) c=c->next; | 281 | if (c) c=c->next; |
| 290 | while(c && c != h->cmpQ) { | 282 | while(c && c != h->cmpQ) { |
| 291 | size = sprintf(buffer+len, "->%p", c); | 283 | seq_printf(m, "->%p", c); |
| 292 | pos += size; len += size; | ||
| 293 | c=c->next; | 284 | c=c->next; |
| 294 | } | 285 | } |
| 295 | 286 | ||
| 296 | size = sprintf(buffer+len, "\n"); pos += size; len += size; | 287 | seq_putc(m, '\n'); |
| 297 | spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags); | 288 | spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags); |
| 298 | #endif | 289 | #endif |
| 299 | size = sprintf(buffer+len, "nr_allocs = %d\nnr_frees = %d\n", | 290 | seq_printf(m, "nr_allocs = %d\nnr_frees = %d\n", |
| 300 | h->nr_allocs, h->nr_frees); | 291 | h->nr_allocs, h->nr_frees); |
| 301 | pos += size; len += size; | 292 | return 0; |
| 302 | 293 | } | |
| 303 | *eof = 1; | 294 | |
| 304 | *start = buffer+offset; | 295 | static int ida_proc_open(struct inode *inode, struct file *file) |
| 305 | len -= offset; | 296 | { |
| 306 | if (len>length) | 297 | return single_open(file, ida_proc_show, PDE(inode)->data); |
| 307 | len = length; | ||
| 308 | return len; | ||
| 309 | } | 298 | } |
| 299 | |||
| 300 | static const struct file_operations ida_proc_fops = { | ||
| 301 | .owner = THIS_MODULE, | ||
| 302 | .open = ida_proc_open, | ||
| 303 | .read = seq_read, | ||
| 304 | .llseek = seq_lseek, | ||
| 305 | .release = single_release, | ||
| 306 | }; | ||
| 310 | #endif /* CONFIG_PROC_FS */ | 307 | #endif /* CONFIG_PROC_FS */ |
| 311 | 308 | ||
| 312 | module_param_array(eisa, int, NULL, 0); | 309 | module_param_array(eisa, int, NULL, 0); |
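The cpqarray hunks above are a straight conversion from the old create_proc_read_entry()/get_info interface, with its manual offset, length and *eof bookkeeping, to proc_create_data() plus a seq_file single_open() handler, which does that bookkeeping for you. The same boilerplate applies to any driver making this conversion; a condensed sketch (struct foo_ctlr and the field it prints are made up):

	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	static int foo_proc_show(struct seq_file *m, void *v)
	{
		struct foo_ctlr *h = m->private;  /* set via proc_create_data() below */

		seq_printf(m, "controller %d\n", h->ctlr);
		return 0;                         /* no offset/eof handling needed */
	}

	static int foo_proc_open(struct inode *inode, struct file *file)
	{
		return single_open(file, foo_proc_show, PDE(inode)->data);
	}

	static const struct file_operations foo_proc_fops = {
		.owner   = THIS_MODULE,
		.open    = foo_proc_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};

	/* registration: proc_create_data(name, 0, parent_dir, &foo_proc_fops, h); */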
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c index 60ab75104da9..1c129211302d 100644 --- a/drivers/char/agp/parisc-agp.c +++ b/drivers/char/agp/parisc-agp.c | |||
| @@ -217,7 +217,7 @@ static const struct agp_bridge_driver parisc_agp_driver = { | |||
| 217 | .configure = parisc_agp_configure, | 217 | .configure = parisc_agp_configure, |
| 218 | .fetch_size = parisc_agp_fetch_size, | 218 | .fetch_size = parisc_agp_fetch_size, |
| 219 | .tlb_flush = parisc_agp_tlbflush, | 219 | .tlb_flush = parisc_agp_tlbflush, |
| 220 | .mask_memory = parisc_agp_page_mask_memory, | 220 | .mask_memory = parisc_agp_mask_memory, |
| 221 | .masks = parisc_agp_masks, | 221 | .masks = parisc_agp_masks, |
| 222 | .agp_enable = parisc_agp_enable, | 222 | .agp_enable = parisc_agp_enable, |
| 223 | .cache_flush = global_cache_flush, | 223 | .cache_flush = global_cache_flush, |
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c index 52e06589821d..045c930e6320 100644 --- a/drivers/char/dtlk.c +++ b/drivers/char/dtlk.c | |||
| @@ -56,6 +56,7 @@ | |||
| 56 | #include <linux/errno.h> /* for -EBUSY */ | 56 | #include <linux/errno.h> /* for -EBUSY */ |
| 57 | #include <linux/ioport.h> /* for request_region */ | 57 | #include <linux/ioport.h> /* for request_region */ |
| 58 | #include <linux/delay.h> /* for loops_per_jiffy */ | 58 | #include <linux/delay.h> /* for loops_per_jiffy */ |
| 59 | #include <linux/sched.h> | ||
| 59 | #include <linux/smp_lock.h> /* cycle_kernel_lock() */ | 60 | #include <linux/smp_lock.h> /* cycle_kernel_lock() */ |
| 60 | #include <asm/io.h> /* for inb_p, outb_p, inb, outb, etc. */ | 61 | #include <asm/io.h> /* for inb_p, outb_p, inb, outb, etc. */ |
| 61 | #include <asm/uaccess.h> /* for get_user, etc. */ | 62 | #include <asm/uaccess.h> /* for get_user, etc. */ |
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c index 41fc11dc921c..65545de3dbf4 100644 --- a/drivers/char/ipmi/ipmi_devintf.c +++ b/drivers/char/ipmi/ipmi_devintf.c | |||
| @@ -36,6 +36,7 @@ | |||
| 36 | #include <linux/errno.h> | 36 | #include <linux/errno.h> |
| 37 | #include <asm/system.h> | 37 | #include <asm/system.h> |
| 38 | #include <linux/poll.h> | 38 | #include <linux/poll.h> |
| 39 | #include <linux/sched.h> | ||
| 39 | #include <linux/spinlock.h> | 40 | #include <linux/spinlock.h> |
| 40 | #include <linux/slab.h> | 41 | #include <linux/slab.h> |
| 41 | #include <linux/ipmi.h> | 42 | #include <linux/ipmi.h> |
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 09050797c76a..ec5e3f8df648 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | #include <linux/errno.h> | 35 | #include <linux/errno.h> |
| 36 | #include <asm/system.h> | 36 | #include <asm/system.h> |
| 37 | #include <linux/poll.h> | 37 | #include <linux/poll.h> |
| 38 | #include <linux/sched.h> | ||
| 38 | #include <linux/spinlock.h> | 39 | #include <linux/spinlock.h> |
| 39 | #include <linux/mutex.h> | 40 | #include <linux/mutex.h> |
| 40 | #include <linux/slab.h> | 41 | #include <linux/slab.h> |
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index abf4a2529f80..60697909ebdb 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c | |||
| @@ -227,7 +227,8 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack) | |||
| 227 | * cn_proc_mcast_ctl | 227 | * cn_proc_mcast_ctl |
| 228 | * @data: message sent from userspace via the connector | 228 | * @data: message sent from userspace via the connector |
| 229 | */ | 229 | */ |
| 230 | static void cn_proc_mcast_ctl(struct cn_msg *msg) | 230 | static void cn_proc_mcast_ctl(struct cn_msg *msg, |
| 231 | struct netlink_skb_parms *nsp) | ||
| 231 | { | 232 | { |
| 232 | enum proc_cn_mcast_op *mc_op = NULL; | 233 | enum proc_cn_mcast_op *mc_op = NULL; |
| 233 | int err = 0; | 234 | int err = 0; |
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 4e551e63b6dc..4f4ac82382f7 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c | |||
| @@ -15,8 +15,8 @@ module_param(ecc_enable_override, int, 0644); | |||
| 15 | 15 | ||
| 16 | /* Lookup table for all possible MC control instances */ | 16 | /* Lookup table for all possible MC control instances */ |
| 17 | struct amd64_pvt; | 17 | struct amd64_pvt; |
| 18 | static struct mem_ctl_info *mci_lookup[MAX_NUMNODES]; | 18 | static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES]; |
| 19 | static struct amd64_pvt *pvt_lookup[MAX_NUMNODES]; | 19 | static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES]; |
| 20 | 20 | ||
| 21 | /* | 21 | /* |
| 22 | * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only | 22 | * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only |
| @@ -189,7 +189,10 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw) | |||
| 189 | /* Map from a CSROW entry to the mask entry that operates on it */ | 189 | /* Map from a CSROW entry to the mask entry that operates on it */ |
| 190 | static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow) | 190 | static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow) |
| 191 | { | 191 | { |
| 192 | return csrow >> (pvt->num_dcsm >> 3); | 192 | if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F) |
| 193 | return csrow; | ||
| 194 | else | ||
| 195 | return csrow >> 1; | ||
| 193 | } | 196 | } |
| 194 | 197 | ||
| 195 | /* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */ | 198 | /* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */ |
| @@ -279,29 +282,26 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, | |||
| 279 | intlv_en = pvt->dram_IntlvEn[0]; | 282 | intlv_en = pvt->dram_IntlvEn[0]; |
| 280 | 283 | ||
| 281 | if (intlv_en == 0) { | 284 | if (intlv_en == 0) { |
| 282 | for (node_id = 0; ; ) { | 285 | for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) { |
| 283 | if (amd64_base_limit_match(pvt, sys_addr, node_id)) | 286 | if (amd64_base_limit_match(pvt, sys_addr, node_id)) |
| 284 | break; | 287 | goto found; |
| 285 | |||
| 286 | if (++node_id >= DRAM_REG_COUNT) | ||
| 287 | goto err_no_match; | ||
| 288 | } | 288 | } |
| 289 | goto found; | 289 | goto err_no_match; |
| 290 | } | 290 | } |
| 291 | 291 | ||
| 292 | if (unlikely((intlv_en != (0x01 << 8)) && | 292 | if (unlikely((intlv_en != 0x01) && |
| 293 | (intlv_en != (0x03 << 8)) && | 293 | (intlv_en != 0x03) && |
| 294 | (intlv_en != (0x07 << 8)))) { | 294 | (intlv_en != 0x07))) { |
| 295 | amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from " | 295 | amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from " |
| 296 | "IntlvEn field of DRAM Base Register for node 0: " | 296 | "IntlvEn field of DRAM Base Register for node 0: " |
| 297 | "This probably indicates a BIOS bug.\n", intlv_en); | 297 | "this probably indicates a BIOS bug.\n", intlv_en); |
| 298 | return NULL; | 298 | return NULL; |
| 299 | } | 299 | } |
| 300 | 300 | ||
| 301 | bits = (((u32) sys_addr) >> 12) & intlv_en; | 301 | bits = (((u32) sys_addr) >> 12) & intlv_en; |
| 302 | 302 | ||
| 303 | for (node_id = 0; ; ) { | 303 | for (node_id = 0; ; ) { |
| 304 | if ((pvt->dram_limit[node_id] & intlv_en) == bits) | 304 | if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits) |
| 305 | break; /* intlv_sel field matches */ | 305 | break; /* intlv_sel field matches */ |
| 306 | 306 | ||
| 307 | if (++node_id >= DRAM_REG_COUNT) | 307 | if (++node_id >= DRAM_REG_COUNT) |
| @@ -311,10 +311,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, | |||
| 311 | /* sanity test for sys_addr */ | 311 | /* sanity test for sys_addr */ |
| 312 | if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) { | 312 | if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) { |
| 313 | amd64_printk(KERN_WARNING, | 313 | amd64_printk(KERN_WARNING, |
| 314 | "%s(): sys_addr 0x%lx falls outside base/limit " | 314 | "%s(): sys_addr 0x%llx falls outside base/limit " |
| 315 | "address range for node %d with node interleaving " | 315 | "address range for node %d with node interleaving " |
| 316 | "enabled.\n", __func__, (unsigned long)sys_addr, | 316 | "enabled.\n", |
| 317 | node_id); | 317 | __func__, sys_addr, node_id); |
| 318 | return NULL; | 318 | return NULL; |
| 319 | } | 319 | } |
| 320 | 320 | ||
| @@ -377,7 +377,7 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr) | |||
| 377 | * base/mask register pair, test the condition shown near the start of | 377 | * base/mask register pair, test the condition shown near the start of |
| 378 | * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E). | 378 | * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E). |
| 379 | */ | 379 | */ |
| 380 | for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) { | 380 | for (csrow = 0; csrow < pvt->cs_count; csrow++) { |
| 381 | 381 | ||
| 382 | /* This DRAM chip select is disabled on this node */ | 382 | /* This DRAM chip select is disabled on this node */ |
| 383 | if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0) | 383 | if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0) |
| @@ -734,7 +734,7 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow, | |||
| 734 | u64 base, mask; | 734 | u64 base, mask; |
| 735 | 735 | ||
| 736 | pvt = mci->pvt_info; | 736 | pvt = mci->pvt_info; |
| 737 | BUG_ON((csrow < 0) || (csrow >= CHIPSELECT_COUNT)); | 737 | BUG_ON((csrow < 0) || (csrow >= pvt->cs_count)); |
| 738 | 738 | ||
| 739 | base = base_from_dct_base(pvt, csrow); | 739 | base = base_from_dct_base(pvt, csrow); |
| 740 | mask = mask_from_dct_mask(pvt, csrow); | 740 | mask = mask_from_dct_mask(pvt, csrow); |
| @@ -962,35 +962,27 @@ err_reg: | |||
| 962 | */ | 962 | */ |
| 963 | static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt) | 963 | static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt) |
| 964 | { | 964 | { |
| 965 | if (pvt->ext_model >= OPTERON_CPU_REV_F) { | 965 | |
| 966 | if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F) { | ||
| 967 | pvt->dcsb_base = REV_E_DCSB_BASE_BITS; | ||
| 968 | pvt->dcsm_mask = REV_E_DCSM_MASK_BITS; | ||
| 969 | pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS; | ||
| 970 | pvt->dcs_shift = REV_E_DCS_SHIFT; | ||
| 971 | pvt->cs_count = 8; | ||
| 972 | pvt->num_dcsm = 8; | ||
| 973 | } else { | ||
| 966 | pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS; | 974 | pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS; |
| 967 | pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS; | 975 | pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS; |
| 968 | pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS; | 976 | pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS; |
| 969 | pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT; | 977 | pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT; |
| 970 | 978 | ||
| 971 | switch (boot_cpu_data.x86) { | 979 | if (boot_cpu_data.x86 == 0x11) { |
| 972 | case 0xf: | 980 | pvt->cs_count = 4; |
| 973 | pvt->num_dcsm = REV_F_DCSM_COUNT; | 981 | pvt->num_dcsm = 2; |
| 974 | break; | 982 | } else { |
| 975 | 983 | pvt->cs_count = 8; | |
| 976 | case 0x10: | 984 | pvt->num_dcsm = 4; |
| 977 | pvt->num_dcsm = F10_DCSM_COUNT; | ||
| 978 | break; | ||
| 979 | |||
| 980 | case 0x11: | ||
| 981 | pvt->num_dcsm = F11_DCSM_COUNT; | ||
| 982 | break; | ||
| 983 | |||
| 984 | default: | ||
| 985 | amd64_printk(KERN_ERR, "Unsupported family!\n"); | ||
| 986 | break; | ||
| 987 | } | 985 | } |
| 988 | } else { | ||
| 989 | pvt->dcsb_base = REV_E_DCSB_BASE_BITS; | ||
| 990 | pvt->dcsm_mask = REV_E_DCSM_MASK_BITS; | ||
| 991 | pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS; | ||
| 992 | pvt->dcs_shift = REV_E_DCS_SHIFT; | ||
| 993 | pvt->num_dcsm = REV_E_DCSM_COUNT; | ||
| 994 | } | 986 | } |
| 995 | } | 987 | } |
| 996 | 988 | ||
| @@ -1003,7 +995,7 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt) | |||
| 1003 | 995 | ||
| 1004 | amd64_set_dct_base_and_mask(pvt); | 996 | amd64_set_dct_base_and_mask(pvt); |
| 1005 | 997 | ||
| 1006 | for (cs = 0; cs < CHIPSELECT_COUNT; cs++) { | 998 | for (cs = 0; cs < pvt->cs_count; cs++) { |
| 1007 | reg = K8_DCSB0 + (cs * 4); | 999 | reg = K8_DCSB0 + (cs * 4); |
| 1008 | err = pci_read_config_dword(pvt->dram_f2_ctl, reg, | 1000 | err = pci_read_config_dword(pvt->dram_f2_ctl, reg, |
| 1009 | &pvt->dcsb0[cs]); | 1001 | &pvt->dcsb0[cs]); |
| @@ -1130,7 +1122,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram) | |||
| 1130 | debugf0("Reading K8_DRAM_BASE_LOW failed\n"); | 1122 | debugf0("Reading K8_DRAM_BASE_LOW failed\n"); |
| 1131 | 1123 | ||
| 1132 | /* Extract parts into separate data entries */ | 1124 | /* Extract parts into separate data entries */ |
| 1133 | pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8; | 1125 | pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 24; |
| 1134 | pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7; | 1126 | pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7; |
| 1135 | pvt->dram_rw_en[dram] = (low & 0x3); | 1127 | pvt->dram_rw_en[dram] = (low & 0x3); |
| 1136 | 1128 | ||
| @@ -1143,7 +1135,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram) | |||
| 1143 | * Extract parts into separate data entries. Limit is the HIGHEST memory | 1135 | * Extract parts into separate data entries. Limit is the HIGHEST memory |
| 1144 | * location of the region, so lower 24 bits need to be all ones | 1136 | * location of the region, so lower 24 bits need to be all ones |
| 1145 | */ | 1137 | */ |
| 1146 | pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF; | 1138 | pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 24) | 0x00FFFFFF; |
| 1147 | pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7; | 1139 | pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7; |
| 1148 | pvt->dram_DstNode[dram] = (low & 0x7); | 1140 | pvt->dram_DstNode[dram] = (low & 0x7); |
| 1149 | } | 1141 | } |
| @@ -1193,7 +1185,7 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | |||
| 1193 | * different from the node that detected the error. | 1185 | * different from the node that detected the error. |
| 1194 | */ | 1186 | */ |
| 1195 | src_mci = find_mc_by_sys_addr(mci, SystemAddress); | 1187 | src_mci = find_mc_by_sys_addr(mci, SystemAddress); |
| 1196 | if (src_mci) { | 1188 | if (!src_mci) { |
| 1197 | amd64_mc_printk(mci, KERN_ERR, | 1189 | amd64_mc_printk(mci, KERN_ERR, |
| 1198 | "failed to map error address 0x%lx to a node\n", | 1190 | "failed to map error address 0x%lx to a node\n", |
| 1199 | (unsigned long)SystemAddress); | 1191 | (unsigned long)SystemAddress); |
| @@ -1376,8 +1368,8 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram) | |||
| 1376 | 1368 | ||
| 1377 | pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7; | 1369 | pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7; |
| 1378 | 1370 | ||
| 1379 | pvt->dram_base[dram] = (((((u64) high_base & 0x000000FF) << 32) | | 1371 | pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) | |
| 1380 | ((u64) low_base & 0xFFFF0000))) << 8; | 1372 | (((u64)low_base & 0xFFFF0000) << 24); |
| 1381 | 1373 | ||
| 1382 | low_offset = K8_DRAM_LIMIT_LOW + (dram << 3); | 1374 | low_offset = K8_DRAM_LIMIT_LOW + (dram << 3); |
| 1383 | high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3); | 1375 | high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3); |
| @@ -1398,9 +1390,9 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram) | |||
| 1398 | * Extract address values and form a LIMIT address. Limit is the HIGHEST | 1390 | * Extract address values and form a LIMIT address. Limit is the HIGHEST |
| 1399 | * memory location of the region, so low 24 bits need to be all ones. | 1391 | * memory location of the region, so low 24 bits need to be all ones. |
| 1400 | */ | 1392 | */ |
| 1401 | low_limit |= 0x0000FFFF; | 1393 | pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) | |
| 1402 | pvt->dram_limit[dram] = | 1394 | (((u64) low_limit & 0xFFFF0000) << 24) | |
| 1403 | ((((u64) high_limit << 32) + (u64) low_limit) << 8) | (0xFF); | 1395 | 0x00FFFFFF; |
| 1404 | } | 1396 | } |
| 1405 | 1397 | ||
| 1406 | static void f10_read_dram_ctl_register(struct amd64_pvt *pvt) | 1398 | static void f10_read_dram_ctl_register(struct amd64_pvt *pvt) |
| @@ -1566,7 +1558,7 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs) | |||
| 1566 | 1558 | ||
| 1567 | debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs); | 1559 | debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs); |
| 1568 | 1560 | ||
| 1569 | for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) { | 1561 | for (csrow = 0; csrow < pvt->cs_count; csrow++) { |
| 1570 | 1562 | ||
| 1571 | cs_base = amd64_get_dct_base(pvt, cs, csrow); | 1563 | cs_base = amd64_get_dct_base(pvt, cs, csrow); |
| 1572 | if (!(cs_base & K8_DCSB_CS_ENABLE)) | 1564 | if (!(cs_base & K8_DCSB_CS_ENABLE)) |
| @@ -2497,7 +2489,7 @@ err_reg: | |||
| 2497 | * NOTE: CPU Revision Dependent code | 2489 | * NOTE: CPU Revision Dependent code |
| 2498 | * | 2490 | * |
| 2499 | * Input: | 2491 | * Input: |
| 2500 | * @csrow_nr ChipSelect Row Number (0..CHIPSELECT_COUNT-1) | 2492 | * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1) |
| 2501 | * k8 private pointer to --> | 2493 | * k8 private pointer to --> |
| 2502 | * DRAM Bank Address mapping register | 2494 | * DRAM Bank Address mapping register |
| 2503 | * node_id | 2495 | * node_id |
| @@ -2577,7 +2569,7 @@ static int amd64_init_csrows(struct mem_ctl_info *mci) | |||
| 2577 | (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled" | 2569 | (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled" |
| 2578 | ); | 2570 | ); |
| 2579 | 2571 | ||
| 2580 | for (i = 0; i < CHIPSELECT_COUNT; i++) { | 2572 | for (i = 0; i < pvt->cs_count; i++) { |
| 2581 | csrow = &mci->csrows[i]; | 2573 | csrow = &mci->csrows[i]; |
| 2582 | 2574 | ||
| 2583 | if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) { | 2575 | if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) { |
| @@ -2988,7 +2980,7 @@ static int amd64_init_2nd_stage(struct amd64_pvt *pvt) | |||
| 2988 | goto err_exit; | 2980 | goto err_exit; |
| 2989 | 2981 | ||
| 2990 | ret = -ENOMEM; | 2982 | ret = -ENOMEM; |
| 2991 | mci = edac_mc_alloc(0, CHIPSELECT_COUNT, pvt->channel_count, node_id); | 2983 | mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id); |
| 2992 | if (!mci) | 2984 | if (!mci) |
| 2993 | goto err_exit; | 2985 | goto err_exit; |
| 2994 | 2986 | ||
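For the k8/f10 read_dram_base_limit() changes above, the base and limit are now assembled by masking each register half and shifting it into place in a single expression, with the limit's low 24 bits forced to ones. The short standalone snippet below only exercises those expressions with arbitrary register values so the bit packing is easy to inspect; it makes no claim about the BKDG field layout beyond what the new code does:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* arbitrary register contents, chosen only to exercise the shifts */
		uint32_t high_base = 0x01, low_base = 0x00400003;
		uint32_t high_limit = 0x01, low_limit = 0x007f0000;

		uint64_t base  = (((uint64_t)high_base  & 0x000000FF) << 40) |
				 (((uint64_t)low_base   & 0xFFFF0000) << 24);
		uint64_t limit = (((uint64_t)high_limit & 0x000000FF) << 40) |
				 (((uint64_t)low_limit  & 0xFFFF0000) << 24) |
				 0x00FFFFFF;

		printf("base  = 0x%016llx\n", (unsigned long long)base);
		printf("limit = 0x%016llx\n", (unsigned long long)limit);
		return 0;
	}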
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 8ea07e2715dc..c6f359a85207 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h | |||
| @@ -132,6 +132,8 @@ | |||
| 132 | #define EDAC_AMD64_VERSION " Ver: 3.2.0 " __DATE__ | 132 | #define EDAC_AMD64_VERSION " Ver: 3.2.0 " __DATE__ |
| 133 | #define EDAC_MOD_STR "amd64_edac" | 133 | #define EDAC_MOD_STR "amd64_edac" |
| 134 | 134 | ||
| 135 | #define EDAC_MAX_NUMNODES 8 | ||
| 136 | |||
| 135 | /* Extended Model from CPUID, for CPU Revision numbers */ | 137 | /* Extended Model from CPUID, for CPU Revision numbers */ |
| 136 | #define OPTERON_CPU_LE_REV_C 0 | 138 | #define OPTERON_CPU_LE_REV_C 0 |
| 137 | #define OPTERON_CPU_REV_D 1 | 139 | #define OPTERON_CPU_REV_D 1 |
| @@ -142,7 +144,7 @@ | |||
| 142 | #define OPTERON_CPU_REV_FA 5 | 144 | #define OPTERON_CPU_REV_FA 5 |
| 143 | 145 | ||
| 144 | /* Hardware limit on ChipSelect rows per MC and processors per system */ | 146 | /* Hardware limit on ChipSelect rows per MC and processors per system */ |
| 145 | #define CHIPSELECT_COUNT 8 | 147 | #define MAX_CS_COUNT 8 |
| 146 | #define DRAM_REG_COUNT 8 | 148 | #define DRAM_REG_COUNT 8 |
| 147 | 149 | ||
| 148 | 150 | ||
| @@ -193,7 +195,6 @@ | |||
| 193 | */ | 195 | */ |
| 194 | #define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL) | 196 | #define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL) |
| 195 | #define REV_E_DCS_SHIFT 4 | 197 | #define REV_E_DCS_SHIFT 4 |
| 196 | #define REV_E_DCSM_COUNT 8 | ||
| 197 | 198 | ||
| 198 | #define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL) | 199 | #define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL) |
| 199 | #define REV_F_F1Xh_DCS_SHIFT 8 | 200 | #define REV_F_F1Xh_DCS_SHIFT 8 |
| @@ -204,9 +205,6 @@ | |||
| 204 | */ | 205 | */ |
| 205 | #define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL) | 206 | #define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL) |
| 206 | #define REV_F_DCS_SHIFT 8 | 207 | #define REV_F_DCS_SHIFT 8 |
| 207 | #define REV_F_DCSM_COUNT 4 | ||
| 208 | #define F10_DCSM_COUNT 4 | ||
| 209 | #define F11_DCSM_COUNT 2 | ||
| 210 | 208 | ||
| 211 | /* DRAM CS Mask Registers */ | 209 | /* DRAM CS Mask Registers */ |
| 212 | #define K8_DCSM0 0x60 | 210 | #define K8_DCSM0 0x60 |
| @@ -374,13 +372,11 @@ enum { | |||
| 374 | 372 | ||
| 375 | #define SET_NB_DRAM_INJECTION_WRITE(word, bits) \ | 373 | #define SET_NB_DRAM_INJECTION_WRITE(word, bits) \ |
| 376 | (BIT(((word) & 0xF) + 20) | \ | 374 | (BIT(((word) & 0xF) + 20) | \ |
| 377 | BIT(17) | \ | 375 | BIT(17) | bits) |
| 378 | ((bits) & 0xF)) | ||
| 379 | 376 | ||
| 380 | #define SET_NB_DRAM_INJECTION_READ(word, bits) \ | 377 | #define SET_NB_DRAM_INJECTION_READ(word, bits) \ |
| 381 | (BIT(((word) & 0xF) + 20) | \ | 378 | (BIT(((word) & 0xF) + 20) | \ |
| 382 | BIT(16) | \ | 379 | BIT(16) | bits) |
| 383 | ((bits) & 0xF)) | ||
| 384 | 380 | ||
| 385 | #define K8_NBCAP 0xE8 | 381 | #define K8_NBCAP 0xE8 |
| 386 | #define K8_NBCAP_CORES (BIT(12)|BIT(13)) | 382 | #define K8_NBCAP_CORES (BIT(12)|BIT(13)) |
| @@ -445,12 +441,12 @@ struct amd64_pvt { | |||
| 445 | u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */ | 441 | u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */ |
| 446 | 442 | ||
| 447 | /* DRAM CS Base Address Registers F2x[1,0][5C:40] */ | 443 | /* DRAM CS Base Address Registers F2x[1,0][5C:40] */ |
| 448 | u32 dcsb0[CHIPSELECT_COUNT]; | 444 | u32 dcsb0[MAX_CS_COUNT]; |
| 449 | u32 dcsb1[CHIPSELECT_COUNT]; | 445 | u32 dcsb1[MAX_CS_COUNT]; |
| 450 | 446 | ||
| 451 | /* DRAM CS Mask Registers F2x[1,0][6C:60] */ | 447 | /* DRAM CS Mask Registers F2x[1,0][6C:60] */ |
| 452 | u32 dcsm0[CHIPSELECT_COUNT]; | 448 | u32 dcsm0[MAX_CS_COUNT]; |
| 453 | u32 dcsm1[CHIPSELECT_COUNT]; | 449 | u32 dcsm1[MAX_CS_COUNT]; |
| 454 | 450 | ||
| 455 | /* | 451 | /* |
| 456 | * Decoded parts of DRAM BASE and LIMIT Registers | 452 | * Decoded parts of DRAM BASE and LIMIT Registers |
| @@ -470,6 +466,7 @@ struct amd64_pvt { | |||
| 470 | */ | 466 | */ |
| 471 | u32 dcsb_base; /* DCSB base bits */ | 467 | u32 dcsb_base; /* DCSB base bits */ |
| 472 | u32 dcsm_mask; /* DCSM mask bits */ | 468 | u32 dcsm_mask; /* DCSM mask bits */ |
| 469 | u32 cs_count; /* num chip selects (== num DCSB registers) */ | ||
| 473 | u32 num_dcsm; /* Number of DCSM registers */ | 470 | u32 num_dcsm; /* Number of DCSM registers */ |
| 474 | u32 dcs_mask_notused; /* DCSM notused mask bits */ | 471 | u32 dcs_mask_notused; /* DCSM notused mask bits */ |
| 475 | u32 dcs_shift; /* DCSB and DCSM shift value */ | 472 | u32 dcs_shift; /* DCSB and DCSM shift value */ |
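One consequence of the header change above: SET_NB_DRAM_INJECTION_WRITE/READ no longer mask the injection vector with 0xF themselves; they OR in bits as given, because the sysfs store path (see the amd64_edac_inj.c hunks that follow) now rejects out-of-range vectors before they ever reach the macro. A small illustration of the expansion, using the kernel's BIT(n) == (1UL << (n)):

	/* word = 2, bits = 0x5 */
	u32 inj = SET_NB_DRAM_INJECTION_WRITE(2, 0x5);
	/* == BIT(22) | BIT(17) | 0x5 == 0x00420005 */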
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c index d3675b76b3a7..29f1f7a612d9 100644 --- a/drivers/edac/amd64_edac_inj.c +++ b/drivers/edac/amd64_edac_inj.c | |||
| @@ -1,5 +1,11 @@ | |||
| 1 | #include "amd64_edac.h" | 1 | #include "amd64_edac.h" |
| 2 | 2 | ||
| 3 | static ssize_t amd64_inject_section_show(struct mem_ctl_info *mci, char *buf) | ||
| 4 | { | ||
| 5 | struct amd64_pvt *pvt = mci->pvt_info; | ||
| 6 | return sprintf(buf, "0x%x\n", pvt->injection.section); | ||
| 7 | } | ||
| 8 | |||
| 3 | /* | 9 | /* |
| 4 | * store error injection section value which refers to one of 4 16-byte sections | 10 | * store error injection section value which refers to one of 4 16-byte sections |
| 5 | * within a 64-byte cacheline | 11 | * within a 64-byte cacheline |
| @@ -15,12 +21,26 @@ static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci, | |||
| 15 | 21 | ||
| 16 | ret = strict_strtoul(data, 10, &value); | 22 | ret = strict_strtoul(data, 10, &value); |
| 17 | if (ret != -EINVAL) { | 23 | if (ret != -EINVAL) { |
| 24 | |||
| 25 | if (value > 3) { | ||
| 26 | amd64_printk(KERN_WARNING, | ||
| 27 | "%s: invalid section 0x%lx\n", | ||
| 28 | __func__, value); | ||
| 29 | return -EINVAL; | ||
| 30 | } | ||
| 31 | |||
| 18 | pvt->injection.section = (u32) value; | 32 | pvt->injection.section = (u32) value; |
| 19 | return count; | 33 | return count; |
| 20 | } | 34 | } |
| 21 | return ret; | 35 | return ret; |
| 22 | } | 36 | } |
| 23 | 37 | ||
| 38 | static ssize_t amd64_inject_word_show(struct mem_ctl_info *mci, char *buf) | ||
| 39 | { | ||
| 40 | struct amd64_pvt *pvt = mci->pvt_info; | ||
| 41 | return sprintf(buf, "0x%x\n", pvt->injection.word); | ||
| 42 | } | ||
| 43 | |||
| 24 | /* | 44 | /* |
| 25 | * store error injection word value which refers to one of 9 16-bit word of the | 45 | * store error injection word value which refers to one of 9 16-bit word of the |
| 26 | * 16-byte (128-bit + ECC bits) section | 46 | * 16-byte (128-bit + ECC bits) section |
| @@ -37,14 +57,25 @@ static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci, | |||
| 37 | ret = strict_strtoul(data, 10, &value); | 57 | ret = strict_strtoul(data, 10, &value); |
| 38 | if (ret != -EINVAL) { | 58 | if (ret != -EINVAL) { |
| 39 | 59 | ||
| 40 | value = (value <= 8) ? value : 0; | 60 | if (value > 8) { |
| 41 | pvt->injection.word = (u32) value; | 61 | amd64_printk(KERN_WARNING, |
| 62 | "%s: invalid word 0x%lx\n", | ||
| 63 | __func__, value); | ||
| 64 | return -EINVAL; | ||
| 65 | } | ||
| 42 | 66 | ||
| 67 | pvt->injection.word = (u32) value; | ||
| 43 | return count; | 68 | return count; |
| 44 | } | 69 | } |
| 45 | return ret; | 70 | return ret; |
| 46 | } | 71 | } |
| 47 | 72 | ||
| 73 | static ssize_t amd64_inject_ecc_vector_show(struct mem_ctl_info *mci, char *buf) | ||
| 74 | { | ||
| 75 | struct amd64_pvt *pvt = mci->pvt_info; | ||
| 76 | return sprintf(buf, "0x%x\n", pvt->injection.bit_map); | ||
| 77 | } | ||
| 78 | |||
| 48 | /* | 79 | /* |
| 49 | * store 16 bit error injection vector which enables injecting errors to the | 80 | * store 16 bit error injection vector which enables injecting errors to the |
| 50 | * corresponding bit within the error injection word above. When used during a | 81 | * corresponding bit within the error injection word above. When used during a |
| @@ -60,8 +91,14 @@ static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci, | |||
| 60 | ret = strict_strtoul(data, 16, &value); | 91 | ret = strict_strtoul(data, 16, &value); |
| 61 | if (ret != -EINVAL) { | 92 | if (ret != -EINVAL) { |
| 62 | 93 | ||
| 63 | pvt->injection.bit_map = (u32) value & 0xFFFF; | 94 | if (value & 0xFFFF0000) { |
| 95 | amd64_printk(KERN_WARNING, | ||
| 96 | "%s: invalid EccVector: 0x%lx\n", | ||
| 97 | __func__, value); | ||
| 98 | return -EINVAL; | ||
| 99 | } | ||
| 64 | 100 | ||
| 101 | pvt->injection.bit_map = (u32) value; | ||
| 65 | return count; | 102 | return count; |
| 66 | } | 103 | } |
| 67 | return ret; | 104 | return ret; |
| @@ -147,7 +184,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = { | |||
| 147 | .name = "inject_section", | 184 | .name = "inject_section", |
| 148 | .mode = (S_IRUGO | S_IWUSR) | 185 | .mode = (S_IRUGO | S_IWUSR) |
| 149 | }, | 186 | }, |
| 150 | .show = NULL, | 187 | .show = amd64_inject_section_show, |
| 151 | .store = amd64_inject_section_store, | 188 | .store = amd64_inject_section_store, |
| 152 | }, | 189 | }, |
| 153 | { | 190 | { |
| @@ -155,7 +192,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = { | |||
| 155 | .name = "inject_word", | 192 | .name = "inject_word", |
| 156 | .mode = (S_IRUGO | S_IWUSR) | 193 | .mode = (S_IRUGO | S_IWUSR) |
| 157 | }, | 194 | }, |
| 158 | .show = NULL, | 195 | .show = amd64_inject_word_show, |
| 159 | .store = amd64_inject_word_store, | 196 | .store = amd64_inject_word_store, |
| 160 | }, | 197 | }, |
| 161 | { | 198 | { |
| @@ -163,7 +200,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = { | |||
| 163 | .name = "inject_ecc_vector", | 200 | .name = "inject_ecc_vector", |
| 164 | .mode = (S_IRUGO | S_IWUSR) | 201 | .mode = (S_IRUGO | S_IWUSR) |
| 165 | }, | 202 | }, |
| 166 | .show = NULL, | 203 | .show = amd64_inject_ecc_vector_show, |
| 167 | .store = amd64_inject_ecc_vector_store, | 204 | .store = amd64_inject_ecc_vector_store, |
| 168 | }, | 205 | }, |
| 169 | { | 206 | { |
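The injection attributes above all follow the same shape: a show handler that prints the cached value and a store handler that parses with strict_strtoul() and rejects out-of-range input instead of silently clamping it. A condensed sketch of that pattern for a hypothetical attribute (the name and the limit of 8 are illustrative):

	static ssize_t foo_inject_show(struct mem_ctl_info *mci, char *buf)
	{
		struct amd64_pvt *pvt = mci->pvt_info;

		return sprintf(buf, "0x%x\n", pvt->injection.word);
	}

	static ssize_t foo_inject_store(struct mem_ctl_info *mci,
					const char *data, size_t count)
	{
		struct amd64_pvt *pvt = mci->pvt_info;
		unsigned long value;
		int ret;

		ret = strict_strtoul(data, 10, &value);
		if (ret != -EINVAL) {
			if (value > 8)		/* reject, don't clamp */
				return -EINVAL;
			pvt->injection.word = (u32)value;
			return count;
		}
		return ret;
	}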
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index ced186d7e9a9..5089331544ed 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <linux/mutex.h> | 33 | #include <linux/mutex.h> |
| 34 | #include <linux/poll.h> | 34 | #include <linux/poll.h> |
| 35 | #include <linux/preempt.h> | 35 | #include <linux/preempt.h> |
| 36 | #include <linux/sched.h> | ||
| 36 | #include <linux/spinlock.h> | 37 | #include <linux/spinlock.h> |
| 37 | #include <linux/time.h> | 38 | #include <linux/time.h> |
| 38 | #include <linux/uaccess.h> | 39 | #include <linux/uaccess.h> |
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c index 420a96e7f2db..051d1ebbd287 100644 --- a/drivers/firmware/iscsi_ibft.c +++ b/drivers/firmware/iscsi_ibft.c | |||
| @@ -939,7 +939,7 @@ static int __init ibft_init(void) | |||
| 939 | 939 | ||
| 940 | if (ibft_addr) { | 940 | if (ibft_addr) { |
| 941 | printk(KERN_INFO "iBFT detected at 0x%llx.\n", | 941 | printk(KERN_INFO "iBFT detected at 0x%llx.\n", |
| 942 | (u64)virt_to_phys((void *)ibft_addr)); | 942 | (u64)isa_virt_to_bus(ibft_addr)); |
| 943 | 943 | ||
| 944 | rc = ibft_check_device(); | 944 | rc = ibft_check_device(); |
| 945 | if (rc) | 945 | if (rc) |
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c index d53fbbfefa3e..dfb15c06c88f 100644 --- a/drivers/firmware/iscsi_ibft_find.c +++ b/drivers/firmware/iscsi_ibft_find.c | |||
| @@ -65,10 +65,10 @@ void __init reserve_ibft_region(void) | |||
| 65 | * so skip that area */ | 65 | * so skip that area */ |
| 66 | if (pos == VGA_MEM) | 66 | if (pos == VGA_MEM) |
| 67 | pos += VGA_SIZE; | 67 | pos += VGA_SIZE; |
| 68 | virt = phys_to_virt(pos); | 68 | virt = isa_bus_to_virt(pos); |
| 69 | if (memcmp(virt, IBFT_SIGN, IBFT_SIGN_LEN) == 0) { | 69 | if (memcmp(virt, IBFT_SIGN, IBFT_SIGN_LEN) == 0) { |
| 70 | unsigned long *addr = | 70 | unsigned long *addr = |
| 71 | (unsigned long *)phys_to_virt(pos + 4); | 71 | (unsigned long *)isa_bus_to_virt(pos + 4); |
| 72 | len = *addr; | 72 | len = *addr; |
| 73 | /* if the length of the table extends past 1M, | 73 | /* if the length of the table extends past 1M, |
| 74 | * the table cannot be valid. */ | 74 | * the table cannot be valid. */ |
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 8e7b0ebece0c..5cae0b3eee9b 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
| @@ -1556,8 +1556,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev, | |||
| 1556 | struct drm_crtc *crtc; | 1556 | struct drm_crtc *crtc; |
| 1557 | int ret = 0; | 1557 | int ret = 0; |
| 1558 | 1558 | ||
| 1559 | DRM_DEBUG_KMS("\n"); | ||
| 1560 | |||
| 1561 | if (!req->flags) { | 1559 | if (!req->flags) { |
| 1562 | DRM_ERROR("no operation set\n"); | 1560 | DRM_ERROR("no operation set\n"); |
| 1563 | return -EINVAL; | 1561 | return -EINVAL; |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 819ddcbfcce5..23dc9c115fd9 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
| @@ -454,6 +454,96 @@ out_free: | |||
| 454 | } | 454 | } |
| 455 | EXPORT_SYMBOL(drm_fb_helper_init_crtc_count); | 455 | EXPORT_SYMBOL(drm_fb_helper_init_crtc_count); |
| 456 | 456 | ||
| 457 | static void setcolreg(struct drm_crtc *crtc, u16 red, u16 green, | ||
| 458 | u16 blue, u16 regno, struct fb_info *info) | ||
| 459 | { | ||
| 460 | struct drm_fb_helper *fb_helper = info->par; | ||
| 461 | struct drm_framebuffer *fb = fb_helper->fb; | ||
| 462 | int pindex; | ||
| 463 | |||
| 464 | pindex = regno; | ||
| 465 | |||
| 466 | if (fb->bits_per_pixel == 16) { | ||
| 467 | pindex = regno << 3; | ||
| 468 | |||
| 469 | if (fb->depth == 16 && regno > 63) | ||
| 470 | return; | ||
| 471 | if (fb->depth == 15 && regno > 31) | ||
| 472 | return; | ||
| 473 | |||
| 474 | if (fb->depth == 16) { | ||
| 475 | u16 r, g, b; | ||
| 476 | int i; | ||
| 477 | if (regno < 32) { | ||
| 478 | for (i = 0; i < 8; i++) | ||
| 479 | fb_helper->funcs->gamma_set(crtc, red, | ||
| 480 | green, blue, pindex + i); | ||
| 481 | } | ||
| 482 | |||
| 483 | fb_helper->funcs->gamma_get(crtc, &r, | ||
| 484 | &g, &b, | ||
| 485 | pindex >> 1); | ||
| 486 | |||
| 487 | for (i = 0; i < 4; i++) | ||
| 488 | fb_helper->funcs->gamma_set(crtc, r, | ||
| 489 | green, b, | ||
| 490 | (pindex >> 1) + i); | ||
| 491 | } | ||
| 492 | } | ||
| 493 | |||
| 494 | if (fb->depth != 16) | ||
| 495 | fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex); | ||
| 496 | |||
| 497 | if (regno < 16 && info->fix.visual == FB_VISUAL_DIRECTCOLOR) { | ||
| 498 | ((u32 *) fb->pseudo_palette)[regno] = | ||
| 499 | (regno << info->var.red.offset) | | ||
| 500 | (regno << info->var.green.offset) | | ||
| 501 | (regno << info->var.blue.offset); | ||
| 502 | } | ||
| 503 | } | ||
| 504 | |||
| 505 | int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) | ||
| 506 | { | ||
| 507 | struct drm_fb_helper *fb_helper = info->par; | ||
| 508 | struct drm_device *dev = fb_helper->dev; | ||
| 509 | u16 *red, *green, *blue, *transp; | ||
| 510 | struct drm_crtc *crtc; | ||
| 511 | int i, rc = 0; | ||
| 512 | int start; | ||
| 513 | |||
| 514 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
| 515 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | ||
| 516 | for (i = 0; i < fb_helper->crtc_count; i++) { | ||
| 517 | if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) | ||
| 518 | break; | ||
| 519 | } | ||
| 520 | if (i == fb_helper->crtc_count) | ||
| 521 | continue; | ||
| 522 | |||
| 523 | red = cmap->red; | ||
| 524 | green = cmap->green; | ||
| 525 | blue = cmap->blue; | ||
| 526 | transp = cmap->transp; | ||
| 527 | start = cmap->start; | ||
| 528 | |||
| 529 | for (i = 0; i < cmap->len; i++) { | ||
| 530 | u16 hred, hgreen, hblue, htransp = 0xffff; | ||
| 531 | |||
| 532 | hred = *red++; | ||
| 533 | hgreen = *green++; | ||
| 534 | hblue = *blue++; | ||
| 535 | |||
| 536 | if (transp) | ||
| 537 | htransp = *transp++; | ||
| 538 | |||
| 539 | setcolreg(crtc, hred, hgreen, hblue, start++, info); | ||
| 540 | } | ||
| 541 | crtc_funcs->load_lut(crtc); | ||
| 542 | } | ||
| 543 | return rc; | ||
| 544 | } | ||
| 545 | EXPORT_SYMBOL(drm_fb_helper_setcmap); | ||
| 546 | |||
| 457 | int drm_fb_helper_setcolreg(unsigned regno, | 547 | int drm_fb_helper_setcolreg(unsigned regno, |
| 458 | unsigned red, | 548 | unsigned red, |
| 459 | unsigned green, | 549 | unsigned green, |
| @@ -466,9 +556,11 @@ int drm_fb_helper_setcolreg(unsigned regno, | |||
| 466 | struct drm_crtc *crtc; | 556 | struct drm_crtc *crtc; |
| 467 | int i; | 557 | int i; |
| 468 | 558 | ||
| 469 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 559 | if (regno > 255) |
| 470 | struct drm_framebuffer *fb = fb_helper->fb; | 560 | return 1; |
| 471 | 561 | ||
| 562 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
| 563 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | ||
| 472 | for (i = 0; i < fb_helper->crtc_count; i++) { | 564 | for (i = 0; i < fb_helper->crtc_count; i++) { |
| 473 | if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) | 565 | if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) |
| 474 | break; | 566 | break; |
| @@ -476,35 +568,9 @@ int drm_fb_helper_setcolreg(unsigned regno, | |||
| 476 | if (i == fb_helper->crtc_count) | 568 | if (i == fb_helper->crtc_count) |
| 477 | continue; | 569 | continue; |
| 478 | 570 | ||
| 479 | if (regno > 255) | ||
| 480 | return 1; | ||
| 481 | |||
| 482 | if (fb->depth == 8) { | ||
| 483 | fb_helper->funcs->gamma_set(crtc, red, green, blue, regno); | ||
| 484 | return 0; | ||
| 485 | } | ||
| 486 | 571 | ||
| 487 | if (regno < 16) { | 572 | setcolreg(crtc, red, green, blue, regno, info); |
| 488 | switch (fb->depth) { | 573 | crtc_funcs->load_lut(crtc); |
| 489 | case 15: | ||
| 490 | fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) | | ||
| 491 | ((green & 0xf800) >> 6) | | ||
| 492 | ((blue & 0xf800) >> 11); | ||
| 493 | break; | ||
| 494 | case 16: | ||
| 495 | fb->pseudo_palette[regno] = (red & 0xf800) | | ||
| 496 | ((green & 0xfc00) >> 5) | | ||
| 497 | ((blue & 0xf800) >> 11); | ||
| 498 | break; | ||
| 499 | case 24: | ||
| 500 | case 32: | ||
| 501 | fb->pseudo_palette[regno] = | ||
| 502 | (((red >> 8) & 0xff) << info->var.red.offset) | | ||
| 503 | (((green >> 8) & 0xff) << info->var.green.offset) | | ||
| 504 | (((blue >> 8) & 0xff) << info->var.blue.offset); | ||
| 505 | break; | ||
| 506 | } | ||
| 507 | } | ||
| 508 | } | 574 | } |
| 509 | return 0; | 575 | return 0; |
| 510 | } | 576 | } |
| @@ -674,6 +740,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, | |||
| 674 | EXPORT_SYMBOL(drm_fb_helper_pan_display); | 740 | EXPORT_SYMBOL(drm_fb_helper_pan_display); |
| 675 | 741 | ||
| 676 | int drm_fb_helper_single_fb_probe(struct drm_device *dev, | 742 | int drm_fb_helper_single_fb_probe(struct drm_device *dev, |
| 743 | int preferred_bpp, | ||
| 677 | int (*fb_create)(struct drm_device *dev, | 744 | int (*fb_create)(struct drm_device *dev, |
| 678 | uint32_t fb_width, | 745 | uint32_t fb_width, |
| 679 | uint32_t fb_height, | 746 | uint32_t fb_height, |
| @@ -696,6 +763,11 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev, | |||
| 696 | struct drm_fb_helper *fb_helper; | 763 | struct drm_fb_helper *fb_helper; |
| 697 | uint32_t surface_depth = 24, surface_bpp = 32; | 764 | uint32_t surface_depth = 24, surface_bpp = 32; |
| 698 | 765 | ||
| 766 | /* if driver picks 8 or 16 by default use that | ||
| 767 | for both depth/bpp */ | ||
| 768 | if (preferred_bpp != surface_bpp) { | ||
| 769 | surface_depth = surface_bpp = preferred_bpp; | ||
| 770 | } | ||
| 699 | /* first up get a count of crtcs now in use and new min/maxes width/heights */ | 771 | /* first up get a count of crtcs now in use and new min/maxes width/heights */ |
| 700 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 772 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
| 701 | struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private; | 773 | struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private; |
| @@ -851,10 +923,12 @@ void drm_fb_helper_free(struct drm_fb_helper *helper) | |||
| 851 | } | 923 | } |
| 852 | EXPORT_SYMBOL(drm_fb_helper_free); | 924 | EXPORT_SYMBOL(drm_fb_helper_free); |
| 853 | 925 | ||
| 854 | void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch) | 926 | void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, |
| 927 | uint32_t depth) | ||
| 855 | { | 928 | { |
| 856 | info->fix.type = FB_TYPE_PACKED_PIXELS; | 929 | info->fix.type = FB_TYPE_PACKED_PIXELS; |
| 857 | info->fix.visual = FB_VISUAL_TRUECOLOR; | 930 | info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR : |
| 931 | FB_VISUAL_DIRECTCOLOR; | ||
| 858 | info->fix.type_aux = 0; | 932 | info->fix.type_aux = 0; |
| 859 | info->fix.xpanstep = 1; /* doing it in hw */ | 933 | info->fix.xpanstep = 1; /* doing it in hw */ |
| 860 | info->fix.ypanstep = 1; /* doing it in hw */ | 934 | info->fix.ypanstep = 1; /* doing it in hw */ |
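To use the new drm_fb_helper_setcmap() path a driver has to expose it through its fb_ops and give the helper a way to read LUT entries back (gamma_get) and to push the LUT to hardware (the crtc helper's load_lut), as the i915 and radeon hunks below do. Roughly, the driver-side wiring looks like this (the foo_* names are placeholders):

	static struct fb_ops foofb_ops = {
		.owner      = THIS_MODULE,
		/* ... fillrect/copyarea/imageblit/pan/blank hooks ... */
		.fb_setcmap = drm_fb_helper_setcmap,	/* new cmap entry point */
	};

	static struct drm_fb_helper_funcs foo_fb_helper_funcs = {
		.gamma_set = foo_crtc_fb_gamma_set,	/* write one LUT entry */
		.gamma_get = foo_crtc_fb_gamma_get,	/* read one back (new) */
	};

	/* and in the drm_crtc_helper_funcs: .load_lut = foo_crtc_load_lut, */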
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 93ff6c03733e..ffa39671751f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -3244,6 +3244,16 @@ void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | |||
| 3244 | intel_crtc->lut_b[regno] = blue >> 8; | 3244 | intel_crtc->lut_b[regno] = blue >> 8; |
| 3245 | } | 3245 | } |
| 3246 | 3246 | ||
| 3247 | void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
| 3248 | u16 *blue, int regno) | ||
| 3249 | { | ||
| 3250 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
| 3251 | |||
| 3252 | *red = intel_crtc->lut_r[regno] << 8; | ||
| 3253 | *green = intel_crtc->lut_g[regno] << 8; | ||
| 3254 | *blue = intel_crtc->lut_b[regno] << 8; | ||
| 3255 | } | ||
| 3256 | |||
| 3247 | static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | 3257 | static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, |
| 3248 | u16 *blue, uint32_t size) | 3258 | u16 *blue, uint32_t size) |
| 3249 | { | 3259 | { |
| @@ -3835,6 +3845,7 @@ static const struct drm_crtc_helper_funcs intel_helper_funcs = { | |||
| 3835 | .mode_set_base = intel_pipe_set_base, | 3845 | .mode_set_base = intel_pipe_set_base, |
| 3836 | .prepare = intel_crtc_prepare, | 3846 | .prepare = intel_crtc_prepare, |
| 3837 | .commit = intel_crtc_commit, | 3847 | .commit = intel_crtc_commit, |
| 3848 | .load_lut = intel_crtc_load_lut, | ||
| 3838 | }; | 3849 | }; |
| 3839 | 3850 | ||
| 3840 | static const struct drm_crtc_funcs intel_crtc_funcs = { | 3851 | static const struct drm_crtc_funcs intel_crtc_funcs = { |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 8aa4b7f30daa..ef61fe9507e2 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -175,6 +175,8 @@ extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc); | |||
| 175 | extern void intelfb_restore(void); | 175 | extern void intelfb_restore(void); |
| 176 | extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | 176 | extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
| 177 | u16 blue, int regno); | 177 | u16 blue, int regno); |
| 178 | extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
| 179 | u16 *blue, int regno); | ||
| 178 | 180 | ||
| 179 | extern int intel_framebuffer_create(struct drm_device *dev, | 181 | extern int intel_framebuffer_create(struct drm_device *dev, |
| 180 | struct drm_mode_fb_cmd *mode_cmd, | 182 | struct drm_mode_fb_cmd *mode_cmd, |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index e85d7e9eed7d..2b0fe54cd92c 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
| @@ -60,10 +60,12 @@ static struct fb_ops intelfb_ops = { | |||
| 60 | .fb_imageblit = cfb_imageblit, | 60 | .fb_imageblit = cfb_imageblit, |
| 61 | .fb_pan_display = drm_fb_helper_pan_display, | 61 | .fb_pan_display = drm_fb_helper_pan_display, |
| 62 | .fb_blank = drm_fb_helper_blank, | 62 | .fb_blank = drm_fb_helper_blank, |
| 63 | .fb_setcmap = drm_fb_helper_setcmap, | ||
| 63 | }; | 64 | }; |
| 64 | 65 | ||
| 65 | static struct drm_fb_helper_funcs intel_fb_helper_funcs = { | 66 | static struct drm_fb_helper_funcs intel_fb_helper_funcs = { |
| 66 | .gamma_set = intel_crtc_fb_gamma_set, | 67 | .gamma_set = intel_crtc_fb_gamma_set, |
| 68 | .gamma_get = intel_crtc_fb_gamma_get, | ||
| 67 | }; | 69 | }; |
| 68 | 70 | ||
| 69 | 71 | ||
| @@ -123,6 +125,10 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
| 123 | struct device *device = &dev->pdev->dev; | 125 | struct device *device = &dev->pdev->dev; |
| 124 | int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1; | 126 | int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1; |
| 125 | 127 | ||
| 128 | /* we don't do packed 24bpp */ | ||
| 129 | if (surface_bpp == 24) | ||
| 130 | surface_bpp = 32; | ||
| 131 | |||
| 126 | mode_cmd.width = surface_width; | 132 | mode_cmd.width = surface_width; |
| 127 | mode_cmd.height = surface_height; | 133 | mode_cmd.height = surface_height; |
| 128 | 134 | ||
| @@ -206,7 +212,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
| 206 | 212 | ||
| 207 | // memset(info->screen_base, 0, size); | 213 | // memset(info->screen_base, 0, size); |
| 208 | 214 | ||
| 209 | drm_fb_helper_fill_fix(info, fb->pitch); | 215 | drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); |
| 210 | drm_fb_helper_fill_var(info, fb, fb_width, fb_height); | 216 | drm_fb_helper_fill_var(info, fb, fb_width, fb_height); |
| 211 | 217 | ||
| 212 | /* FIXME: we really shouldn't expose mmio space at all */ | 218 | /* FIXME: we really shouldn't expose mmio space at all */ |
| @@ -244,7 +250,7 @@ int intelfb_probe(struct drm_device *dev) | |||
| 244 | int ret; | 250 | int ret; |
| 245 | 251 | ||
| 246 | DRM_DEBUG("\n"); | 252 | DRM_DEBUG("\n"); |
| 247 | ret = drm_fb_helper_single_fb_probe(dev, intelfb_create); | 253 | ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create); |
| 248 | return ret; | 254 | return ret; |
| 249 | } | 255 | } |
| 250 | EXPORT_SYMBOL(intelfb_probe); | 256 | EXPORT_SYMBOL(intelfb_probe); |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 6a015929deee..14fa9701aeb3 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
| @@ -733,6 +733,7 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = { | |||
| 733 | .mode_set_base = atombios_crtc_set_base, | 733 | .mode_set_base = atombios_crtc_set_base, |
| 734 | .prepare = atombios_crtc_prepare, | 734 | .prepare = atombios_crtc_prepare, |
| 735 | .commit = atombios_crtc_commit, | 735 | .commit = atombios_crtc_commit, |
| 736 | .load_lut = radeon_crtc_load_lut, | ||
| 736 | }; | 737 | }; |
| 737 | 738 | ||
| 738 | void radeon_atombios_init_crtc(struct drm_device *dev, | 739 | void radeon_atombios_init_crtc(struct drm_device *dev, |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index e6cce24de802..161094c07d94 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
| @@ -32,6 +32,9 @@ | |||
| 32 | #include "radeon_reg.h" | 32 | #include "radeon_reg.h" |
| 33 | #include "radeon.h" | 33 | #include "radeon.h" |
| 34 | #include "r100d.h" | 34 | #include "r100d.h" |
| 35 | #include "rs100d.h" | ||
| 36 | #include "rv200d.h" | ||
| 37 | #include "rv250d.h" | ||
| 35 | 38 | ||
| 36 | #include <linux/firmware.h> | 39 | #include <linux/firmware.h> |
| 37 | #include <linux/platform_device.h> | 40 | #include <linux/platform_device.h> |
| @@ -60,18 +63,7 @@ MODULE_FIRMWARE(FIRMWARE_R520); | |||
| 60 | 63 | ||
| 61 | /* This files gather functions specifics to: | 64 | /* This files gather functions specifics to: |
| 62 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 | 65 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 |
| 63 | * | ||
| 64 | * Some of these functions might be used by newer ASICs. | ||
| 65 | */ | 66 | */ |
| 66 | int r200_init(struct radeon_device *rdev); | ||
| 67 | void r100_hdp_reset(struct radeon_device *rdev); | ||
| 68 | void r100_gpu_init(struct radeon_device *rdev); | ||
| 69 | int r100_gui_wait_for_idle(struct radeon_device *rdev); | ||
| 70 | int r100_mc_wait_for_idle(struct radeon_device *rdev); | ||
| 71 | void r100_gpu_wait_for_vsync(struct radeon_device *rdev); | ||
| 72 | void r100_gpu_wait_for_vsync2(struct radeon_device *rdev); | ||
| 73 | int r100_debugfs_mc_info_init(struct radeon_device *rdev); | ||
| 74 | |||
| 75 | 67 | ||
| 76 | /* | 68 | /* |
| 77 | * PCI GART | 69 | * PCI GART |
| @@ -152,136 +144,6 @@ void r100_pci_gart_fini(struct radeon_device *rdev) | |||
| 152 | radeon_gart_fini(rdev); | 144 | radeon_gart_fini(rdev); |
| 153 | } | 145 | } |
| 154 | 146 | ||
| 155 | |||
| 156 | /* | ||
| 157 | * MC | ||
| 158 | */ | ||
| 159 | void r100_mc_disable_clients(struct radeon_device *rdev) | ||
| 160 | { | ||
| 161 | uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl; | ||
| 162 | |||
| 163 | /* FIXME: is this function correct for rs100,rs200,rs300 ? */ | ||
| 164 | if (r100_gui_wait_for_idle(rdev)) { | ||
| 165 | printk(KERN_WARNING "Failed to wait GUI idle while " | ||
| 166 | "programming pipes. Bad things might happen.\n"); | ||
| 167 | } | ||
| 168 | |||
| 169 | /* stop display and memory access */ | ||
| 170 | ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL); | ||
| 171 | WREG32(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE); | ||
| 172 | crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); | ||
| 173 | WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS); | ||
| 174 | crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL); | ||
| 175 | |||
| 176 | r100_gpu_wait_for_vsync(rdev); | ||
| 177 | |||
| 178 | WREG32(RADEON_CRTC_GEN_CNTL, | ||
| 179 | (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) | | ||
| 180 | RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN); | ||
| 181 | |||
| 182 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { | ||
| 183 | crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); | ||
| 184 | |||
| 185 | r100_gpu_wait_for_vsync2(rdev); | ||
| 186 | WREG32(RADEON_CRTC2_GEN_CNTL, | ||
| 187 | (crtc2_gen_cntl & | ||
| 188 | ~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) | | ||
| 189 | RADEON_CRTC2_DISP_REQ_EN_B); | ||
| 190 | } | ||
| 191 | |||
| 192 | udelay(500); | ||
| 193 | } | ||
| 194 | |||
| 195 | void r100_mc_setup(struct radeon_device *rdev) | ||
| 196 | { | ||
| 197 | uint32_t tmp; | ||
| 198 | int r; | ||
| 199 | |||
| 200 | r = r100_debugfs_mc_info_init(rdev); | ||
| 201 | if (r) { | ||
| 202 | DRM_ERROR("Failed to register debugfs file for R100 MC !\n"); | ||
| 203 | } | ||
| 204 | /* Write VRAM size in case we are limiting it */ | ||
| 205 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); | ||
| 206 | /* Novell bug 204882 for RN50/M6/M7 with 8/16/32MB VRAM, | ||
| 207 | * if the aperture is 64MB but we have 32MB VRAM | ||
| 208 | * we report only 32MB VRAM but we have to set MC_FB_LOCATION | ||
| 209 | * to 64MB, otherwise the gpu accidentially dies */ | ||
| 210 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; | ||
| 211 | tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); | ||
| 212 | tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); | ||
| 213 | WREG32(RADEON_MC_FB_LOCATION, tmp); | ||
| 214 | |||
| 215 | /* Enable bus mastering */ | ||
| 216 | tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; | ||
| 217 | WREG32(RADEON_BUS_CNTL, tmp); | ||
| 218 | |||
| 219 | if (rdev->flags & RADEON_IS_AGP) { | ||
| 220 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; | ||
| 221 | tmp = REG_SET(RADEON_MC_AGP_TOP, tmp >> 16); | ||
| 222 | tmp |= REG_SET(RADEON_MC_AGP_START, rdev->mc.gtt_location >> 16); | ||
| 223 | WREG32(RADEON_MC_AGP_LOCATION, tmp); | ||
| 224 | WREG32(RADEON_AGP_BASE, rdev->mc.agp_base); | ||
| 225 | } else { | ||
| 226 | WREG32(RADEON_MC_AGP_LOCATION, 0x0FFFFFFF); | ||
| 227 | WREG32(RADEON_AGP_BASE, 0); | ||
| 228 | } | ||
| 229 | |||
| 230 | tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL; | ||
| 231 | tmp |= (7 << 28); | ||
| 232 | WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE); | ||
| 233 | (void)RREG32(RADEON_HOST_PATH_CNTL); | ||
| 234 | WREG32(RADEON_HOST_PATH_CNTL, tmp); | ||
| 235 | (void)RREG32(RADEON_HOST_PATH_CNTL); | ||
| 236 | } | ||
| 237 | |||
| 238 | int r100_mc_init(struct radeon_device *rdev) | ||
| 239 | { | ||
| 240 | int r; | ||
| 241 | |||
| 242 | if (r100_debugfs_rbbm_init(rdev)) { | ||
| 243 | DRM_ERROR("Failed to register debugfs file for RBBM !\n"); | ||
| 244 | } | ||
| 245 | |||
| 246 | r100_gpu_init(rdev); | ||
| 247 | /* Disable gart which also disable out of gart access */ | ||
| 248 | r100_pci_gart_disable(rdev); | ||
| 249 | |||
| 250 | /* Setup GPU memory space */ | ||
| 251 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | ||
| 252 | if (rdev->flags & RADEON_IS_AGP) { | ||
| 253 | r = radeon_agp_init(rdev); | ||
| 254 | if (r) { | ||
| 255 | printk(KERN_WARNING "[drm] Disabling AGP\n"); | ||
| 256 | rdev->flags &= ~RADEON_IS_AGP; | ||
| 257 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | ||
| 258 | } else { | ||
| 259 | rdev->mc.gtt_location = rdev->mc.agp_base; | ||
| 260 | } | ||
| 261 | } | ||
| 262 | r = radeon_mc_setup(rdev); | ||
| 263 | if (r) { | ||
| 264 | return r; | ||
| 265 | } | ||
| 266 | |||
| 267 | r100_mc_disable_clients(rdev); | ||
| 268 | if (r100_mc_wait_for_idle(rdev)) { | ||
| 269 | printk(KERN_WARNING "Failed to wait MC idle while " | ||
| 270 | "programming pipes. Bad things might happen.\n"); | ||
| 271 | } | ||
| 272 | |||
| 273 | r100_mc_setup(rdev); | ||
| 274 | return 0; | ||
| 275 | } | ||
| 276 | |||
| 277 | void r100_mc_fini(struct radeon_device *rdev) | ||
| 278 | { | ||
| 279 | } | ||
| 280 | |||
| 281 | |||
| 282 | /* | ||
| 283 | * Interrupts | ||
| 284 | */ | ||
| 285 | int r100_irq_set(struct radeon_device *rdev) | 147 | int r100_irq_set(struct radeon_device *rdev) |
| 286 | { | 148 | { |
| 287 | uint32_t tmp = 0; | 149 | uint32_t tmp = 0; |
| @@ -358,10 +220,6 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) | |||
| 358 | return RREG32(RADEON_CRTC2_CRNT_FRAME); | 220 | return RREG32(RADEON_CRTC2_CRNT_FRAME); |
| 359 | } | 221 | } |
| 360 | 222 | ||
| 361 | |||
| 362 | /* | ||
| 363 | * Fence emission | ||
| 364 | */ | ||
| 365 | void r100_fence_ring_emit(struct radeon_device *rdev, | 223 | void r100_fence_ring_emit(struct radeon_device *rdev, |
| 366 | struct radeon_fence *fence) | 224 | struct radeon_fence *fence) |
| 367 | { | 225 | { |
| @@ -377,10 +235,6 @@ void r100_fence_ring_emit(struct radeon_device *rdev, | |||
| 377 | radeon_ring_write(rdev, RADEON_SW_INT_FIRE); | 235 | radeon_ring_write(rdev, RADEON_SW_INT_FIRE); |
| 378 | } | 236 | } |
| 379 | 237 | ||
| 380 | |||
| 381 | /* | ||
| 382 | * Writeback | ||
| 383 | */ | ||
| 384 | int r100_wb_init(struct radeon_device *rdev) | 238 | int r100_wb_init(struct radeon_device *rdev) |
| 385 | { | 239 | { |
| 386 | int r; | 240 | int r; |
| @@ -504,10 +358,6 @@ int r100_copy_blit(struct radeon_device *rdev, | |||
| 504 | return r; | 358 | return r; |
| 505 | } | 359 | } |
| 506 | 360 | ||
| 507 | |||
| 508 | /* | ||
| 509 | * CP | ||
| 510 | */ | ||
| 511 | static int r100_cp_wait_for_idle(struct radeon_device *rdev) | 361 | static int r100_cp_wait_for_idle(struct radeon_device *rdev) |
| 512 | { | 362 | { |
| 513 | unsigned i; | 363 | unsigned i; |
| @@ -612,6 +462,7 @@ static int r100_cp_init_microcode(struct radeon_device *rdev) | |||
| 612 | } | 462 | } |
| 613 | return err; | 463 | return err; |
| 614 | } | 464 | } |
| 465 | |||
| 615 | static void r100_cp_load_microcode(struct radeon_device *rdev) | 466 | static void r100_cp_load_microcode(struct radeon_device *rdev) |
| 616 | { | 467 | { |
| 617 | const __be32 *fw_data; | 468 | const __be32 *fw_data; |
| @@ -978,7 +829,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
| 978 | 829 | ||
| 979 | header = radeon_get_ib_value(p, h_idx); | 830 | header = radeon_get_ib_value(p, h_idx); |
| 980 | crtc_id = radeon_get_ib_value(p, h_idx + 5); | 831 | crtc_id = radeon_get_ib_value(p, h_idx + 5); |
| 981 | reg = header >> 2; | 832 | reg = CP_PACKET0_GET_REG(header); |
| 982 | mutex_lock(&p->rdev->ddev->mode_config.mutex); | 833 | mutex_lock(&p->rdev->ddev->mode_config.mutex); |
| 983 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); | 834 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); |
| 984 | if (!obj) { | 835 | if (!obj) { |
| @@ -1990,7 +1841,7 @@ void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | |||
| 1990 | r100_pll_errata_after_data(rdev); | 1841 | r100_pll_errata_after_data(rdev); |
| 1991 | } | 1842 | } |
| 1992 | 1843 | ||
| 1993 | int r100_init(struct radeon_device *rdev) | 1844 | void r100_set_safe_registers(struct radeon_device *rdev) |
| 1994 | { | 1845 | { |
| 1995 | if (ASIC_IS_RN50(rdev)) { | 1846 | if (ASIC_IS_RN50(rdev)) { |
| 1996 | rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; | 1847 | rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; |
| @@ -1999,9 +1850,8 @@ int r100_init(struct radeon_device *rdev) | |||
| 1999 | rdev->config.r100.reg_safe_bm = r100_reg_safe_bm; | 1850 | rdev->config.r100.reg_safe_bm = r100_reg_safe_bm; |
| 2000 | rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm); | 1851 | rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm); |
| 2001 | } else { | 1852 | } else { |
| 2002 | return r200_init(rdev); | 1853 | r200_set_safe_registers(rdev); |
| 2003 | } | 1854 | } |
| 2004 | return 0; | ||
| 2005 | } | 1855 | } |
| 2006 | 1856 | ||
| 2007 | /* | 1857 | /* |
| @@ -2299,9 +2149,11 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
| 2299 | mode1 = &rdev->mode_info.crtcs[0]->base.mode; | 2149 | mode1 = &rdev->mode_info.crtcs[0]->base.mode; |
| 2300 | pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; | 2150 | pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; |
| 2301 | } | 2151 | } |
| 2302 | if (rdev->mode_info.crtcs[1]->base.enabled) { | 2152 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { |
| 2303 | mode2 = &rdev->mode_info.crtcs[1]->base.mode; | 2153 | if (rdev->mode_info.crtcs[1]->base.enabled) { |
| 2304 | pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8; | 2154 | mode2 = &rdev->mode_info.crtcs[1]->base.mode; |
| 2155 | pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8; | ||
| 2156 | } | ||
| 2305 | } | 2157 | } |
| 2306 | 2158 | ||
| 2307 | min_mem_eff.full = rfixed_const_8(0); | 2159 | min_mem_eff.full = rfixed_const_8(0); |
| @@ -3114,7 +2966,7 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save) | |||
| 3114 | WREG32(R_000740_CP_CSQ_CNTL, 0); | 2966 | WREG32(R_000740_CP_CSQ_CNTL, 0); |
| 3115 | 2967 | ||
| 3116 | /* Save few CRTC registers */ | 2968 | /* Save few CRTC registers */ |
| 3117 | save->GENMO_WT = RREG32(R_0003C0_GENMO_WT); | 2969 | save->GENMO_WT = RREG8(R_0003C2_GENMO_WT); |
| 3118 | save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL); | 2970 | save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL); |
| 3119 | save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL); | 2971 | save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL); |
| 3120 | save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET); | 2972 | save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET); |
| @@ -3124,7 +2976,7 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save) | |||
| 3124 | } | 2976 | } |
| 3125 | 2977 | ||
| 3126 | /* Disable VGA aperture access */ | 2978 | /* Disable VGA aperture access */ |
| 3127 | WREG32(R_0003C0_GENMO_WT, C_0003C0_VGA_RAM_EN & save->GENMO_WT); | 2979 | WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT); |
| 3128 | /* Disable cursor, overlay, crtc */ | 2980 | /* Disable cursor, overlay, crtc */ |
| 3129 | WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1)); | 2981 | WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1)); |
| 3130 | WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL | | 2982 | WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL | |
| @@ -3156,10 +3008,264 @@ void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save) | |||
| 3156 | rdev->mc.vram_location); | 3008 | rdev->mc.vram_location); |
| 3157 | } | 3009 | } |
| 3158 | /* Restore CRTC registers */ | 3010 | /* Restore CRTC registers */ |
| 3159 | WREG32(R_0003C0_GENMO_WT, save->GENMO_WT); | 3011 | WREG8(R_0003C2_GENMO_WT, save->GENMO_WT); |
| 3160 | WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL); | 3012 | WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL); |
| 3161 | WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL); | 3013 | WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL); |
| 3162 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { | 3014 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { |
| 3163 | WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL); | 3015 | WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL); |
| 3164 | } | 3016 | } |
| 3165 | } | 3017 | } |
| 3018 | |||
| 3019 | void r100_vga_render_disable(struct radeon_device *rdev) | ||
| 3020 | { | ||
| 3021 | u32 tmp; | ||
| 3022 | |||
| 3023 | tmp = RREG8(R_0003C2_GENMO_WT); | ||
| 3024 | WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp); | ||
| 3025 | } | ||
| 3026 | |||
| 3027 | static void r100_debugfs(struct radeon_device *rdev) | ||
| 3028 | { | ||
| 3029 | int r; | ||
| 3030 | |||
| 3031 | r = r100_debugfs_mc_info_init(rdev); | ||
| 3032 | if (r) | ||
| 3033 | dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n"); | ||
| 3034 | } | ||
| 3035 | |||
| 3036 | static void r100_mc_program(struct radeon_device *rdev) | ||
| 3037 | { | ||
| 3038 | struct r100_mc_save save; | ||
| 3039 | |||
| 3040 | /* Stops all mc clients */ | ||
| 3041 | r100_mc_stop(rdev, &save); | ||
| 3042 | if (rdev->flags & RADEON_IS_AGP) { | ||
| 3043 | WREG32(R_00014C_MC_AGP_LOCATION, | ||
| 3044 | S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) | | ||
| 3045 | S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16)); | ||
| 3046 | WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base)); | ||
| 3047 | if (rdev->family > CHIP_RV200) | ||
| 3048 | WREG32(R_00015C_AGP_BASE_2, | ||
| 3049 | upper_32_bits(rdev->mc.agp_base) & 0xff); | ||
| 3050 | } else { | ||
| 3051 | WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF); | ||
| 3052 | WREG32(R_000170_AGP_BASE, 0); | ||
| 3053 | if (rdev->family > CHIP_RV200) | ||
| 3054 | WREG32(R_00015C_AGP_BASE_2, 0); | ||
| 3055 | } | ||
| 3056 | /* Wait for mc idle */ | ||
| 3057 | if (r100_mc_wait_for_idle(rdev)) | ||
| 3058 | dev_warn(rdev->dev, "Wait for MC idle timeout.\n"); | ||
| 3059 | /* Program MC, should be a 32-bit limited address space */ | ||
| 3060 | WREG32(R_000148_MC_FB_LOCATION, | ||
| 3061 | S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | | ||
| 3062 | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); | ||
| 3063 | r100_mc_resume(rdev, &save); | ||
| 3064 | } | ||
| 3065 | |||
| 3066 | void r100_clock_startup(struct radeon_device *rdev) | ||
| 3067 | { | ||
| 3068 | u32 tmp; | ||
| 3069 | |||
| 3070 | if (radeon_dynclks != -1 && radeon_dynclks) | ||
| 3071 | radeon_legacy_set_clock_gating(rdev, 1); | ||
| 3072 | /* We need to force on some of the blocks */ | ||
| 3073 | tmp = RREG32_PLL(R_00000D_SCLK_CNTL); | ||
| 3074 | tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); | ||
| 3075 | if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280)) | ||
| 3076 | tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1); | ||
| 3077 | WREG32_PLL(R_00000D_SCLK_CNTL, tmp); | ||
| 3078 | } | ||
| 3079 | |||
| 3080 | static int r100_startup(struct radeon_device *rdev) | ||
| 3081 | { | ||
| 3082 | int r; | ||
| 3083 | |||
| 3084 | r100_mc_program(rdev); | ||
| 3085 | /* Resume clock */ | ||
| 3086 | r100_clock_startup(rdev); | ||
| 3087 | /* Initialize GPU configuration (# pipes, ...) */ | ||
| 3088 | r100_gpu_init(rdev); | ||
| 3089 | /* Initialize GART (initialize after TTM so we can allocate | ||
| 3090 | * memory through TTM but finalize after TTM) */ | ||
| 3091 | if (rdev->flags & RADEON_IS_PCI) { | ||
| 3092 | r = r100_pci_gart_enable(rdev); | ||
| 3093 | if (r) | ||
| 3094 | return r; | ||
| 3095 | } | ||
| 3096 | /* Enable IRQ */ | ||
| 3097 | rdev->irq.sw_int = true; | ||
| 3098 | r100_irq_set(rdev); | ||
| 3099 | /* 1M ring buffer */ | ||
| 3100 | r = r100_cp_init(rdev, 1024 * 1024); | ||
| 3101 | if (r) { | ||
| 3102 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); | ||
| 3103 | return r; | ||
| 3104 | } | ||
| 3105 | r = r100_wb_init(rdev); | ||
| 3106 | if (r) | ||
| 3107 | dev_err(rdev->dev, "failed initializing WB (%d).\n", r); | ||
| 3108 | r = r100_ib_init(rdev); | ||
| 3109 | if (r) { | ||
| 3110 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); | ||
| 3111 | return r; | ||
| 3112 | } | ||
| 3113 | return 0; | ||
| 3114 | } | ||
| 3115 | |||
| 3116 | int r100_resume(struct radeon_device *rdev) | ||
| 3117 | { | ||
| 3118 | /* Make sure GART is not working */ | ||
| 3119 | if (rdev->flags & RADEON_IS_PCI) | ||
| 3120 | r100_pci_gart_disable(rdev); | ||
| 3121 | /* Resume clock before doing reset */ | ||
| 3122 | r100_clock_startup(rdev); | ||
| 3123 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
| 3124 | if (radeon_gpu_reset(rdev)) { | ||
| 3125 | dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
| 3126 | RREG32(R_000E40_RBBM_STATUS), | ||
| 3127 | RREG32(R_0007C0_CP_STAT)); | ||
| 3128 | } | ||
| 3129 | /* post */ | ||
| 3130 | radeon_combios_asic_init(rdev->ddev); | ||
| 3131 | /* Resume clock after posting */ | ||
| 3132 | r100_clock_startup(rdev); | ||
| 3133 | return r100_startup(rdev); | ||
| 3134 | } | ||
| 3135 | |||
| 3136 | int r100_suspend(struct radeon_device *rdev) | ||
| 3137 | { | ||
| 3138 | r100_cp_disable(rdev); | ||
| 3139 | r100_wb_disable(rdev); | ||
| 3140 | r100_irq_disable(rdev); | ||
| 3141 | if (rdev->flags & RADEON_IS_PCI) | ||
| 3142 | r100_pci_gart_disable(rdev); | ||
| 3143 | return 0; | ||
| 3144 | } | ||
| 3145 | |||
| 3146 | void r100_fini(struct radeon_device *rdev) | ||
| 3147 | { | ||
| 3148 | r100_suspend(rdev); | ||
| 3149 | r100_cp_fini(rdev); | ||
| 3150 | r100_wb_fini(rdev); | ||
| 3151 | r100_ib_fini(rdev); | ||
| 3152 | radeon_gem_fini(rdev); | ||
| 3153 | if (rdev->flags & RADEON_IS_PCI) | ||
| 3154 | r100_pci_gart_fini(rdev); | ||
| 3155 | radeon_irq_kms_fini(rdev); | ||
| 3156 | radeon_fence_driver_fini(rdev); | ||
| 3157 | radeon_object_fini(rdev); | ||
| 3158 | radeon_atombios_fini(rdev); | ||
| 3159 | kfree(rdev->bios); | ||
| 3160 | rdev->bios = NULL; | ||
| 3161 | } | ||
| 3162 | |||
| 3163 | int r100_mc_init(struct radeon_device *rdev) | ||
| 3164 | { | ||
| 3165 | int r; | ||
| 3166 | u32 tmp; | ||
| 3167 | |||
| 3168 | /* Setup GPU memory space */ | ||
| 3169 | rdev->mc.vram_location = 0xFFFFFFFFUL; | ||
| 3170 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | ||
| 3171 | if (rdev->flags & RADEON_IS_IGP) { | ||
| 3172 | tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM)); | ||
| 3173 | rdev->mc.vram_location = tmp << 16; | ||
| 3174 | } | ||
| 3175 | if (rdev->flags & RADEON_IS_AGP) { | ||
| 3176 | r = radeon_agp_init(rdev); | ||
| 3177 | if (r) { | ||
| 3178 | printk(KERN_WARNING "[drm] Disabling AGP\n"); | ||
| 3179 | rdev->flags &= ~RADEON_IS_AGP; | ||
| 3180 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | ||
| 3181 | } else { | ||
| 3182 | rdev->mc.gtt_location = rdev->mc.agp_base; | ||
| 3183 | } | ||
| 3184 | } | ||
| 3185 | r = radeon_mc_setup(rdev); | ||
| 3186 | if (r) | ||
| 3187 | return r; | ||
| 3188 | return 0; | ||
| 3189 | } | ||
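On IGP parts the framebuffer is carved out of system memory, which is why the new r100_mc_init above reads the VRAM base from NB_TOM instead of assuming it can be placed anywhere. A rough sketch of the decode, assuming the field follows the same 16-bit/64KB convention as MC_FB_LOCATION (the NB_TOM layout itself is not shown in this patch, so treat the mask as illustrative):

    /* NB_TOM is assumed to carry the framebuffer start in 64KB units in its
     * low 16 bits; e.g. a field value of 0x7C00 places VRAM at 0x7C000000. */
    static uint32_t igp_vram_base(uint32_t nb_tom)
    {
            return (nb_tom & 0xFFFF) << 16;   /* illustrative decode */
    }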
| 3190 | |||
| 3191 | int r100_init(struct radeon_device *rdev) | ||
| 3192 | { | ||
| 3193 | int r; | ||
| 3194 | |||
| 3195 | /* Register debugfs file specific to this group of asics */ | ||
| 3196 | r100_debugfs(rdev); | ||
| 3197 | /* Disable VGA */ | ||
| 3198 | r100_vga_render_disable(rdev); | ||
| 3199 | /* Initialize scratch registers */ | ||
| 3200 | radeon_scratch_init(rdev); | ||
| 3201 | /* Initialize surface registers */ | ||
| 3202 | radeon_surface_init(rdev); | ||
| 3203 | /* TODO: disable VGA need to use VGA request */ | ||
| 3204 | /* BIOS*/ | ||
| 3205 | if (!radeon_get_bios(rdev)) { | ||
| 3206 | if (ASIC_IS_AVIVO(rdev)) | ||
| 3207 | return -EINVAL; | ||
| 3208 | } | ||
| 3209 | if (rdev->is_atom_bios) { | ||
| 3210 | dev_err(rdev->dev, "Expecting combios for R100 GPU\n"); | ||
| 3211 | return -EINVAL; | ||
| 3212 | } else { | ||
| 3213 | r = radeon_combios_init(rdev); | ||
| 3214 | if (r) | ||
| 3215 | return r; | ||
| 3216 | } | ||
| 3217 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
| 3218 | if (radeon_gpu_reset(rdev)) { | ||
| 3219 | dev_warn(rdev->dev, | ||
| 3220 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
| 3221 | RREG32(R_000E40_RBBM_STATUS), | ||
| 3222 | RREG32(R_0007C0_CP_STAT)); | ||
| 3223 | } | ||
| 3224 | /* check if cards are posted or not */ | ||
| 3225 | if (!radeon_card_posted(rdev) && rdev->bios) { | ||
| 3226 | DRM_INFO("GPU not posted. posting now...\n"); | ||
| 3227 | radeon_combios_asic_init(rdev->ddev); | ||
| 3228 | } | ||
| 3229 | /* Set asic errata */ | ||
| 3230 | r100_errata(rdev); | ||
| 3231 | /* Initialize clocks */ | ||
| 3232 | radeon_get_clock_info(rdev->ddev); | ||
| 3233 | /* Get vram informations */ | ||
| 3234 | r100_vram_info(rdev); | ||
| 3235 | /* Initialize memory controller (also test AGP) */ | ||
| 3236 | r = r100_mc_init(rdev); | ||
| 3237 | if (r) | ||
| 3238 | return r; | ||
| 3239 | /* Fence driver */ | ||
| 3240 | r = radeon_fence_driver_init(rdev); | ||
| 3241 | if (r) | ||
| 3242 | return r; | ||
| 3243 | r = radeon_irq_kms_init(rdev); | ||
| 3244 | if (r) | ||
| 3245 | return r; | ||
| 3246 | /* Memory manager */ | ||
| 3247 | r = radeon_object_init(rdev); | ||
| 3248 | if (r) | ||
| 3249 | return r; | ||
| 3250 | if (rdev->flags & RADEON_IS_PCI) { | ||
| 3251 | r = r100_pci_gart_init(rdev); | ||
| 3252 | if (r) | ||
| 3253 | return r; | ||
| 3254 | } | ||
| 3255 | r100_set_safe_registers(rdev); | ||
| 3256 | rdev->accel_working = true; | ||
| 3257 | r = r100_startup(rdev); | ||
| 3258 | if (r) { | ||
| 3259 | /* Something went wrong with the accel init, stop accel */ | ||
| 3260 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | ||
| 3261 | r100_suspend(rdev); | ||
| 3262 | r100_cp_fini(rdev); | ||
| 3263 | r100_wb_fini(rdev); | ||
| 3264 | r100_ib_fini(rdev); | ||
| 3265 | if (rdev->flags & RADEON_IS_PCI) | ||
| 3266 | r100_pci_gart_fini(rdev); | ||
| 3267 | radeon_irq_kms_fini(rdev); | ||
| 3268 | rdev->accel_working = false; | ||
| 3269 | } | ||
| 3270 | return 0; | ||
| 3271 | } | ||
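With this change r100_init performs all one-time software setup itself and ends by calling r100_startup, with r100_suspend, r100_resume and r100_fini as its counterparts. How these entry points get wired into the driver core is not part of this hunk; the following is only a sketch of the kind of per-ASIC dispatch table they are assumed to populate (the real table lives in radeon_asic.h and may differ in fields and naming):

    /* Hypothetical excerpt of the per-ASIC function table. */
    static struct radeon_asic r100_asic_sketch = {
            .init    = &r100_init,     /* one-time setup, ends in r100_startup() */
            .fini    = &r100_fini,     /* undo init, free CP/WB/IB/GART          */
            .suspend = &r100_suspend,  /* quiesce engines before sleep           */
            .resume  = &r100_resume,   /* reset, re-post BIOS, then startup      */
    };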
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h index c4b257ec920e..df29a630c466 100644 --- a/drivers/gpu/drm/radeon/r100d.h +++ b/drivers/gpu/drm/radeon/r100d.h | |||
| @@ -381,6 +381,24 @@ | |||
| 381 | #define S_000054_VCRTC_IDX_MASTER(x) (((x) & 0x7F) << 24) | 381 | #define S_000054_VCRTC_IDX_MASTER(x) (((x) & 0x7F) << 24) |
| 382 | #define G_000054_VCRTC_IDX_MASTER(x) (((x) >> 24) & 0x7F) | 382 | #define G_000054_VCRTC_IDX_MASTER(x) (((x) >> 24) & 0x7F) |
| 383 | #define C_000054_VCRTC_IDX_MASTER 0x80FFFFFF | 383 | #define C_000054_VCRTC_IDX_MASTER 0x80FFFFFF |
| 384 | #define R_000148_MC_FB_LOCATION 0x000148 | ||
| 385 | #define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0) | ||
| 386 | #define G_000148_MC_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
| 387 | #define C_000148_MC_FB_START 0xFFFF0000 | ||
| 388 | #define S_000148_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) | ||
| 389 | #define G_000148_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) | ||
| 390 | #define C_000148_MC_FB_TOP 0x0000FFFF | ||
| 391 | #define R_00014C_MC_AGP_LOCATION 0x00014C | ||
| 392 | #define S_00014C_MC_AGP_START(x) (((x) & 0xFFFF) << 0) | ||
| 393 | #define G_00014C_MC_AGP_START(x) (((x) >> 0) & 0xFFFF) | ||
| 394 | #define C_00014C_MC_AGP_START 0xFFFF0000 | ||
| 395 | #define S_00014C_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16) | ||
| 396 | #define G_00014C_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF) | ||
| 397 | #define C_00014C_MC_AGP_TOP 0x0000FFFF | ||
| 398 | #define R_000170_AGP_BASE 0x000170 | ||
| 399 | #define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) | ||
| 400 | #define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) | ||
| 401 | #define C_000170_AGP_BASE_ADDR 0x00000000 | ||
| 384 | #define R_00023C_DISPLAY_BASE_ADDR 0x00023C | 402 | #define R_00023C_DISPLAY_BASE_ADDR 0x00023C |
| 385 | #define S_00023C_DISPLAY_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) | 403 | #define S_00023C_DISPLAY_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) |
| 386 | #define G_00023C_DISPLAY_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) | 404 | #define G_00023C_DISPLAY_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) |
| @@ -403,25 +421,25 @@ | |||
| 403 | #define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31) | 421 | #define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31) |
| 404 | #define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1) | 422 | #define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1) |
| 405 | #define C_000360_CUR2_LOCK 0x7FFFFFFF | 423 | #define C_000360_CUR2_LOCK 0x7FFFFFFF |
| 406 | #define R_0003C0_GENMO_WT 0x0003C0 | 424 | #define R_0003C2_GENMO_WT 0x0003C0 |
| 407 | #define S_0003C0_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0) | 425 | #define S_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0) |
| 408 | #define G_0003C0_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1) | 426 | #define G_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1) |
| 409 | #define C_0003C0_GENMO_MONO_ADDRESS_B 0xFFFFFFFE | 427 | #define C_0003C2_GENMO_MONO_ADDRESS_B 0xFE |
| 410 | #define S_0003C0_VGA_RAM_EN(x) (((x) & 0x1) << 1) | 428 | #define S_0003C2_VGA_RAM_EN(x) (((x) & 0x1) << 1) |
| 411 | #define G_0003C0_VGA_RAM_EN(x) (((x) >> 1) & 0x1) | 429 | #define G_0003C2_VGA_RAM_EN(x) (((x) >> 1) & 0x1) |
| 412 | #define C_0003C0_VGA_RAM_EN 0xFFFFFFFD | 430 | #define C_0003C2_VGA_RAM_EN 0xFD |
| 413 | #define S_0003C0_VGA_CKSEL(x) (((x) & 0x3) << 2) | 431 | #define S_0003C2_VGA_CKSEL(x) (((x) & 0x3) << 2) |
| 414 | #define G_0003C0_VGA_CKSEL(x) (((x) >> 2) & 0x3) | 432 | #define G_0003C2_VGA_CKSEL(x) (((x) >> 2) & 0x3) |
| 415 | #define C_0003C0_VGA_CKSEL 0xFFFFFFF3 | 433 | #define C_0003C2_VGA_CKSEL 0xF3 |
| 416 | #define S_0003C0_ODD_EVEN_MD_PGSEL(x) (((x) & 0x1) << 5) | 434 | #define S_0003C2_ODD_EVEN_MD_PGSEL(x) (((x) & 0x1) << 5) |
| 417 | #define G_0003C0_ODD_EVEN_MD_PGSEL(x) (((x) >> 5) & 0x1) | 435 | #define G_0003C2_ODD_EVEN_MD_PGSEL(x) (((x) >> 5) & 0x1) |
| 418 | #define C_0003C0_ODD_EVEN_MD_PGSEL 0xFFFFFFDF | 436 | #define C_0003C2_ODD_EVEN_MD_PGSEL 0xDF |
| 419 | #define S_0003C0_VGA_HSYNC_POL(x) (((x) & 0x1) << 6) | 437 | #define S_0003C2_VGA_HSYNC_POL(x) (((x) & 0x1) << 6) |
| 420 | #define G_0003C0_VGA_HSYNC_POL(x) (((x) >> 6) & 0x1) | 438 | #define G_0003C2_VGA_HSYNC_POL(x) (((x) >> 6) & 0x1) |
| 421 | #define C_0003C0_VGA_HSYNC_POL 0xFFFFFFBF | 439 | #define C_0003C2_VGA_HSYNC_POL 0xBF |
| 422 | #define S_0003C0_VGA_VSYNC_POL(x) (((x) & 0x1) << 7) | 440 | #define S_0003C2_VGA_VSYNC_POL(x) (((x) & 0x1) << 7) |
| 423 | #define G_0003C0_VGA_VSYNC_POL(x) (((x) >> 7) & 0x1) | 441 | #define G_0003C2_VGA_VSYNC_POL(x) (((x) >> 7) & 0x1) |
| 424 | #define C_0003C0_VGA_VSYNC_POL 0xFFFFFF7F | 442 | #define C_0003C2_VGA_VSYNC_POL 0x7F |
| 425 | #define R_0003F8_CRTC2_GEN_CNTL 0x0003F8 | 443 | #define R_0003F8_CRTC2_GEN_CNTL 0x0003F8 |
| 426 | #define S_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) & 0x1) << 0) | 444 | #define S_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) & 0x1) << 0) |
| 427 | #define G_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) >> 0) & 0x1) | 445 | #define G_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) >> 0) & 0x1) |
| @@ -545,6 +563,46 @@ | |||
| 545 | #define S_000774_SCRATCH_ADDR(x) (((x) & 0x7FFFFFF) << 5) | 563 | #define S_000774_SCRATCH_ADDR(x) (((x) & 0x7FFFFFF) << 5) |
| 546 | #define G_000774_SCRATCH_ADDR(x) (((x) >> 5) & 0x7FFFFFF) | 564 | #define G_000774_SCRATCH_ADDR(x) (((x) >> 5) & 0x7FFFFFF) |
| 547 | #define C_000774_SCRATCH_ADDR 0x0000001F | 565 | #define C_000774_SCRATCH_ADDR 0x0000001F |
| 566 | #define R_0007C0_CP_STAT 0x0007C0 | ||
| 567 | #define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) | ||
| 568 | #define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) | ||
| 569 | #define C_0007C0_MRU_BUSY 0xFFFFFFFE | ||
| 570 | #define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) | ||
| 571 | #define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) | ||
| 572 | #define C_0007C0_MWU_BUSY 0xFFFFFFFD | ||
| 573 | #define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) | ||
| 574 | #define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) | ||
| 575 | #define C_0007C0_RSIU_BUSY 0xFFFFFFFB | ||
| 576 | #define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) | ||
| 577 | #define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) | ||
| 578 | #define C_0007C0_RCIU_BUSY 0xFFFFFFF7 | ||
| 579 | #define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) | ||
| 580 | #define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) | ||
| 581 | #define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF | ||
| 582 | #define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) | ||
| 583 | #define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) | ||
| 584 | #define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF | ||
| 585 | #define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) | ||
| 586 | #define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) | ||
| 587 | #define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF | ||
| 588 | #define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) | ||
| 589 | #define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) | ||
| 590 | #define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF | ||
| 591 | #define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) | ||
| 592 | #define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) | ||
| 593 | #define C_0007C0_CSI_BUSY 0xFFFFDFFF | ||
| 594 | #define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) | ||
| 595 | #define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) | ||
| 596 | #define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF | ||
| 597 | #define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) | ||
| 598 | #define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) | ||
| 599 | #define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF | ||
| 600 | #define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) | ||
| 601 | #define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) | ||
| 602 | #define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF | ||
| 603 | #define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) | ||
| 604 | #define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) | ||
| 605 | #define C_0007C0_CP_BUSY 0x7FFFFFFF | ||
| 548 | #define R_000E40_RBBM_STATUS 0x000E40 | 606 | #define R_000E40_RBBM_STATUS 0x000E40 |
| 549 | #define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) | 607 | #define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) |
| 550 | #define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) | 608 | #define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) |
| @@ -604,4 +662,53 @@ | |||
| 604 | #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) | 662 | #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) |
| 605 | #define C_000E40_GUI_ACTIVE 0x7FFFFFFF | 663 | #define C_000E40_GUI_ACTIVE 0x7FFFFFFF |
| 606 | 664 | ||
| 665 | |||
| 666 | #define R_00000D_SCLK_CNTL 0x00000D | ||
| 667 | #define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0) | ||
| 668 | #define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7) | ||
| 669 | #define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8 | ||
| 670 | #define S_00000D_TCLK_SRC_SEL(x) (((x) & 0x7) << 8) | ||
| 671 | #define G_00000D_TCLK_SRC_SEL(x) (((x) >> 8) & 0x7) | ||
| 672 | #define C_00000D_TCLK_SRC_SEL 0xFFFFF8FF | ||
| 673 | #define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16) | ||
| 674 | #define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1) | ||
| 675 | #define C_00000D_FORCE_CP 0xFFFEFFFF | ||
| 676 | #define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17) | ||
| 677 | #define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1) | ||
| 678 | #define C_00000D_FORCE_HDP 0xFFFDFFFF | ||
| 679 | #define S_00000D_FORCE_DISP(x) (((x) & 0x1) << 18) | ||
| 680 | #define G_00000D_FORCE_DISP(x) (((x) >> 18) & 0x1) | ||
| 681 | #define C_00000D_FORCE_DISP 0xFFFBFFFF | ||
| 682 | #define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19) | ||
| 683 | #define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1) | ||
| 684 | #define C_00000D_FORCE_TOP 0xFFF7FFFF | ||
| 685 | #define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) | ||
| 686 | #define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) | ||
| 687 | #define C_00000D_FORCE_E2 0xFFEFFFFF | ||
| 688 | #define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21) | ||
| 689 | #define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1) | ||
| 690 | #define C_00000D_FORCE_SE 0xFFDFFFFF | ||
| 691 | #define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) | ||
| 692 | #define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) | ||
| 693 | #define C_00000D_FORCE_IDCT 0xFFBFFFFF | ||
| 694 | #define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23) | ||
| 695 | #define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1) | ||
| 696 | #define C_00000D_FORCE_VIP 0xFF7FFFFF | ||
| 697 | #define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) | ||
| 698 | #define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) | ||
| 699 | #define C_00000D_FORCE_RE 0xFEFFFFFF | ||
| 700 | #define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25) | ||
| 701 | #define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1) | ||
| 702 | #define C_00000D_FORCE_PB 0xFDFFFFFF | ||
| 703 | #define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26) | ||
| 704 | #define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1) | ||
| 705 | #define C_00000D_FORCE_TAM 0xFBFFFFFF | ||
| 706 | #define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27) | ||
| 707 | #define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1) | ||
| 708 | #define C_00000D_FORCE_TDM 0xF7FFFFFF | ||
| 709 | #define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28) | ||
| 710 | #define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) | ||
| 711 | #define C_00000D_FORCE_RB 0xEFFFFFFF | ||
| 712 | |||
| 713 | |||
| 607 | #endif | 714 | #endif |
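The register headers added above follow a fixed naming scheme: R_<offset>_<NAME> is the register, S_..._FIELD(x) shifts a value into the field, G_..._FIELD(x) extracts it, and C_..._FIELD is the AND-mask that clears it. A typical read-modify-write with these definitions looks like the sketch below; rdev, RREG32/WREG32 and DRM_INFO come from the driver, and new_start is an illustrative parameter rather than anything in the patch:

    /* Sketch: move MC_FB_START to new_start while preserving MC_FB_TOP,
     * then check the GUI-active bit of RBBM_STATUS. */
    static void example_program_fb_start(struct radeon_device *rdev, u32 new_start)
    {
            u32 tmp = RREG32(R_000148_MC_FB_LOCATION);

            tmp &= C_000148_MC_FB_START;                   /* clear the field    */
            tmp |= S_000148_MC_FB_START(new_start >> 16);  /* shift new value in */
            WREG32(R_000148_MC_FB_LOCATION, tmp);

            if (G_000E40_GUI_ACTIVE(RREG32(R_000E40_RBBM_STATUS)))
                    DRM_INFO("GUI still active\n");
    }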
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index cf7fea5ff2e5..eb740fc3549f 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c | |||
| @@ -447,9 +447,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
| 447 | return 0; | 447 | return 0; |
| 448 | } | 448 | } |
| 449 | 449 | ||
| 450 | int r200_init(struct radeon_device *rdev) | 450 | void r200_set_safe_registers(struct radeon_device *rdev) |
| 451 | { | 451 | { |
| 452 | rdev->config.r100.reg_safe_bm = r200_reg_safe_bm; | 452 | rdev->config.r100.reg_safe_bm = r200_reg_safe_bm; |
| 453 | rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r200_reg_safe_bm); | 453 | rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r200_reg_safe_bm); |
| 454 | return 0; | ||
| 455 | } | 454 | } |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 1ebea8cc8c93..e08c4a8974ca 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
| @@ -33,43 +33,16 @@ | |||
| 33 | #include "radeon_drm.h" | 33 | #include "radeon_drm.h" |
| 34 | #include "r100_track.h" | 34 | #include "r100_track.h" |
| 35 | #include "r300d.h" | 35 | #include "r300d.h" |
| 36 | 36 | #include "rv350d.h" | |
| 37 | #include "r300_reg_safe.h" | 37 | #include "r300_reg_safe.h" |
| 38 | 38 | ||
| 39 | /* r300,r350,rv350,rv370,rv380 depends on : */ | 39 | /* This file gathers functions specific to: r300,r350,rv350,rv370,rv380 */ |
| 40 | void r100_hdp_reset(struct radeon_device *rdev); | ||
| 41 | int r100_cp_reset(struct radeon_device *rdev); | ||
| 42 | int r100_rb2d_reset(struct radeon_device *rdev); | ||
| 43 | int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); | ||
| 44 | int r100_pci_gart_enable(struct radeon_device *rdev); | ||
| 45 | void r100_mc_setup(struct radeon_device *rdev); | ||
| 46 | void r100_mc_disable_clients(struct radeon_device *rdev); | ||
| 47 | int r100_gui_wait_for_idle(struct radeon_device *rdev); | ||
| 48 | int r100_cs_packet_parse(struct radeon_cs_parser *p, | ||
| 49 | struct radeon_cs_packet *pkt, | ||
| 50 | unsigned idx); | ||
| 51 | int r100_cs_packet_parse_vline(struct radeon_cs_parser *p); | ||
| 52 | int r100_cs_parse_packet0(struct radeon_cs_parser *p, | ||
| 53 | struct radeon_cs_packet *pkt, | ||
| 54 | const unsigned *auth, unsigned n, | ||
| 55 | radeon_packet0_check_t check); | ||
| 56 | int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, | ||
| 57 | struct radeon_cs_packet *pkt, | ||
| 58 | struct radeon_object *robj); | ||
| 59 | |||
| 60 | /* This files gather functions specifics to: | ||
| 61 | * r300,r350,rv350,rv370,rv380 | ||
| 62 | * | ||
| 63 | * Some of these functions might be used by newer ASICs. | ||
| 64 | */ | ||
| 65 | void r300_gpu_init(struct radeon_device *rdev); | ||
| 66 | int r300_mc_wait_for_idle(struct radeon_device *rdev); | ||
| 67 | int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev); | ||
| 68 | |||
| 69 | 40 | ||
| 70 | /* | 41 | /* |
| 71 | * rv370,rv380 PCIE GART | 42 | * rv370,rv380 PCIE GART |
| 72 | */ | 43 | */ |
| 44 | static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev); | ||
| 45 | |||
| 73 | void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) | 46 | void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) |
| 74 | { | 47 | { |
| 75 | uint32_t tmp; | 48 | uint32_t tmp; |
| @@ -182,59 +155,6 @@ void rv370_pcie_gart_fini(struct radeon_device *rdev) | |||
| 182 | radeon_gart_fini(rdev); | 155 | radeon_gart_fini(rdev); |
| 183 | } | 156 | } |
| 184 | 157 | ||
| 185 | /* | ||
| 186 | * MC | ||
| 187 | */ | ||
| 188 | int r300_mc_init(struct radeon_device *rdev) | ||
| 189 | { | ||
| 190 | int r; | ||
| 191 | |||
| 192 | if (r100_debugfs_rbbm_init(rdev)) { | ||
| 193 | DRM_ERROR("Failed to register debugfs file for RBBM !\n"); | ||
| 194 | } | ||
| 195 | |||
| 196 | r300_gpu_init(rdev); | ||
| 197 | r100_pci_gart_disable(rdev); | ||
| 198 | if (rdev->flags & RADEON_IS_PCIE) { | ||
| 199 | rv370_pcie_gart_disable(rdev); | ||
| 200 | } | ||
| 201 | |||
| 202 | /* Setup GPU memory space */ | ||
| 203 | rdev->mc.vram_location = 0xFFFFFFFFUL; | ||
| 204 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | ||
| 205 | if (rdev->flags & RADEON_IS_AGP) { | ||
| 206 | r = radeon_agp_init(rdev); | ||
| 207 | if (r) { | ||
| 208 | printk(KERN_WARNING "[drm] Disabling AGP\n"); | ||
| 209 | rdev->flags &= ~RADEON_IS_AGP; | ||
| 210 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | ||
| 211 | } else { | ||
| 212 | rdev->mc.gtt_location = rdev->mc.agp_base; | ||
| 213 | } | ||
| 214 | } | ||
| 215 | r = radeon_mc_setup(rdev); | ||
| 216 | if (r) { | ||
| 217 | return r; | ||
| 218 | } | ||
| 219 | |||
| 220 | /* Program GPU memory space */ | ||
| 221 | r100_mc_disable_clients(rdev); | ||
| 222 | if (r300_mc_wait_for_idle(rdev)) { | ||
| 223 | printk(KERN_WARNING "Failed to wait MC idle while " | ||
| 224 | "programming pipes. Bad things might happen.\n"); | ||
| 225 | } | ||
| 226 | r100_mc_setup(rdev); | ||
| 227 | return 0; | ||
| 228 | } | ||
| 229 | |||
| 230 | void r300_mc_fini(struct radeon_device *rdev) | ||
| 231 | { | ||
| 232 | } | ||
| 233 | |||
| 234 | |||
| 235 | /* | ||
| 236 | * Fence emission | ||
| 237 | */ | ||
| 238 | void r300_fence_ring_emit(struct radeon_device *rdev, | 158 | void r300_fence_ring_emit(struct radeon_device *rdev, |
| 239 | struct radeon_fence *fence) | 159 | struct radeon_fence *fence) |
| 240 | { | 160 | { |
| @@ -260,10 +180,6 @@ void r300_fence_ring_emit(struct radeon_device *rdev, | |||
| 260 | radeon_ring_write(rdev, RADEON_SW_INT_FIRE); | 180 | radeon_ring_write(rdev, RADEON_SW_INT_FIRE); |
| 261 | } | 181 | } |
| 262 | 182 | ||
| 263 | |||
| 264 | /* | ||
| 265 | * Global GPU functions | ||
| 266 | */ | ||
| 267 | int r300_copy_dma(struct radeon_device *rdev, | 183 | int r300_copy_dma(struct radeon_device *rdev, |
| 268 | uint64_t src_offset, | 184 | uint64_t src_offset, |
| 269 | uint64_t dst_offset, | 185 | uint64_t dst_offset, |
| @@ -582,11 +498,6 @@ void r300_vram_info(struct radeon_device *rdev) | |||
| 582 | r100_vram_init_sizes(rdev); | 498 | r100_vram_init_sizes(rdev); |
| 583 | } | 499 | } |
| 584 | 500 | ||
| 585 | |||
| 586 | /* | ||
| 587 | * PCIE Lanes | ||
| 588 | */ | ||
| 589 | |||
| 590 | void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) | 501 | void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) |
| 591 | { | 502 | { |
| 592 | uint32_t link_width_cntl, mask; | 503 | uint32_t link_width_cntl, mask; |
| @@ -646,10 +557,6 @@ void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) | |||
| 646 | 557 | ||
| 647 | } | 558 | } |
| 648 | 559 | ||
| 649 | |||
| 650 | /* | ||
| 651 | * Debugfs info | ||
| 652 | */ | ||
| 653 | #if defined(CONFIG_DEBUG_FS) | 560 | #if defined(CONFIG_DEBUG_FS) |
| 654 | static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data) | 561 | static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data) |
| 655 | { | 562 | { |
| @@ -680,7 +587,7 @@ static struct drm_info_list rv370_pcie_gart_info_list[] = { | |||
| 680 | }; | 587 | }; |
| 681 | #endif | 588 | #endif |
| 682 | 589 | ||
| 683 | int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev) | 590 | static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev) |
| 684 | { | 591 | { |
| 685 | #if defined(CONFIG_DEBUG_FS) | 592 | #if defined(CONFIG_DEBUG_FS) |
| 686 | return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1); | 593 | return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1); |
| @@ -689,10 +596,6 @@ int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev) | |||
| 689 | #endif | 596 | #endif |
| 690 | } | 597 | } |
| 691 | 598 | ||
| 692 | |||
| 693 | /* | ||
| 694 | * CS functions | ||
| 695 | */ | ||
| 696 | static int r300_packet0_check(struct radeon_cs_parser *p, | 599 | static int r300_packet0_check(struct radeon_cs_parser *p, |
| 697 | struct radeon_cs_packet *pkt, | 600 | struct radeon_cs_packet *pkt, |
| 698 | unsigned idx, unsigned reg) | 601 | unsigned idx, unsigned reg) |
| @@ -1226,12 +1129,6 @@ void r300_set_reg_safe(struct radeon_device *rdev) | |||
| 1226 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm); | 1129 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm); |
| 1227 | } | 1130 | } |
| 1228 | 1131 | ||
| 1229 | int r300_init(struct radeon_device *rdev) | ||
| 1230 | { | ||
| 1231 | r300_set_reg_safe(rdev); | ||
| 1232 | return 0; | ||
| 1233 | } | ||
| 1234 | |||
| 1235 | void r300_mc_program(struct radeon_device *rdev) | 1132 | void r300_mc_program(struct radeon_device *rdev) |
| 1236 | { | 1133 | { |
| 1237 | struct r100_mc_save save; | 1134 | struct r100_mc_save save; |
| @@ -1265,3 +1162,198 @@ void r300_mc_program(struct radeon_device *rdev) | |||
| 1265 | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); | 1162 | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); |
| 1266 | r100_mc_resume(rdev, &save); | 1163 | r100_mc_resume(rdev, &save); |
| 1267 | } | 1164 | } |
| 1165 | |||
| 1166 | void r300_clock_startup(struct radeon_device *rdev) | ||
| 1167 | { | ||
| 1168 | u32 tmp; | ||
| 1169 | |||
| 1170 | if (radeon_dynclks != -1 && radeon_dynclks) | ||
| 1171 | radeon_legacy_set_clock_gating(rdev, 1); | ||
| 1172 | /* We need to force on some of the blocks */ | ||
| 1173 | tmp = RREG32_PLL(R_00000D_SCLK_CNTL); | ||
| 1174 | tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); | ||
| 1175 | if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380)) | ||
| 1176 | tmp |= S_00000D_FORCE_VAP(1); | ||
| 1177 | WREG32_PLL(R_00000D_SCLK_CNTL, tmp); | ||
| 1178 | } | ||
| 1179 | |||
| 1180 | static int r300_startup(struct radeon_device *rdev) | ||
| 1181 | { | ||
| 1182 | int r; | ||
| 1183 | |||
| 1184 | r300_mc_program(rdev); | ||
| 1185 | /* Resume clock */ | ||
| 1186 | r300_clock_startup(rdev); | ||
| 1187 | /* Initialize GPU configuration (# pipes, ...) */ | ||
| 1188 | r300_gpu_init(rdev); | ||
| 1189 | /* Initialize GART (initialize after TTM so we can allocate | ||
| 1190 | * memory through TTM but finalize after TTM) */ | ||
| 1191 | if (rdev->flags & RADEON_IS_PCIE) { | ||
| 1192 | r = rv370_pcie_gart_enable(rdev); | ||
| 1193 | if (r) | ||
| 1194 | return r; | ||
| 1195 | } | ||
| 1196 | if (rdev->flags & RADEON_IS_PCI) { | ||
| 1197 | r = r100_pci_gart_enable(rdev); | ||
| 1198 | if (r) | ||
| 1199 | return r; | ||
| 1200 | } | ||
| 1201 | /* Enable IRQ */ | ||
| 1202 | rdev->irq.sw_int = true; | ||
| 1203 | r100_irq_set(rdev); | ||
| 1204 | /* 1M ring buffer */ | ||
| 1205 | r = r100_cp_init(rdev, 1024 * 1024); | ||
| 1206 | if (r) { | ||
| 1207 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); | ||
| 1208 | return r; | ||
| 1209 | } | ||
| 1210 | r = r100_wb_init(rdev); | ||
| 1211 | if (r) | ||
| 1212 | dev_err(rdev->dev, "failed initializing WB (%d).\n", r); | ||
| 1213 | r = r100_ib_init(rdev); | ||
| 1214 | if (r) { | ||
| 1215 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); | ||
| 1216 | return r; | ||
| 1217 | } | ||
| 1218 | return 0; | ||
| 1219 | } | ||
| 1220 | |||
| 1221 | int r300_resume(struct radeon_device *rdev) | ||
| 1222 | { | ||
| 1223 | /* Make sure GART is not working */ | ||
| 1224 | if (rdev->flags & RADEON_IS_PCIE) | ||
| 1225 | rv370_pcie_gart_disable(rdev); | ||
| 1226 | if (rdev->flags & RADEON_IS_PCI) | ||
| 1227 | r100_pci_gart_disable(rdev); | ||
| 1228 | /* Resume clock before doing reset */ | ||
| 1229 | r300_clock_startup(rdev); | ||
| 1230 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
| 1231 | if (radeon_gpu_reset(rdev)) { | ||
| 1232 | dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
| 1233 | RREG32(R_000E40_RBBM_STATUS), | ||
| 1234 | RREG32(R_0007C0_CP_STAT)); | ||
| 1235 | } | ||
| 1236 | /* post */ | ||
| 1237 | radeon_combios_asic_init(rdev->ddev); | ||
| 1238 | /* Resume clock after posting */ | ||
| 1239 | r300_clock_startup(rdev); | ||
| 1240 | return r300_startup(rdev); | ||
| 1241 | } | ||
| 1242 | |||
| 1243 | int r300_suspend(struct radeon_device *rdev) | ||
| 1244 | { | ||
| 1245 | r100_cp_disable(rdev); | ||
| 1246 | r100_wb_disable(rdev); | ||
| 1247 | r100_irq_disable(rdev); | ||
| 1248 | if (rdev->flags & RADEON_IS_PCIE) | ||
| 1249 | rv370_pcie_gart_disable(rdev); | ||
| 1250 | if (rdev->flags & RADEON_IS_PCI) | ||
| 1251 | r100_pci_gart_disable(rdev); | ||
| 1252 | return 0; | ||
| 1253 | } | ||
| 1254 | |||
| 1255 | void r300_fini(struct radeon_device *rdev) | ||
| 1256 | { | ||
| 1257 | r300_suspend(rdev); | ||
| 1258 | r100_cp_fini(rdev); | ||
| 1259 | r100_wb_fini(rdev); | ||
| 1260 | r100_ib_fini(rdev); | ||
| 1261 | radeon_gem_fini(rdev); | ||
| 1262 | if (rdev->flags & RADEON_IS_PCIE) | ||
| 1263 | rv370_pcie_gart_fini(rdev); | ||
| 1264 | if (rdev->flags & RADEON_IS_PCI) | ||
| 1265 | r100_pci_gart_fini(rdev); | ||
| 1266 | radeon_irq_kms_fini(rdev); | ||
| 1267 | radeon_fence_driver_fini(rdev); | ||
| 1268 | radeon_object_fini(rdev); | ||
| 1269 | radeon_atombios_fini(rdev); | ||
| 1270 | kfree(rdev->bios); | ||
| 1271 | rdev->bios = NULL; | ||
| 1272 | } | ||
| 1273 | |||
| 1274 | int r300_init(struct radeon_device *rdev) | ||
| 1275 | { | ||
| 1276 | int r; | ||
| 1277 | |||
| 1278 | /* Disable VGA */ | ||
| 1279 | r100_vga_render_disable(rdev); | ||
| 1280 | /* Initialize scratch registers */ | ||
| 1281 | radeon_scratch_init(rdev); | ||
| 1282 | /* Initialize surface registers */ | ||
| 1283 | radeon_surface_init(rdev); | ||
| 1284 | /* TODO: disable VGA need to use VGA request */ | ||
| 1285 | /* BIOS*/ | ||
| 1286 | if (!radeon_get_bios(rdev)) { | ||
| 1287 | if (ASIC_IS_AVIVO(rdev)) | ||
| 1288 | return -EINVAL; | ||
| 1289 | } | ||
| 1290 | if (rdev->is_atom_bios) { | ||
| 1291 | dev_err(rdev->dev, "Expecting combios for R300 GPU\n"); | ||
| 1292 | return -EINVAL; | ||
| 1293 | } else { | ||
| 1294 | r = radeon_combios_init(rdev); | ||
| 1295 | if (r) | ||
| 1296 | return r; | ||
| 1297 | } | ||
| 1298 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
| 1299 | if (radeon_gpu_reset(rdev)) { | ||
| 1300 | dev_warn(rdev->dev, | ||
| 1301 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
| 1302 | RREG32(R_000E40_RBBM_STATUS), | ||
| 1303 | RREG32(R_0007C0_CP_STAT)); | ||
| 1304 | } | ||
| 1305 | /* check if cards are posted or not */ | ||
| 1306 | if (!radeon_card_posted(rdev) && rdev->bios) { | ||
| 1307 | DRM_INFO("GPU not posted. posting now...\n"); | ||
| 1308 | radeon_combios_asic_init(rdev->ddev); | ||
| 1309 | } | ||
| 1310 | /* Set asic errata */ | ||
| 1311 | r300_errata(rdev); | ||
| 1312 | /* Initialize clocks */ | ||
| 1313 | radeon_get_clock_info(rdev->ddev); | ||
| 1314 | /* Get vram informations */ | ||
| 1315 | r300_vram_info(rdev); | ||
| 1316 | /* Initialize memory controller (also test AGP) */ | ||
| 1317 | r = r420_mc_init(rdev); | ||
| 1318 | if (r) | ||
| 1319 | return r; | ||
| 1320 | /* Fence driver */ | ||
| 1321 | r = radeon_fence_driver_init(rdev); | ||
| 1322 | if (r) | ||
| 1323 | return r; | ||
| 1324 | r = radeon_irq_kms_init(rdev); | ||
| 1325 | if (r) | ||
| 1326 | return r; | ||
| 1327 | /* Memory manager */ | ||
| 1328 | r = radeon_object_init(rdev); | ||
| 1329 | if (r) | ||
| 1330 | return r; | ||
| 1331 | if (rdev->flags & RADEON_IS_PCIE) { | ||
| 1332 | r = rv370_pcie_gart_init(rdev); | ||
| 1333 | if (r) | ||
| 1334 | return r; | ||
| 1335 | } | ||
| 1336 | if (rdev->flags & RADEON_IS_PCI) { | ||
| 1337 | r = r100_pci_gart_init(rdev); | ||
| 1338 | if (r) | ||
| 1339 | return r; | ||
| 1340 | } | ||
| 1341 | r300_set_reg_safe(rdev); | ||
| 1342 | rdev->accel_working = true; | ||
| 1343 | r = r300_startup(rdev); | ||
| 1344 | if (r) { | ||
| 1345 | /* Something went wrong with the accel init, stop accel */ | ||
| 1346 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | ||
| 1347 | r300_suspend(rdev); | ||
| 1348 | r100_cp_fini(rdev); | ||
| 1349 | r100_wb_fini(rdev); | ||
| 1350 | r100_ib_fini(rdev); | ||
| 1351 | if (rdev->flags & RADEON_IS_PCIE) | ||
| 1352 | rv370_pcie_gart_fini(rdev); | ||
| 1353 | if (rdev->flags & RADEON_IS_PCI) | ||
| 1354 | r100_pci_gart_fini(rdev); | ||
| 1355 | radeon_irq_kms_fini(rdev); | ||
| 1356 | rdev->accel_working = false; | ||
| 1357 | } | ||
| 1358 | return 0; | ||
| 1359 | } | ||
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h index d4fa3eb1074f..4c73114f0de9 100644 --- a/drivers/gpu/drm/radeon/r300d.h +++ b/drivers/gpu/drm/radeon/r300d.h | |||
| @@ -96,6 +96,211 @@ | |||
| 96 | #define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) | 96 | #define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) |
| 97 | #define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) | 97 | #define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) |
| 98 | #define C_000170_AGP_BASE_ADDR 0x00000000 | 98 | #define C_000170_AGP_BASE_ADDR 0x00000000 |
| 99 | #define R_0007C0_CP_STAT 0x0007C0 | ||
| 100 | #define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) | ||
| 101 | #define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) | ||
| 102 | #define C_0007C0_MRU_BUSY 0xFFFFFFFE | ||
| 103 | #define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) | ||
| 104 | #define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) | ||
| 105 | #define C_0007C0_MWU_BUSY 0xFFFFFFFD | ||
| 106 | #define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) | ||
| 107 | #define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) | ||
| 108 | #define C_0007C0_RSIU_BUSY 0xFFFFFFFB | ||
| 109 | #define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) | ||
| 110 | #define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) | ||
| 111 | #define C_0007C0_RCIU_BUSY 0xFFFFFFF7 | ||
| 112 | #define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) | ||
| 113 | #define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) | ||
| 114 | #define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF | ||
| 115 | #define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) | ||
| 116 | #define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) | ||
| 117 | #define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF | ||
| 118 | #define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) | ||
| 119 | #define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) | ||
| 120 | #define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF | ||
| 121 | #define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) | ||
| 122 | #define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) | ||
| 123 | #define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF | ||
| 124 | #define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) | ||
| 125 | #define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) | ||
| 126 | #define C_0007C0_CSI_BUSY 0xFFFFDFFF | ||
| 127 | #define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14) | ||
| 128 | #define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1) | ||
| 129 | #define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF | ||
| 130 | #define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15) | ||
| 131 | #define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1) | ||
| 132 | #define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF | ||
| 133 | #define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) | ||
| 134 | #define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) | ||
| 135 | #define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF | ||
| 136 | #define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) | ||
| 137 | #define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) | ||
| 138 | #define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF | ||
| 139 | #define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) | ||
| 140 | #define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) | ||
| 141 | #define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF | ||
| 142 | #define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) | ||
| 143 | #define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) | ||
| 144 | #define C_0007C0_CP_BUSY 0x7FFFFFFF | ||
| 145 | #define R_000E40_RBBM_STATUS 0x000E40 | ||
| 146 | #define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) | ||
| 147 | #define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) | ||
| 148 | #define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 | ||
| 149 | #define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) | ||
| 150 | #define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) | ||
| 151 | #define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF | ||
| 152 | #define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) | ||
| 153 | #define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) | ||
| 154 | #define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF | ||
| 155 | #define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) | ||
| 156 | #define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) | ||
| 157 | #define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF | ||
| 158 | #define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) | ||
| 159 | #define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) | ||
| 160 | #define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF | ||
| 161 | #define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) | ||
| 162 | #define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) | ||
| 163 | #define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF | ||
| 164 | #define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) | ||
| 165 | #define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) | ||
| 166 | #define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF | ||
| 167 | #define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) | ||
| 168 | #define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) | ||
| 169 | #define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF | ||
| 170 | #define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) | ||
| 171 | #define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) | ||
| 172 | #define C_000E40_ENG_EV_BUSY 0xFFFF7FFF | ||
| 173 | #define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) | ||
| 174 | #define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) | ||
| 175 | #define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF | ||
| 176 | #define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) | ||
| 177 | #define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) | ||
| 178 | #define C_000E40_E2_BUSY 0xFFFDFFFF | ||
| 179 | #define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) | ||
| 180 | #define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) | ||
| 181 | #define C_000E40_RB2D_BUSY 0xFFFBFFFF | ||
| 182 | #define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) | ||
| 183 | #define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) | ||
| 184 | #define C_000E40_RB3D_BUSY 0xFFF7FFFF | ||
| 185 | #define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20) | ||
| 186 | #define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1) | ||
| 187 | #define C_000E40_VAP_BUSY 0xFFEFFFFF | ||
| 188 | #define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) | ||
| 189 | #define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) | ||
| 190 | #define C_000E40_RE_BUSY 0xFFDFFFFF | ||
| 191 | #define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) | ||
| 192 | #define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) | ||
| 193 | #define C_000E40_TAM_BUSY 0xFFBFFFFF | ||
| 194 | #define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) | ||
| 195 | #define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) | ||
| 196 | #define C_000E40_TDM_BUSY 0xFF7FFFFF | ||
| 197 | #define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) | ||
| 198 | #define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) | ||
| 199 | #define C_000E40_PB_BUSY 0xFEFFFFFF | ||
| 200 | #define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25) | ||
| 201 | #define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1) | ||
| 202 | #define C_000E40_TIM_BUSY 0xFDFFFFFF | ||
| 203 | #define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26) | ||
| 204 | #define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1) | ||
| 205 | #define C_000E40_GA_BUSY 0xFBFFFFFF | ||
| 206 | #define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27) | ||
| 207 | #define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1) | ||
| 208 | #define C_000E40_CBA2D_BUSY 0xF7FFFFFF | ||
| 209 | #define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) | ||
| 210 | #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) | ||
| 211 | #define C_000E40_GUI_ACTIVE 0x7FFFFFFF | ||
| 99 | 212 | ||
| 100 | 213 | ||
| 214 | #define R_00000D_SCLK_CNTL 0x00000D | ||
| 215 | #define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0) | ||
| 216 | #define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7) | ||
| 217 | #define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8 | ||
| 218 | #define S_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 3) | ||
| 219 | #define G_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) >> 3) & 0x1) | ||
| 220 | #define C_00000D_CP_MAX_DYN_STOP_LAT 0xFFFFFFF7 | ||
| 221 | #define S_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 4) | ||
| 222 | #define G_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) >> 4) & 0x1) | ||
| 223 | #define C_00000D_HDP_MAX_DYN_STOP_LAT 0xFFFFFFEF | ||
| 224 | #define S_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 5) | ||
| 225 | #define G_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) >> 5) & 0x1) | ||
| 226 | #define C_00000D_TV_MAX_DYN_STOP_LAT 0xFFFFFFDF | ||
| 227 | #define S_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 6) | ||
| 228 | #define G_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) >> 6) & 0x1) | ||
| 229 | #define C_00000D_E2_MAX_DYN_STOP_LAT 0xFFFFFFBF | ||
| 230 | #define S_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 7) | ||
| 231 | #define G_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) >> 7) & 0x1) | ||
| 232 | #define C_00000D_SE_MAX_DYN_STOP_LAT 0xFFFFFF7F | ||
| 233 | #define S_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 8) | ||
| 234 | #define G_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 8) & 0x1) | ||
| 235 | #define C_00000D_IDCT_MAX_DYN_STOP_LAT 0xFFFFFEFF | ||
| 236 | #define S_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 9) | ||
| 237 | #define G_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) >> 9) & 0x1) | ||
| 238 | #define C_00000D_VIP_MAX_DYN_STOP_LAT 0xFFFFFDFF | ||
| 239 | #define S_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 10) | ||
| 240 | #define G_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) >> 10) & 0x1) | ||
| 241 | #define C_00000D_RE_MAX_DYN_STOP_LAT 0xFFFFFBFF | ||
| 242 | #define S_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 11) | ||
| 243 | #define G_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) >> 11) & 0x1) | ||
| 244 | #define C_00000D_PB_MAX_DYN_STOP_LAT 0xFFFFF7FF | ||
| 245 | #define S_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 12) | ||
| 246 | #define G_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) >> 12) & 0x1) | ||
| 247 | #define C_00000D_TAM_MAX_DYN_STOP_LAT 0xFFFFEFFF | ||
| 248 | #define S_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 13) | ||
| 249 | #define G_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) >> 13) & 0x1) | ||
| 250 | #define C_00000D_TDM_MAX_DYN_STOP_LAT 0xFFFFDFFF | ||
| 251 | #define S_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 14) | ||
| 252 | #define G_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) >> 14) & 0x1) | ||
| 253 | #define C_00000D_RB_MAX_DYN_STOP_LAT 0xFFFFBFFF | ||
| 254 | #define S_00000D_FORCE_DISP2(x) (((x) & 0x1) << 15) | ||
| 255 | #define G_00000D_FORCE_DISP2(x) (((x) >> 15) & 0x1) | ||
| 256 | #define C_00000D_FORCE_DISP2 0xFFFF7FFF | ||
| 257 | #define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16) | ||
| 258 | #define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1) | ||
| 259 | #define C_00000D_FORCE_CP 0xFFFEFFFF | ||
| 260 | #define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17) | ||
| 261 | #define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1) | ||
| 262 | #define C_00000D_FORCE_HDP 0xFFFDFFFF | ||
| 263 | #define S_00000D_FORCE_DISP1(x) (((x) & 0x1) << 18) | ||
| 264 | #define G_00000D_FORCE_DISP1(x) (((x) >> 18) & 0x1) | ||
| 265 | #define C_00000D_FORCE_DISP1 0xFFFBFFFF | ||
| 266 | #define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19) | ||
| 267 | #define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1) | ||
| 268 | #define C_00000D_FORCE_TOP 0xFFF7FFFF | ||
| 269 | #define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) | ||
| 270 | #define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) | ||
| 271 | #define C_00000D_FORCE_E2 0xFFEFFFFF | ||
| 272 | #define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21) | ||
| 273 | #define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1) | ||
| 274 | #define C_00000D_FORCE_SE 0xFFDFFFFF | ||
| 275 | #define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) | ||
| 276 | #define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) | ||
| 277 | #define C_00000D_FORCE_IDCT 0xFFBFFFFF | ||
| 278 | #define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23) | ||
| 279 | #define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1) | ||
| 280 | #define C_00000D_FORCE_VIP 0xFF7FFFFF | ||
| 281 | #define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) | ||
| 282 | #define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) | ||
| 283 | #define C_00000D_FORCE_RE 0xFEFFFFFF | ||
| 284 | #define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25) | ||
| 285 | #define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1) | ||
| 286 | #define C_00000D_FORCE_PB 0xFDFFFFFF | ||
| 287 | #define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26) | ||
| 288 | #define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1) | ||
| 289 | #define C_00000D_FORCE_TAM 0xFBFFFFFF | ||
| 290 | #define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27) | ||
| 291 | #define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1) | ||
| 292 | #define C_00000D_FORCE_TDM 0xF7FFFFFF | ||
| 293 | #define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28) | ||
| 294 | #define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) | ||
| 295 | #define C_00000D_FORCE_RB 0xEFFFFFFF | ||
| 296 | #define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29) | ||
| 297 | #define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1) | ||
| 298 | #define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF | ||
| 299 | #define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30) | ||
| 300 | #define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1) | ||
| 301 | #define C_00000D_FORCE_SUBPIC 0xBFFFFFFF | ||
| 302 | #define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31) | ||
| 303 | #define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1) | ||
| 304 | #define C_00000D_FORCE_OV0 0x7FFFFFFF | ||
| 305 | |||
| 101 | #endif | 306 | #endif |
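
The S_/G_/C_ defines above follow the usual radeon register-header pattern: S_*() shifts a value into its bitfield, G_*() extracts it again, and C_* is the AND-mask that clears the field. A minimal read-modify-write sketch of how they are meant to be combined, assuming WREG32_PLL as the write counterpart of the RREG32_PLL accessor used for this PLL register in the r420.c hunk below:

    u32 sclk_cntl;

    sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL);
    sclk_cntl &= C_00000D_FORCE_CP;          /* clear the FORCE_CP field */
    sclk_cntl |= S_00000D_FORCE_CP(1);       /* force the CP clock on */
    WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
    /* G_*() pulls the field back out, e.g. for debug output */
    DRM_DEBUG("FORCE_CP=%u\n", G_00000D_FORCE_CP(sclk_cntl));
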
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 49a2fdc57d27..5c7fe52de30e 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
| @@ -155,6 +155,9 @@ static void r420_debugfs(struct radeon_device *rdev) | |||
| 155 | static void r420_clock_resume(struct radeon_device *rdev) | 155 | static void r420_clock_resume(struct radeon_device *rdev) |
| 156 | { | 156 | { |
| 157 | u32 sclk_cntl; | 157 | u32 sclk_cntl; |
| 158 | |||
| 159 | if (radeon_dynclks != -1 && radeon_dynclks) | ||
| 160 | radeon_atom_set_clock_gating(rdev, 1); | ||
| 158 | sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL); | 161 | sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL); |
| 159 | sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); | 162 | sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); |
| 160 | if (rdev->family == CHIP_R420) | 163 | if (rdev->family == CHIP_R420) |
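
The new guard at the top of r420_clock_resume() only re-enables dynamic clock gating when it was explicitly requested: radeon_dynclks is presumably the driver's dynclks module parameter, where -1 appears to mean "leave the driver default" and any other non-zero value opts in. A sketch of the intended semantics (the command-line spelling radeon.dynclks=1 is an assumption here):

    /* assumed convention: -1 = driver default, 0 = off, >0 = on */
    if (radeon_dynclks != -1 && radeon_dynclks)
        radeon_atom_set_clock_gating(rdev, 1);  /* gate clocks only when asked */
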
| @@ -167,6 +170,8 @@ static int r420_startup(struct radeon_device *rdev) | |||
| 167 | int r; | 170 | int r; |
| 168 | 171 | ||
| 169 | r300_mc_program(rdev); | 172 | r300_mc_program(rdev); |
| 173 | /* Resume clock */ | ||
| 174 | r420_clock_resume(rdev); | ||
| 170 | /* Initialize GART (initialize after TTM so we can allocate | 175 | /* Initialize GART (initialize after TTM so we can allocate |
| 171 | * memory through TTM but finalize after TTM) */ | 176 | * memory through TTM but finalize after TTM) */ |
| 172 | if (rdev->flags & RADEON_IS_PCIE) { | 177 | if (rdev->flags & RADEON_IS_PCIE) { |
| @@ -267,7 +272,6 @@ int r420_init(struct radeon_device *rdev) | |||
| 267 | { | 272 | { |
| 268 | int r; | 273 | int r; |
| 269 | 274 | ||
| 270 | rdev->new_init_path = true; | ||
| 271 | /* Initialize scratch registers */ | 275 | /* Initialize scratch registers */ |
| 272 | radeon_scratch_init(rdev); | 276 | radeon_scratch_init(rdev); |
| 273 | /* Initialize surface registers */ | 277 | /* Initialize surface registers */ |
diff --git a/drivers/gpu/drm/radeon/r420d.h b/drivers/gpu/drm/radeon/r420d.h index a48a7db1e2aa..fc78d31a0b4a 100644 --- a/drivers/gpu/drm/radeon/r420d.h +++ b/drivers/gpu/drm/radeon/r420d.h | |||
| @@ -212,9 +212,9 @@ | |||
| 212 | #define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) | 212 | #define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) |
| 213 | #define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) | 213 | #define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) |
| 214 | #define C_00000D_FORCE_E2 0xFFEFFFFF | 214 | #define C_00000D_FORCE_E2 0xFFEFFFFF |
| 215 | #define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21) | 215 | #define S_00000D_FORCE_VAP(x) (((x) & 0x1) << 21) |
| 216 | #define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1) | 216 | #define G_00000D_FORCE_VAP(x) (((x) >> 21) & 0x1) |
| 217 | #define C_00000D_FORCE_SE 0xFFDFFFFF | 217 | #define C_00000D_FORCE_VAP 0xFFDFFFFF |
| 218 | #define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) | 218 | #define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) |
| 219 | #define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) | 219 | #define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) |
| 220 | #define C_00000D_FORCE_IDCT 0xFFBFFFFF | 220 | #define C_00000D_FORCE_IDCT 0xFFBFFFFF |
| @@ -224,24 +224,24 @@ | |||
| 224 | #define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) | 224 | #define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) |
| 225 | #define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) | 225 | #define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) |
| 226 | #define C_00000D_FORCE_RE 0xFEFFFFFF | 226 | #define C_00000D_FORCE_RE 0xFEFFFFFF |
| 227 | #define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25) | 227 | #define S_00000D_FORCE_SR(x) (((x) & 0x1) << 25) |
| 228 | #define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1) | 228 | #define G_00000D_FORCE_SR(x) (((x) >> 25) & 0x1) |
| 229 | #define C_00000D_FORCE_PB 0xFDFFFFFF | 229 | #define C_00000D_FORCE_SR 0xFDFFFFFF |
| 230 | #define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26) | 230 | #define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26) |
| 231 | #define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1) | 231 | #define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1) |
| 232 | #define C_00000D_FORCE_PX 0xFBFFFFFF | 232 | #define C_00000D_FORCE_PX 0xFBFFFFFF |
| 233 | #define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27) | 233 | #define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27) |
| 234 | #define G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1) | 234 | #define G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1) |
| 235 | #define C_00000D_FORCE_TX 0xF7FFFFFF | 235 | #define C_00000D_FORCE_TX 0xF7FFFFFF |
| 236 | #define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28) | 236 | #define S_00000D_FORCE_US(x) (((x) & 0x1) << 28) |
| 237 | #define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) | 237 | #define G_00000D_FORCE_US(x) (((x) >> 28) & 0x1) |
| 238 | #define C_00000D_FORCE_RB 0xEFFFFFFF | 238 | #define C_00000D_FORCE_US 0xEFFFFFFF |
| 239 | #define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29) | 239 | #define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29) |
| 240 | #define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1) | 240 | #define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1) |
| 241 | #define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF | 241 | #define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF |
| 242 | #define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30) | 242 | #define S_00000D_FORCE_SU(x) (((x) & 0x1) << 30) |
| 243 | #define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1) | 243 | #define G_00000D_FORCE_SU(x) (((x) >> 30) & 0x1) |
| 244 | #define C_00000D_FORCE_SUBPIC 0xBFFFFFFF | 244 | #define C_00000D_FORCE_SU 0xBFFFFFFF |
| 245 | #define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31) | 245 | #define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31) |
| 246 | #define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1) | 246 | #define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1) |
| 247 | #define C_00000D_FORCE_OV0 0x7FFFFFFF | 247 | #define C_00000D_FORCE_OV0 0x7FFFFFFF |
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 0bf13fccdaf2..a555b7b19b48 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
| @@ -186,7 +186,7 @@ static int r520_startup(struct radeon_device *rdev) | |||
| 186 | } | 186 | } |
| 187 | /* Enable IRQ */ | 187 | /* Enable IRQ */ |
| 188 | rdev->irq.sw_int = true; | 188 | rdev->irq.sw_int = true; |
| 189 | r100_irq_set(rdev); | 189 | rs600_irq_set(rdev); |
| 190 | /* 1M ring buffer */ | 190 | /* 1M ring buffer */ |
| 191 | r = r100_cp_init(rdev, 1024 * 1024); | 191 | r = r100_cp_init(rdev, 1024 * 1024); |
| 192 | if (r) { | 192 | if (r) { |
| @@ -228,7 +228,6 @@ int r520_init(struct radeon_device *rdev) | |||
| 228 | { | 228 | { |
| 229 | int r; | 229 | int r; |
| 230 | 230 | ||
| 231 | rdev->new_init_path = true; | ||
| 232 | /* Initialize scratch registers */ | 231 | /* Initialize scratch registers */ |
| 233 | radeon_scratch_init(rdev); | 232 | radeon_scratch_init(rdev); |
| 234 | /* Initialize surface registers */ | 233 | /* Initialize surface registers */ |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 2e4e60edbff4..609719490ec2 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -65,16 +65,11 @@ MODULE_FIRMWARE("radeon/RV710_me.bin"); | |||
| 65 | 65 | ||
| 66 | int r600_debugfs_mc_info_init(struct radeon_device *rdev); | 66 | int r600_debugfs_mc_info_init(struct radeon_device *rdev); |
| 67 | 67 | ||
| 68 | /* This files gather functions specifics to: | 68 | /* r600,rv610,rv630,rv620,rv635,rv670 */ |
| 69 | * r600,rv610,rv630,rv620,rv635,rv670 | ||
| 70 | * | ||
| 71 | * Some of these functions might be used by newer ASICs. | ||
| 72 | */ | ||
| 73 | int r600_mc_wait_for_idle(struct radeon_device *rdev); | 69 | int r600_mc_wait_for_idle(struct radeon_device *rdev); |
| 74 | void r600_gpu_init(struct radeon_device *rdev); | 70 | void r600_gpu_init(struct radeon_device *rdev); |
| 75 | void r600_fini(struct radeon_device *rdev); | 71 | void r600_fini(struct radeon_device *rdev); |
| 76 | 72 | ||
| 77 | |||
| 78 | /* | 73 | /* |
| 79 | * R600 PCIE GART | 74 | * R600 PCIE GART |
| 80 | */ | 75 | */ |
| @@ -168,7 +163,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev) | |||
| 168 | WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); | 163 | WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); |
| 169 | WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); | 164 | WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); |
| 170 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); | 165 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); |
| 171 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12); | 166 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); |
| 172 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); | 167 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); |
| 173 | WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | | 168 | WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | |
| 174 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); | 169 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); |
| @@ -225,6 +220,40 @@ void r600_pcie_gart_fini(struct radeon_device *rdev) | |||
| 225 | radeon_gart_fini(rdev); | 220 | radeon_gart_fini(rdev); |
| 226 | } | 221 | } |
| 227 | 222 | ||
| 223 | void r600_agp_enable(struct radeon_device *rdev) | ||
| 224 | { | ||
| 225 | u32 tmp; | ||
| 226 | int i; | ||
| 227 | |||
| 228 | /* Setup L2 cache */ | ||
| 229 | WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | | ||
| 230 | ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | | ||
| 231 | EFFECTIVE_L2_QUEUE_SIZE(7)); | ||
| 232 | WREG32(VM_L2_CNTL2, 0); | ||
| 233 | WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1)); | ||
| 234 | /* Setup TLB control */ | ||
| 235 | tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING | | ||
| 236 | SYSTEM_ACCESS_MODE_NOT_IN_SYS | | ||
| 237 | EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) | | ||
| 238 | ENABLE_WAIT_L2_QUERY; | ||
| 239 | WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp); | ||
| 240 | WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); | ||
| 241 | WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING); | ||
| 242 | WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); | ||
| 243 | WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp); | ||
| 244 | WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp); | ||
| 245 | WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp); | ||
| 246 | WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp); | ||
| 247 | WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp); | ||
| 248 | WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp); | ||
| 249 | WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp); | ||
| 250 | WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp); | ||
| 251 | WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); | ||
| 252 | WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); | ||
| 253 | for (i = 0; i < 7; i++) | ||
| 254 | WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); | ||
| 255 | } | ||
| 256 | |||
| 228 | int r600_mc_wait_for_idle(struct radeon_device *rdev) | 257 | int r600_mc_wait_for_idle(struct radeon_device *rdev) |
| 229 | { | 258 | { |
| 230 | unsigned i; | 259 | unsigned i; |
| @@ -240,14 +269,9 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev) | |||
| 240 | return -1; | 269 | return -1; |
| 241 | } | 270 | } |
| 242 | 271 | ||
| 243 | static void r600_mc_resume(struct radeon_device *rdev) | 272 | static void r600_mc_program(struct radeon_device *rdev) |
| 244 | { | 273 | { |
| 245 | u32 d1vga_control, d2vga_control; | 274 | struct rv515_mc_save save; |
| 246 | u32 vga_render_control, vga_hdp_control; | ||
| 247 | u32 d1crtc_control, d2crtc_control; | ||
| 248 | u32 new_d1grph_primary, new_d1grph_secondary; | ||
| 249 | u32 new_d2grph_primary, new_d2grph_secondary; | ||
| 250 | u64 old_vram_start; | ||
| 251 | u32 tmp; | 275 | u32 tmp; |
| 252 | int i, j; | 276 | int i, j; |
| 253 | 277 | ||
| @@ -261,85 +285,51 @@ static void r600_mc_resume(struct radeon_device *rdev) | |||
| 261 | } | 285 | } |
| 262 | WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); | 286 | WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); |
| 263 | 287 | ||
| 264 | d1vga_control = RREG32(D1VGA_CONTROL); | 288 | rv515_mc_stop(rdev, &save); |
| 265 | d2vga_control = RREG32(D2VGA_CONTROL); | ||
| 266 | vga_render_control = RREG32(VGA_RENDER_CONTROL); | ||
| 267 | vga_hdp_control = RREG32(VGA_HDP_CONTROL); | ||
| 268 | d1crtc_control = RREG32(D1CRTC_CONTROL); | ||
| 269 | d2crtc_control = RREG32(D2CRTC_CONTROL); | ||
| 270 | old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; | ||
| 271 | new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS); | ||
| 272 | new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS); | ||
| 273 | new_d1grph_primary += rdev->mc.vram_start - old_vram_start; | ||
| 274 | new_d1grph_secondary += rdev->mc.vram_start - old_vram_start; | ||
| 275 | new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS); | ||
| 276 | new_d2grph_secondary = RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS); | ||
| 277 | new_d2grph_primary += rdev->mc.vram_start - old_vram_start; | ||
| 278 | new_d2grph_secondary += rdev->mc.vram_start - old_vram_start; | ||
| 279 | |||
| 280 | /* Stop all video */ | ||
| 281 | WREG32(D1VGA_CONTROL, 0); | ||
| 282 | WREG32(D2VGA_CONTROL, 0); | ||
| 283 | WREG32(VGA_RENDER_CONTROL, 0); | ||
| 284 | WREG32(D1CRTC_UPDATE_LOCK, 1); | ||
| 285 | WREG32(D2CRTC_UPDATE_LOCK, 1); | ||
| 286 | WREG32(D1CRTC_CONTROL, 0); | ||
| 287 | WREG32(D2CRTC_CONTROL, 0); | ||
| 288 | WREG32(D1CRTC_UPDATE_LOCK, 0); | ||
| 289 | WREG32(D2CRTC_UPDATE_LOCK, 0); | ||
| 290 | |||
| 291 | mdelay(1); | ||
| 292 | if (r600_mc_wait_for_idle(rdev)) { | 289 | if (r600_mc_wait_for_idle(rdev)) { |
| 293 | printk(KERN_WARNING "[drm] MC not idle !\n"); | 289 | dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); |
| 294 | } | 291 | } |
| 295 | 292 | /* Lockout access through VGA aperture (doesn't exist before R600) */ | |
| 296 | /* Lockout access through VGA aperture*/ | ||
| 297 | WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); | 293 | WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); |
| 298 | |||
| 299 | /* Update configuration */ | 294 | /* Update configuration */ |
| 300 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); | 295 | if (rdev->flags & RADEON_IS_AGP) { |
| 301 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12); | 296 | if (rdev->mc.vram_start < rdev->mc.gtt_start) { |
| 297 | /* VRAM before AGP */ | ||
| 298 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, | ||
| 299 | rdev->mc.vram_start >> 12); | ||
| 300 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, | ||
| 301 | rdev->mc.gtt_end >> 12); | ||
| 302 | } else { | ||
| 303 | /* VRAM after AGP */ | ||
| 304 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, | ||
| 305 | rdev->mc.gtt_start >> 12); | ||
| 306 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, | ||
| 307 | rdev->mc.vram_end >> 12); | ||
| 308 | } | ||
| 309 | } else { | ||
| 310 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); | ||
| 311 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12); | ||
| 312 | } | ||
| 302 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); | 313 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); |
| 303 | tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16; | 314 | tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; |
| 304 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); | 315 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); |
| 305 | WREG32(MC_VM_FB_LOCATION, tmp); | 316 | WREG32(MC_VM_FB_LOCATION, tmp); |
| 306 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); | 317 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); |
| 307 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); | 318 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); |
| 308 | WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); | 319 | WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF); |
| 309 | if (rdev->flags & RADEON_IS_AGP) { | 320 | if (rdev->flags & RADEON_IS_AGP) { |
| 310 | WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16); | 321 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22); |
| 311 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); | 322 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22); |
| 312 | WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); | 323 | WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); |
| 313 | } else { | 324 | } else { |
| 314 | WREG32(MC_VM_AGP_BASE, 0); | 325 | WREG32(MC_VM_AGP_BASE, 0); |
| 315 | WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); | 326 | WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); |
| 316 | WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); | 327 | WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); |
| 317 | } | 328 | } |
| 318 | WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary); | ||
| 319 | WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary); | ||
| 320 | WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary); | ||
| 321 | WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary); | ||
| 322 | WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start); | ||
| 323 | |||
| 324 | /* Unlock host access */ | ||
| 325 | WREG32(VGA_HDP_CONTROL, vga_hdp_control); | ||
| 326 | |||
| 327 | mdelay(1); | ||
| 328 | if (r600_mc_wait_for_idle(rdev)) { | 329 | if (r600_mc_wait_for_idle(rdev)) { |
| 329 | printk(KERN_WARNING "[drm] MC not idle !\n"); | 330 | dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); |
| 330 | } | 331 | } |
| 331 | 332 | rv515_mc_resume(rdev, &save); | |
| 332 | /* Restore video state */ | ||
| 333 | WREG32(D1CRTC_UPDATE_LOCK, 1); | ||
| 334 | WREG32(D2CRTC_UPDATE_LOCK, 1); | ||
| 335 | WREG32(D1CRTC_CONTROL, d1crtc_control); | ||
| 336 | WREG32(D2CRTC_CONTROL, d2crtc_control); | ||
| 337 | WREG32(D1CRTC_UPDATE_LOCK, 0); | ||
| 338 | WREG32(D2CRTC_UPDATE_LOCK, 0); | ||
| 339 | WREG32(D1VGA_CONTROL, d1vga_control); | ||
| 340 | WREG32(D2VGA_CONTROL, d2vga_control); | ||
| 341 | WREG32(VGA_RENDER_CONTROL, vga_render_control); | ||
| 342 | |||
| 343 | /* we need to own VRAM, so turn off the VGA renderer here | 333 | /* we need to own VRAM, so turn off the VGA renderer here |
| 344 | * to stop it overwriting our objects */ | 334 | * to stop it overwriting our objects */ |
| 345 | rv515_vga_render_disable(rdev); | 335 | rv515_vga_render_disable(rdev); |
| @@ -445,9 +435,9 @@ int r600_mc_init(struct radeon_device *rdev) | |||
| 445 | } | 435 | } |
| 446 | } | 436 | } |
| 447 | rdev->mc.vram_start = rdev->mc.vram_location; | 437 | rdev->mc.vram_start = rdev->mc.vram_location; |
| 448 | rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size; | 438 | rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
| 449 | rdev->mc.gtt_start = rdev->mc.gtt_location; | 439 | rdev->mc.gtt_start = rdev->mc.gtt_location; |
| 450 | rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size; | 440 | rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; |
| 451 | /* FIXME: we should enforce default clock in case GPU is not in | 441 | /* FIXME: we should enforce default clock in case GPU is not in |
| 452 | * default setup | 442 | * default setup |
| 453 | */ | 443 | */ |
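
The last two changed lines switch vram_end and gtt_end from an exclusive limit (start + size) to an inclusive last address (start + size - 1), which is why the register writes earlier in this patch drop their explicit "- 1". A sketch of the convention, using only values that appear in the hunks above:

    rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
    rdev->mc.gtt_end  = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;

    /* inclusive ends can now be programmed directly ... */
    WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
    WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
    /* ... where the old exclusive convention needed (end - 1) >> 12 */
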
| @@ -463,6 +453,7 @@ int r600_mc_init(struct radeon_device *rdev) | |||
| 463 | */ | 453 | */ |
| 464 | int r600_gpu_soft_reset(struct radeon_device *rdev) | 454 | int r600_gpu_soft_reset(struct radeon_device *rdev) |
| 465 | { | 455 | { |
| 456 | struct rv515_mc_save save; | ||
| 466 | u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) | | 457 | u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) | |
| 467 | S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) | | 458 | S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) | |
| 468 | S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) | | 459 | S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) | |
| @@ -480,13 +471,25 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) | |||
| 480 | S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) | | 471 | S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) | |
| 481 | S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); | 472 | S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); |
| 482 | u32 srbm_reset = 0; | 473 | u32 srbm_reset = 0; |
| 474 | u32 tmp; | ||
| 483 | 475 | ||
| 476 | dev_info(rdev->dev, "GPU softreset \n"); | ||
| 477 | dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", | ||
| 478 | RREG32(R_008010_GRBM_STATUS)); | ||
| 479 | dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", | ||
| 480 | RREG32(R_008014_GRBM_STATUS2)); | ||
| 481 | dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", | ||
| 482 | RREG32(R_000E50_SRBM_STATUS)); | ||
| 483 | rv515_mc_stop(rdev, &save); | ||
| 484 | if (r600_mc_wait_for_idle(rdev)) { | ||
| 485 | dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); | ||
| 486 | } | ||
| 484 | /* Disable CP parsing/prefetching */ | 487 | /* Disable CP parsing/prefetching */ |
| 485 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff)); | 488 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff)); |
| 486 | /* Check if any of the rendering block is busy and reset it */ | 489 | /* Check if any of the rendering block is busy and reset it */ |
| 487 | if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) || | 490 | if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) || |
| 488 | (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { | 491 | (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { |
| 489 | WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CR(1) | | 492 | tmp = S_008020_SOFT_RESET_CR(1) | |
| 490 | S_008020_SOFT_RESET_DB(1) | | 493 | S_008020_SOFT_RESET_DB(1) | |
| 491 | S_008020_SOFT_RESET_CB(1) | | 494 | S_008020_SOFT_RESET_CB(1) | |
| 492 | S_008020_SOFT_RESET_PA(1) | | 495 | S_008020_SOFT_RESET_PA(1) | |
| @@ -498,14 +501,18 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) | |||
| 498 | S_008020_SOFT_RESET_TC(1) | | 501 | S_008020_SOFT_RESET_TC(1) | |
| 499 | S_008020_SOFT_RESET_TA(1) | | 502 | S_008020_SOFT_RESET_TA(1) | |
| 500 | S_008020_SOFT_RESET_VC(1) | | 503 | S_008020_SOFT_RESET_VC(1) | |
| 501 | S_008020_SOFT_RESET_VGT(1)); | 504 | S_008020_SOFT_RESET_VGT(1); |
| 505 | dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); | ||
| 506 | WREG32(R_008020_GRBM_SOFT_RESET, tmp); | ||
| 502 | (void)RREG32(R_008020_GRBM_SOFT_RESET); | 507 | (void)RREG32(R_008020_GRBM_SOFT_RESET); |
| 503 | udelay(50); | 508 | udelay(50); |
| 504 | WREG32(R_008020_GRBM_SOFT_RESET, 0); | 509 | WREG32(R_008020_GRBM_SOFT_RESET, 0); |
| 505 | (void)RREG32(R_008020_GRBM_SOFT_RESET); | 510 | (void)RREG32(R_008020_GRBM_SOFT_RESET); |
| 506 | } | 511 | } |
| 507 | /* Reset CP (we always reset CP) */ | 512 | /* Reset CP (we always reset CP) */ |
| 508 | WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CP(1)); | 513 | tmp = S_008020_SOFT_RESET_CP(1); |
| 514 | dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); | ||
| 515 | WREG32(R_008020_GRBM_SOFT_RESET, tmp); | ||
| 509 | (void)RREG32(R_008020_GRBM_SOFT_RESET); | 516 | (void)RREG32(R_008020_GRBM_SOFT_RESET); |
| 510 | udelay(50); | 517 | udelay(50); |
| 511 | WREG32(R_008020_GRBM_SOFT_RESET, 0); | 518 | WREG32(R_008020_GRBM_SOFT_RESET, 0); |
| @@ -533,6 +540,14 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) | |||
| 533 | srbm_reset |= S_000E60_SOFT_RESET_RLC(1); | 540 | srbm_reset |= S_000E60_SOFT_RESET_RLC(1); |
| 534 | if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS))) | 541 | if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS))) |
| 535 | srbm_reset |= S_000E60_SOFT_RESET_SEM(1); | 542 | srbm_reset |= S_000E60_SOFT_RESET_SEM(1); |
| 543 | if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS))) | ||
| 544 | srbm_reset |= S_000E60_SOFT_RESET_BIF(1); | ||
| 545 | dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset); | ||
| 546 | WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset); | ||
| 547 | (void)RREG32(R_000E60_SRBM_SOFT_RESET); | ||
| 548 | udelay(50); | ||
| 549 | WREG32(R_000E60_SRBM_SOFT_RESET, 0); | ||
| 550 | (void)RREG32(R_000E60_SRBM_SOFT_RESET); | ||
| 536 | WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset); | 551 | WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset); |
| 537 | (void)RREG32(R_000E60_SRBM_SOFT_RESET); | 552 | (void)RREG32(R_000E60_SRBM_SOFT_RESET); |
| 538 | udelay(50); | 553 | udelay(50); |
| @@ -540,6 +555,17 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) | |||
| 540 | (void)RREG32(R_000E60_SRBM_SOFT_RESET); | 555 | (void)RREG32(R_000E60_SRBM_SOFT_RESET); |
| 541 | /* Wait a little for things to settle down */ | 556 | /* Wait a little for things to settle down */ |
| 542 | udelay(50); | 557 | udelay(50); |
| 558 | dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", | ||
| 559 | RREG32(R_008010_GRBM_STATUS)); | ||
| 560 | dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", | ||
| 561 | RREG32(R_008014_GRBM_STATUS2)); | ||
| 562 | dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", | ||
| 563 | RREG32(R_000E50_SRBM_STATUS)); | ||
| 564 | /* After reset we need to reinit the asic as the GPU often ends up in an | ||
| 565 | * incoherent state. | ||
| 566 | */ | ||
| 567 | atom_asic_init(rdev->mode_info.atom_context); | ||
| 568 | rv515_mc_resume(rdev, &save); | ||
| 543 | return 0; | 569 | return 0; |
| 544 | } | 570 | } |
| 545 | 571 | ||
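
Each block of the soft-reset sequence above uses the same pulse shape; isolated here as a sketch (this is not additional code in the patch):

    WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);  /* assert the selected resets */
    (void)RREG32(R_000E60_SRBM_SOFT_RESET);        /* read back to post the write */
    udelay(50);                                    /* give the blocks time to reset */
    WREG32(R_000E60_SRBM_SOFT_RESET, 0);           /* deassert */
    (void)RREG32(R_000E60_SRBM_SOFT_RESET);

The same write/read-back/delay/clear pattern is used for R_008020_GRBM_SOFT_RESET, and the GRBM/SRBM status registers are dumped before and after so a failed reset leaves evidence in the kernel log.
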
| @@ -1350,32 +1376,47 @@ int r600_ring_test(struct radeon_device *rdev) | |||
| 1350 | return r; | 1376 | return r; |
| 1351 | } | 1377 | } |
| 1352 | 1378 | ||
| 1353 | /* | 1379 | void r600_wb_disable(struct radeon_device *rdev) |
| 1354 | * Writeback | 1380 | { |
| 1355 | */ | 1381 | WREG32(SCRATCH_UMSK, 0); |
| 1356 | int r600_wb_init(struct radeon_device *rdev) | 1382 | if (rdev->wb.wb_obj) { |
| 1383 | radeon_object_kunmap(rdev->wb.wb_obj); | ||
| 1384 | radeon_object_unpin(rdev->wb.wb_obj); | ||
| 1385 | } | ||
| 1386 | } | ||
| 1387 | |||
| 1388 | void r600_wb_fini(struct radeon_device *rdev) | ||
| 1389 | { | ||
| 1390 | r600_wb_disable(rdev); | ||
| 1391 | if (rdev->wb.wb_obj) { | ||
| 1392 | radeon_object_unref(&rdev->wb.wb_obj); | ||
| 1393 | rdev->wb.wb = NULL; | ||
| 1394 | rdev->wb.wb_obj = NULL; | ||
| 1395 | } | ||
| 1396 | } | ||
| 1397 | |||
| 1398 | int r600_wb_enable(struct radeon_device *rdev) | ||
| 1357 | { | 1399 | { |
| 1358 | int r; | 1400 | int r; |
| 1359 | 1401 | ||
| 1360 | if (rdev->wb.wb_obj == NULL) { | 1402 | if (rdev->wb.wb_obj == NULL) { |
| 1361 | r = radeon_object_create(rdev, NULL, 4096, | 1403 | r = radeon_object_create(rdev, NULL, 4096, true, |
| 1362 | true, | 1404 | RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj); |
| 1363 | RADEON_GEM_DOMAIN_GTT, | ||
| 1364 | false, &rdev->wb.wb_obj); | ||
| 1365 | if (r) { | 1405 | if (r) { |
| 1366 | DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r); | 1406 | dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r); |
| 1367 | return r; | 1407 | return r; |
| 1368 | } | 1408 | } |
| 1369 | r = radeon_object_pin(rdev->wb.wb_obj, | 1409 | r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, |
| 1370 | RADEON_GEM_DOMAIN_GTT, | 1410 | &rdev->wb.gpu_addr); |
| 1371 | &rdev->wb.gpu_addr); | ||
| 1372 | if (r) { | 1411 | if (r) { |
| 1373 | DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r); | 1412 | dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r); |
| 1413 | r600_wb_fini(rdev); | ||
| 1374 | return r; | 1414 | return r; |
| 1375 | } | 1415 | } |
| 1376 | r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); | 1416 | r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); |
| 1377 | if (r) { | 1417 | if (r) { |
| 1378 | DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r); | 1418 | dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r); |
| 1419 | r600_wb_fini(rdev); | ||
| 1379 | return r; | 1420 | return r; |
| 1380 | } | 1421 | } |
| 1381 | } | 1422 | } |
| @@ -1386,21 +1427,6 @@ int r600_wb_init(struct radeon_device *rdev) | |||
| 1386 | return 0; | 1427 | return 0; |
| 1387 | } | 1428 | } |
| 1388 | 1429 | ||
| 1389 | void r600_wb_fini(struct radeon_device *rdev) | ||
| 1390 | { | ||
| 1391 | if (rdev->wb.wb_obj) { | ||
| 1392 | radeon_object_kunmap(rdev->wb.wb_obj); | ||
| 1393 | radeon_object_unpin(rdev->wb.wb_obj); | ||
| 1394 | radeon_object_unref(&rdev->wb.wb_obj); | ||
| 1395 | rdev->wb.wb = NULL; | ||
| 1396 | rdev->wb.wb_obj = NULL; | ||
| 1397 | } | ||
| 1398 | } | ||
| 1399 | |||
| 1400 | |||
| 1401 | /* | ||
| 1402 | * CS | ||
| 1403 | */ | ||
| 1404 | void r600_fence_ring_emit(struct radeon_device *rdev, | 1430 | void r600_fence_ring_emit(struct radeon_device *rdev, |
| 1405 | struct radeon_fence *fence) | 1431 | struct radeon_fence *fence) |
| 1406 | { | 1432 | { |
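
r600_wb_init() is split here into three helpers with distinct lifetimes; the call sites changed later in this patch use them roughly as follows (sketch):

    /* startup/resume: create, pin and map the writeback BO; errors are tolerated */
    r600_wb_enable(rdev);

    /* suspend: clear SCRATCH_UMSK and unmap/unpin, but keep the BO for resume */
    r600_wb_disable(rdev);

    /* driver teardown: disable, then drop the reference and free the BO */
    r600_wb_fini(rdev);
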
| @@ -1477,11 +1503,14 @@ int r600_startup(struct radeon_device *rdev) | |||
| 1477 | { | 1503 | { |
| 1478 | int r; | 1504 | int r; |
| 1479 | 1505 | ||
| 1480 | r600_gpu_reset(rdev); | 1506 | r600_mc_program(rdev); |
| 1481 | r600_mc_resume(rdev); | 1507 | if (rdev->flags & RADEON_IS_AGP) { |
| 1482 | r = r600_pcie_gart_enable(rdev); | 1508 | r600_agp_enable(rdev); |
| 1483 | if (r) | 1509 | } else { |
| 1484 | return r; | 1510 | r = r600_pcie_gart_enable(rdev); |
| 1511 | if (r) | ||
| 1512 | return r; | ||
| 1513 | } | ||
| 1485 | r600_gpu_init(rdev); | 1514 | r600_gpu_init(rdev); |
| 1486 | 1515 | ||
| 1487 | r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | 1516 | r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, |
| @@ -1500,9 +1529,8 @@ int r600_startup(struct radeon_device *rdev) | |||
| 1500 | r = r600_cp_resume(rdev); | 1529 | r = r600_cp_resume(rdev); |
| 1501 | if (r) | 1530 | if (r) |
| 1502 | return r; | 1531 | return r; |
| 1503 | r = r600_wb_init(rdev); | 1532 | /* write back buffers are not vital so don't worry about failure */ |
| 1504 | if (r) | 1533 | r600_wb_enable(rdev); |
| 1505 | return r; | ||
| 1506 | return 0; | 1534 | return 0; |
| 1507 | } | 1535 | } |
| 1508 | 1536 | ||
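
With r600_mc_program() and the AGP/PCIE split in place, the startup path now reads roughly as below (abridged sketch; the elided steps are unchanged context):

    r600_mc_program(rdev);                  /* program the MC apertures first */
    if (rdev->flags & RADEON_IS_AGP) {
        r600_agp_enable(rdev);              /* AGP boards: no page table needed */
    } else {
        r = r600_pcie_gart_enable(rdev);
        if (r)
            return r;                       /* PCIE GART failure stays fatal */
    }
    r600_gpu_init(rdev);
    /* ... pin the blit shader object, ring/CP setup, r600_cp_resume() ... */
    r600_wb_enable(rdev);                   /* best effort, see comment above */
    return 0;
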
| @@ -1524,15 +1552,12 @@ int r600_resume(struct radeon_device *rdev) | |||
| 1524 | { | 1552 | { |
| 1525 | int r; | 1553 | int r; |
| 1526 | 1554 | ||
| 1527 | if (radeon_gpu_reset(rdev)) { | 1555 | /* Do not reset the GPU before posting; on r600 hw, unlike r500 hw, |
| 1528 | /* FIXME: what do we want to do here ? */ | 1556 | * posting performs the tasks needed to bring the GPU back into good |
| 1529 | } | 1557 | * shape. |
| 1558 | */ | ||
| 1530 | /* post card */ | 1559 | /* post card */ |
| 1531 | if (rdev->is_atom_bios) { | 1560 | atom_asic_init(rdev->mode_info.atom_context); |
| 1532 | atom_asic_init(rdev->mode_info.atom_context); | ||
| 1533 | } else { | ||
| 1534 | radeon_combios_asic_init(rdev->ddev); | ||
| 1535 | } | ||
| 1536 | /* Initialize clocks */ | 1561 | /* Initialize clocks */ |
| 1537 | r = radeon_clocks_init(rdev); | 1562 | r = radeon_clocks_init(rdev); |
| 1538 | if (r) { | 1563 | if (r) { |
| @@ -1545,7 +1570,7 @@ int r600_resume(struct radeon_device *rdev) | |||
| 1545 | return r; | 1570 | return r; |
| 1546 | } | 1571 | } |
| 1547 | 1572 | ||
| 1548 | r = radeon_ib_test(rdev); | 1573 | r = r600_ib_test(rdev); |
| 1549 | if (r) { | 1574 | if (r) { |
| 1550 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); | 1575 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); |
| 1551 | return r; | 1576 | return r; |
| @@ -1553,13 +1578,12 @@ int r600_resume(struct radeon_device *rdev) | |||
| 1553 | return r; | 1578 | return r; |
| 1554 | } | 1579 | } |
| 1555 | 1580 | ||
| 1556 | |||
| 1557 | int r600_suspend(struct radeon_device *rdev) | 1581 | int r600_suspend(struct radeon_device *rdev) |
| 1558 | { | 1582 | { |
| 1559 | /* FIXME: we should wait for ring to be empty */ | 1583 | /* FIXME: we should wait for ring to be empty */ |
| 1560 | r600_cp_stop(rdev); | 1584 | r600_cp_stop(rdev); |
| 1561 | rdev->cp.ready = false; | 1585 | rdev->cp.ready = false; |
| 1562 | 1586 | r600_wb_disable(rdev); | |
| 1563 | r600_pcie_gart_disable(rdev); | 1587 | r600_pcie_gart_disable(rdev); |
| 1564 | /* unpin shaders bo */ | 1588 | /* unpin shaders bo */ |
| 1565 | radeon_object_unpin(rdev->r600_blit.shader_obj); | 1589 | radeon_object_unpin(rdev->r600_blit.shader_obj); |
| @@ -1576,7 +1600,6 @@ int r600_init(struct radeon_device *rdev) | |||
| 1576 | { | 1600 | { |
| 1577 | int r; | 1601 | int r; |
| 1578 | 1602 | ||
| 1579 | rdev->new_init_path = true; | ||
| 1580 | r = radeon_dummy_page_init(rdev); | 1603 | r = radeon_dummy_page_init(rdev); |
| 1581 | if (r) | 1604 | if (r) |
| 1582 | return r; | 1605 | return r; |
| @@ -1593,8 +1616,10 @@ int r600_init(struct radeon_device *rdev) | |||
| 1593 | return -EINVAL; | 1616 | return -EINVAL; |
| 1594 | } | 1617 | } |
| 1595 | /* Must be an ATOMBIOS */ | 1618 | /* Must be an ATOMBIOS */ |
| 1596 | if (!rdev->is_atom_bios) | 1619 | if (!rdev->is_atom_bios) { |
| 1620 | dev_err(rdev->dev, "Expecting atombios for R600 GPU\n"); | ||
| 1597 | return -EINVAL; | 1621 | return -EINVAL; |
| 1622 | } | ||
| 1598 | r = radeon_atombios_init(rdev); | 1623 | r = radeon_atombios_init(rdev); |
| 1599 | if (r) | 1624 | if (r) |
| 1600 | return r; | 1625 | return r; |
| @@ -1616,15 +1641,8 @@ int r600_init(struct radeon_device *rdev) | |||
| 1616 | if (r) | 1641 | if (r) |
| 1617 | return r; | 1642 | return r; |
| 1618 | r = r600_mc_init(rdev); | 1643 | r = r600_mc_init(rdev); |
| 1619 | if (r) { | 1644 | if (r) |
| 1620 | if (rdev->flags & RADEON_IS_AGP) { | ||
| 1621 | /* Retry with disabling AGP */ | ||
| 1622 | r600_fini(rdev); | ||
| 1623 | rdev->flags &= ~RADEON_IS_AGP; | ||
| 1624 | return r600_init(rdev); | ||
| 1625 | } | ||
| 1626 | return r; | 1645 | return r; |
| 1627 | } | ||
| 1628 | /* Memory manager */ | 1646 | /* Memory manager */ |
| 1629 | r = radeon_object_init(rdev); | 1647 | r = radeon_object_init(rdev); |
| 1630 | if (r) | 1648 | if (r) |
| @@ -1653,12 +1671,10 @@ int r600_init(struct radeon_device *rdev) | |||
| 1653 | 1671 | ||
| 1654 | r = r600_startup(rdev); | 1672 | r = r600_startup(rdev); |
| 1655 | if (r) { | 1673 | if (r) { |
| 1656 | if (rdev->flags & RADEON_IS_AGP) { | 1674 | r600_suspend(rdev); |
| 1657 | /* Retry with disabling AGP */ | 1675 | r600_wb_fini(rdev); |
| 1658 | r600_fini(rdev); | 1676 | radeon_ring_fini(rdev); |
| 1659 | rdev->flags &= ~RADEON_IS_AGP; | 1677 | r600_pcie_gart_fini(rdev); |
| 1660 | return r600_init(rdev); | ||
| 1661 | } | ||
| 1662 | rdev->accel_working = false; | 1678 | rdev->accel_working = false; |
| 1663 | } | 1679 | } |
| 1664 | if (rdev->accel_working) { | 1680 | if (rdev->accel_working) { |
| @@ -1667,7 +1683,7 @@ int r600_init(struct radeon_device *rdev) | |||
| 1667 | DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); | 1683 | DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); |
| 1668 | rdev->accel_working = false; | 1684 | rdev->accel_working = false; |
| 1669 | } | 1685 | } |
| 1670 | r = radeon_ib_test(rdev); | 1686 | r = r600_ib_test(rdev); |
| 1671 | if (r) { | 1687 | if (r) { |
| 1672 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); | 1688 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); |
| 1673 | rdev->accel_working = false; | 1689 | rdev->accel_working = false; |
| @@ -1683,19 +1699,15 @@ void r600_fini(struct radeon_device *rdev) | |||
| 1683 | 1699 | ||
| 1684 | r600_blit_fini(rdev); | 1700 | r600_blit_fini(rdev); |
| 1685 | radeon_ring_fini(rdev); | 1701 | radeon_ring_fini(rdev); |
| 1702 | r600_wb_fini(rdev); | ||
| 1686 | r600_pcie_gart_fini(rdev); | 1703 | r600_pcie_gart_fini(rdev); |
| 1687 | radeon_gem_fini(rdev); | 1704 | radeon_gem_fini(rdev); |
| 1688 | radeon_fence_driver_fini(rdev); | 1705 | radeon_fence_driver_fini(rdev); |
| 1689 | radeon_clocks_fini(rdev); | 1706 | radeon_clocks_fini(rdev); |
| 1690 | #if __OS_HAS_AGP | ||
| 1691 | if (rdev->flags & RADEON_IS_AGP) | 1707 | if (rdev->flags & RADEON_IS_AGP) |
| 1692 | radeon_agp_fini(rdev); | 1708 | radeon_agp_fini(rdev); |
| 1693 | #endif | ||
| 1694 | radeon_object_fini(rdev); | 1709 | radeon_object_fini(rdev); |
| 1695 | if (rdev->is_atom_bios) | 1710 | radeon_atombios_fini(rdev); |
| 1696 | radeon_atombios_fini(rdev); | ||
| 1697 | else | ||
| 1698 | radeon_combios_fini(rdev); | ||
| 1699 | kfree(rdev->bios); | 1711 | kfree(rdev->bios); |
| 1700 | rdev->bios = NULL; | 1712 | rdev->bios = NULL; |
| 1701 | radeon_dummy_page_fini(rdev); | 1713 | radeon_dummy_page_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c index d988eece0187..dec501081608 100644 --- a/drivers/gpu/drm/radeon/r600_blit.c +++ b/drivers/gpu/drm/radeon/r600_blit.c | |||
| @@ -582,8 +582,6 @@ r600_blit_copy(struct drm_device *dev, | |||
| 582 | u64 vb_addr; | 582 | u64 vb_addr; |
| 583 | u32 *vb; | 583 | u32 *vb; |
| 584 | 584 | ||
| 585 | vb = r600_nomm_get_vb_ptr(dev); | ||
| 586 | |||
| 587 | if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) { | 585 | if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) { |
| 588 | max_bytes = 8192; | 586 | max_bytes = 8192; |
| 589 | 587 | ||
| @@ -619,8 +617,8 @@ r600_blit_copy(struct drm_device *dev, | |||
| 619 | if (!dev_priv->blit_vb) | 617 | if (!dev_priv->blit_vb) |
| 620 | return; | 618 | return; |
| 621 | set_shaders(dev); | 619 | set_shaders(dev); |
| 622 | vb = r600_nomm_get_vb_ptr(dev); | ||
| 623 | } | 620 | } |
| 621 | vb = r600_nomm_get_vb_ptr(dev); | ||
| 624 | 622 | ||
| 625 | vb[0] = i2f(dst_x); | 623 | vb[0] = i2f(dst_x); |
| 626 | vb[1] = 0; | 624 | vb[1] = 0; |
| @@ -708,8 +706,8 @@ r600_blit_copy(struct drm_device *dev, | |||
| 708 | return; | 706 | return; |
| 709 | 707 | ||
| 710 | set_shaders(dev); | 708 | set_shaders(dev); |
| 711 | vb = r600_nomm_get_vb_ptr(dev); | ||
| 712 | } | 709 | } |
| 710 | vb = r600_nomm_get_vb_ptr(dev); | ||
| 713 | 711 | ||
| 714 | vb[0] = i2f(dst_x / 4); | 712 | vb[0] = i2f(dst_x / 4); |
| 715 | vb[1] = 0; | 713 | vb[1] = 0; |
| @@ -777,8 +775,6 @@ r600_blit_swap(struct drm_device *dev, | |||
| 777 | u64 vb_addr; | 775 | u64 vb_addr; |
| 778 | u32 *vb; | 776 | u32 *vb; |
| 779 | 777 | ||
| 780 | vb = r600_nomm_get_vb_ptr(dev); | ||
| 781 | |||
| 782 | if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) { | 778 | if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) { |
| 783 | 779 | ||
| 784 | r600_nomm_put_vb(dev); | 780 | r600_nomm_put_vb(dev); |
| @@ -787,8 +783,8 @@ r600_blit_swap(struct drm_device *dev, | |||
| 787 | return; | 783 | return; |
| 788 | 784 | ||
| 789 | set_shaders(dev); | 785 | set_shaders(dev); |
| 790 | vb = r600_nomm_get_vb_ptr(dev); | ||
| 791 | } | 786 | } |
| 787 | vb = r600_nomm_get_vb_ptr(dev); | ||
| 792 | 788 | ||
| 793 | if (cpp == 4) { | 789 | if (cpp == 4) { |
| 794 | cb_format = COLOR_8_8_8_8; | 790 | cb_format = COLOR_8_8_8_8; |
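
All three hunks in r600_blit.c (and the r600_blit_kms.c hunks that follow) fix the same stale-pointer pattern: the vertex-buffer pointer was fetched before the branch that may retire the current buffer and set up a new one, so writes through vb could land in the old buffer. A simplified sketch of the change, where buffer_full stands in for the real "used + N > total" test:

    /* before */
    vb = r600_nomm_get_vb_ptr(dev);        /* may point into the old buffer */
    if (buffer_full) {
        r600_nomm_put_vb(dev);             /* current VB is retired here ... */
        /* ... and a fresh one is set up */
        set_shaders(dev);
    }

    /* after */
    if (buffer_full) {
        r600_nomm_put_vb(dev);
        set_shaders(dev);
    }
    vb = r600_nomm_get_vb_ptr(dev);        /* always taken from the final VB */
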
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index acae33e2ad51..93108bb31d1d 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
| @@ -610,7 +610,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev, | |||
| 610 | 610 | ||
| 611 | DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr, | 611 | DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr, |
| 612 | size_bytes, rdev->r600_blit.vb_used); | 612 | size_bytes, rdev->r600_blit.vb_used); |
| 613 | vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used); | ||
| 614 | if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) { | 613 | if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) { |
| 615 | max_bytes = 8192; | 614 | max_bytes = 8192; |
| 616 | 615 | ||
| @@ -653,6 +652,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev, | |||
| 653 | vb = r600_nomm_get_vb_ptr(dev); | 652 | vb = r600_nomm_get_vb_ptr(dev); |
| 654 | #endif | 653 | #endif |
| 655 | } | 654 | } |
| 655 | vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used); | ||
| 656 | 656 | ||
| 657 | vb[0] = i2f(dst_x); | 657 | vb[0] = i2f(dst_x); |
| 658 | vb[1] = 0; | 658 | vb[1] = 0; |
| @@ -747,6 +747,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev, | |||
| 747 | vb = r600_nomm_get_vb_ptr(dev); | 747 | vb = r600_nomm_get_vb_ptr(dev); |
| 748 | } | 748 | } |
| 749 | #endif | 749 | #endif |
| 750 | vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used); | ||
| 750 | 751 | ||
| 751 | vb[0] = i2f(dst_x / 4); | 752 | vb[0] = i2f(dst_x / 4); |
| 752 | vb[1] = 0; | 753 | vb[1] = 0; |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index d28970db6a2d..17e42195c632 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
| @@ -252,7 +252,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
| 252 | 252 | ||
| 253 | header = radeon_get_ib_value(p, h_idx); | 253 | header = radeon_get_ib_value(p, h_idx); |
| 254 | crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); | 254 | crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); |
| 255 | reg = header >> 2; | 255 | reg = CP_PACKET0_GET_REG(header); |
| 256 | mutex_lock(&p->rdev->ddev->mode_config.mutex); | 256 | mutex_lock(&p->rdev->ddev->mode_config.mutex); |
| 257 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); | 257 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); |
| 258 | if (!obj) { | 258 | if (!obj) { |
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 4a9028a85c9b..9b64d47f1f82 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
| @@ -643,6 +643,7 @@ | |||
| 643 | #define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1) | 643 | #define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1) |
| 644 | #define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1) | 644 | #define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1) |
| 645 | #define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1) | 645 | #define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1) |
| 646 | #define G_000E50_BIF_BUSY(x) (((x) >> 29) & 1) | ||
| 646 | #define R_000E60_SRBM_SOFT_RESET 0x0E60 | 647 | #define R_000E60_SRBM_SOFT_RESET 0x0E60 |
| 647 | #define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1) | 648 | #define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1) |
| 648 | #define S_000E60_SOFT_RESET_CG(x) (((x) & 1) << 2) | 649 | #define S_000E60_SOFT_RESET_CG(x) (((x) & 1) << 2) |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 950b346e343f..5ab35b81c86b 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -590,18 +590,8 @@ struct radeon_asic { | |||
| 590 | void (*fini)(struct radeon_device *rdev); | 590 | void (*fini)(struct radeon_device *rdev); |
| 591 | int (*resume)(struct radeon_device *rdev); | 591 | int (*resume)(struct radeon_device *rdev); |
| 592 | int (*suspend)(struct radeon_device *rdev); | 592 | int (*suspend)(struct radeon_device *rdev); |
| 593 | void (*errata)(struct radeon_device *rdev); | ||
| 594 | void (*vram_info)(struct radeon_device *rdev); | ||
| 595 | void (*vga_set_state)(struct radeon_device *rdev, bool state); | 593 | void (*vga_set_state)(struct radeon_device *rdev, bool state); |
| 596 | int (*gpu_reset)(struct radeon_device *rdev); | 594 | int (*gpu_reset)(struct radeon_device *rdev); |
| 597 | int (*mc_init)(struct radeon_device *rdev); | ||
| 598 | void (*mc_fini)(struct radeon_device *rdev); | ||
| 599 | int (*wb_init)(struct radeon_device *rdev); | ||
| 600 | void (*wb_fini)(struct radeon_device *rdev); | ||
| 601 | int (*gart_init)(struct radeon_device *rdev); | ||
| 602 | void (*gart_fini)(struct radeon_device *rdev); | ||
| 603 | int (*gart_enable)(struct radeon_device *rdev); | ||
| 604 | void (*gart_disable)(struct radeon_device *rdev); | ||
| 605 | void (*gart_tlb_flush)(struct radeon_device *rdev); | 595 | void (*gart_tlb_flush)(struct radeon_device *rdev); |
| 606 | int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr); | 596 | int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr); |
| 607 | int (*cp_init)(struct radeon_device *rdev, unsigned ring_size); | 597 | int (*cp_init)(struct radeon_device *rdev, unsigned ring_size); |
| @@ -611,7 +601,6 @@ struct radeon_asic { | |||
| 611 | void (*ring_start)(struct radeon_device *rdev); | 601 | void (*ring_start)(struct radeon_device *rdev); |
| 612 | int (*ring_test)(struct radeon_device *rdev); | 602 | int (*ring_test)(struct radeon_device *rdev); |
| 613 | void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); | 603 | void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); |
| 614 | int (*ib_test)(struct radeon_device *rdev); | ||
| 615 | int (*irq_set)(struct radeon_device *rdev); | 604 | int (*irq_set)(struct radeon_device *rdev); |
| 616 | int (*irq_process)(struct radeon_device *rdev); | 605 | int (*irq_process)(struct radeon_device *rdev); |
| 617 | u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); | 606 | u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); |
| @@ -789,7 +778,6 @@ struct radeon_device { | |||
| 789 | bool shutdown; | 778 | bool shutdown; |
| 790 | bool suspend; | 779 | bool suspend; |
| 791 | bool need_dma32; | 780 | bool need_dma32; |
| 792 | bool new_init_path; | ||
| 793 | bool accel_working; | 781 | bool accel_working; |
| 794 | struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; | 782 | struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; |
| 795 | const struct firmware *me_fw; /* all family ME firmware */ | 783 | const struct firmware *me_fw; /* all family ME firmware */ |
| @@ -949,28 +937,14 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) | |||
| 949 | #define radeon_resume(rdev) (rdev)->asic->resume((rdev)) | 937 | #define radeon_resume(rdev) (rdev)->asic->resume((rdev)) |
| 950 | #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) | 938 | #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) |
| 951 | #define radeon_cs_parse(p) rdev->asic->cs_parse((p)) | 939 | #define radeon_cs_parse(p) rdev->asic->cs_parse((p)) |
| 952 | #define radeon_errata(rdev) (rdev)->asic->errata((rdev)) | ||
| 953 | #define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev)) | ||
| 954 | #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) | 940 | #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) |
| 955 | #define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev)) | 941 | #define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev)) |
| 956 | #define radeon_mc_init(rdev) (rdev)->asic->mc_init((rdev)) | ||
| 957 | #define radeon_mc_fini(rdev) (rdev)->asic->mc_fini((rdev)) | ||
| 958 | #define radeon_wb_init(rdev) (rdev)->asic->wb_init((rdev)) | ||
| 959 | #define radeon_wb_fini(rdev) (rdev)->asic->wb_fini((rdev)) | ||
| 960 | #define radeon_gpu_gart_init(rdev) (rdev)->asic->gart_init((rdev)) | ||
| 961 | #define radeon_gpu_gart_fini(rdev) (rdev)->asic->gart_fini((rdev)) | ||
| 962 | #define radeon_gart_enable(rdev) (rdev)->asic->gart_enable((rdev)) | ||
| 963 | #define radeon_gart_disable(rdev) (rdev)->asic->gart_disable((rdev)) | ||
| 964 | #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev)) | 942 | #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev)) |
| 965 | #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p)) | 943 | #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p)) |
| 966 | #define radeon_cp_init(rdev,rsize) (rdev)->asic->cp_init((rdev), (rsize)) | ||
| 967 | #define radeon_cp_fini(rdev) (rdev)->asic->cp_fini((rdev)) | ||
| 968 | #define radeon_cp_disable(rdev) (rdev)->asic->cp_disable((rdev)) | ||
| 969 | #define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev)) | 944 | #define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev)) |
| 970 | #define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev)) | 945 | #define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev)) |
| 971 | #define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev)) | 946 | #define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev)) |
| 972 | #define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib)) | 947 | #define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib)) |
| 973 | #define radeon_ib_test(rdev) (rdev)->asic->ib_test((rdev)) | ||
| 974 | #define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev)) | 948 | #define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev)) |
| 975 | #define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev)) | 949 | #define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev)) |
| 976 | #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc)) | 950 | #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc)) |
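
The wrappers that survive this trim still dispatch through the per-ASIC function table, for example:

    /* struct radeon_asic keeps only the hooks that still differ per family */
    radeon_suspend(rdev);      /* expands to rdev->asic->suspend(rdev) */
    radeon_gpu_reset(rdev);    /* expands to rdev->asic->gpu_reset(rdev) */

The removed members (errata, vram_info, mc_init/mc_fini, wb_init/wb_fini, the gart_* setup hooks, cp_init/cp_fini/cp_disable, ib_test) are no longer reached through the table; each family's init/startup/suspend code calls its own helpers directly, as the r600 changes earlier in this patch do with r600_wb_enable() and r600_pcie_gart_enable().
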
| @@ -996,6 +970,7 @@ extern void radeon_clocks_fini(struct radeon_device *rdev); | |||
| 996 | extern void radeon_scratch_init(struct radeon_device *rdev); | 970 | extern void radeon_scratch_init(struct radeon_device *rdev); |
| 997 | extern void radeon_surface_init(struct radeon_device *rdev); | 971 | extern void radeon_surface_init(struct radeon_device *rdev); |
| 998 | extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); | 972 | extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); |
| 973 | extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); | ||
| 999 | extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); | 974 | extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); |
| 1000 | 975 | ||
| 1001 | /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ | 976 | /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ |
| @@ -1031,11 +1006,27 @@ extern int r100_wb_init(struct radeon_device *rdev); | |||
| 1031 | extern void r100_hdp_reset(struct radeon_device *rdev); | 1006 | extern void r100_hdp_reset(struct radeon_device *rdev); |
| 1032 | extern int r100_rb2d_reset(struct radeon_device *rdev); | 1007 | extern int r100_rb2d_reset(struct radeon_device *rdev); |
| 1033 | extern int r100_cp_reset(struct radeon_device *rdev); | 1008 | extern int r100_cp_reset(struct radeon_device *rdev); |
| 1009 | extern void r100_vga_render_disable(struct radeon_device *rdev); | ||
| 1010 | extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, | ||
| 1011 | struct radeon_cs_packet *pkt, | ||
| 1012 | struct radeon_object *robj); | ||
| 1013 | extern int r100_cs_parse_packet0(struct radeon_cs_parser *p, | ||
| 1014 | struct radeon_cs_packet *pkt, | ||
| 1015 | const unsigned *auth, unsigned n, | ||
| 1016 | radeon_packet0_check_t check); | ||
| 1017 | extern int r100_cs_packet_parse(struct radeon_cs_parser *p, | ||
| 1018 | struct radeon_cs_packet *pkt, | ||
| 1019 | unsigned idx); | ||
| 1020 | |||
| 1021 | /* rv200,rv250,rv280 */ | ||
| 1022 | extern void r200_set_safe_registers(struct radeon_device *rdev); | ||
| 1034 | 1023 | ||
| 1035 | /* r300,r350,rv350,rv370,rv380 */ | 1024 | /* r300,r350,rv350,rv370,rv380 */ |
| 1036 | extern void r300_set_reg_safe(struct radeon_device *rdev); | 1025 | extern void r300_set_reg_safe(struct radeon_device *rdev); |
| 1037 | extern void r300_mc_program(struct radeon_device *rdev); | 1026 | extern void r300_mc_program(struct radeon_device *rdev); |
| 1038 | extern void r300_vram_info(struct radeon_device *rdev); | 1027 | extern void r300_vram_info(struct radeon_device *rdev); |
| 1028 | extern void r300_clock_startup(struct radeon_device *rdev); | ||
| 1029 | extern int r300_mc_wait_for_idle(struct radeon_device *rdev); | ||
| 1039 | extern int rv370_pcie_gart_init(struct radeon_device *rdev); | 1030 | extern int rv370_pcie_gart_init(struct radeon_device *rdev); |
| 1040 | extern void rv370_pcie_gart_fini(struct radeon_device *rdev); | 1031 | extern void rv370_pcie_gart_fini(struct radeon_device *rdev); |
| 1041 | extern int rv370_pcie_gart_enable(struct radeon_device *rdev); | 1032 | extern int rv370_pcie_gart_enable(struct radeon_device *rdev); |
| @@ -1066,6 +1057,18 @@ extern void rv515_clock_startup(struct radeon_device *rdev); | |||
| 1066 | extern void rv515_debugfs(struct radeon_device *rdev); | 1057 | extern void rv515_debugfs(struct radeon_device *rdev); |
| 1067 | extern int rv515_suspend(struct radeon_device *rdev); | 1058 | extern int rv515_suspend(struct radeon_device *rdev); |
| 1068 | 1059 | ||
| 1060 | /* rs400 */ | ||
| 1061 | extern int rs400_gart_init(struct radeon_device *rdev); | ||
| 1062 | extern int rs400_gart_enable(struct radeon_device *rdev); | ||
| 1063 | extern void rs400_gart_adjust_size(struct radeon_device *rdev); | ||
| 1064 | extern void rs400_gart_disable(struct radeon_device *rdev); | ||
| 1065 | extern void rs400_gart_fini(struct radeon_device *rdev); | ||
| 1066 | |||
| 1067 | /* rs600 */ | ||
| 1068 | extern void rs600_set_safe_registers(struct radeon_device *rdev); | ||
| 1069 | extern int rs600_irq_set(struct radeon_device *rdev); | ||
| 1070 | extern void rs600_irq_disable(struct radeon_device *rdev); | ||
| 1071 | |||
| 1069 | /* rs690, rs740 */ | 1072 | /* rs690, rs740 */ |
| 1070 | extern void rs690_line_buffer_adjust(struct radeon_device *rdev, | 1073 | extern void rs690_line_buffer_adjust(struct radeon_device *rdev, |
| 1071 | struct drm_display_mode *mode1, | 1074 | struct drm_display_mode *mode1, |
| @@ -1083,8 +1086,9 @@ extern int r600_pcie_gart_init(struct radeon_device *rdev); | |||
| 1083 | extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); | 1086 | extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); |
| 1084 | extern int r600_ib_test(struct radeon_device *rdev); | 1087 | extern int r600_ib_test(struct radeon_device *rdev); |
| 1085 | extern int r600_ring_test(struct radeon_device *rdev); | 1088 | extern int r600_ring_test(struct radeon_device *rdev); |
| 1086 | extern int r600_wb_init(struct radeon_device *rdev); | ||
| 1087 | extern void r600_wb_fini(struct radeon_device *rdev); | 1089 | extern void r600_wb_fini(struct radeon_device *rdev); |
| 1090 | extern int r600_wb_enable(struct radeon_device *rdev); | ||
| 1091 | extern void r600_wb_disable(struct radeon_device *rdev); | ||
| 1088 | extern void r600_scratch_init(struct radeon_device *rdev); | 1092 | extern void r600_scratch_init(struct radeon_device *rdev); |
| 1089 | extern int r600_blit_init(struct radeon_device *rdev); | 1093 | extern int r600_blit_init(struct radeon_device *rdev); |
| 1090 | extern void r600_blit_fini(struct radeon_device *rdev); | 1094 | extern void r600_blit_fini(struct radeon_device *rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index c8a4e7b5663d..c3532c7a6f3f 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
| @@ -41,28 +41,17 @@ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); | |||
| 41 | /* | 41 | /* |
| 42 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 | 42 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 |
| 43 | */ | 43 | */ |
| 44 | int r100_init(struct radeon_device *rdev); | 44 | extern int r100_init(struct radeon_device *rdev); |
| 45 | int r200_init(struct radeon_device *rdev); | 45 | extern void r100_fini(struct radeon_device *rdev); |
| 46 | extern int r100_suspend(struct radeon_device *rdev); | ||
| 47 | extern int r100_resume(struct radeon_device *rdev); | ||
| 46 | uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); | 48 | uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); |
| 47 | void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 49 | void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
| 48 | void r100_errata(struct radeon_device *rdev); | ||
| 49 | void r100_vram_info(struct radeon_device *rdev); | ||
| 50 | void r100_vga_set_state(struct radeon_device *rdev, bool state); | 50 | void r100_vga_set_state(struct radeon_device *rdev, bool state); |
| 51 | int r100_gpu_reset(struct radeon_device *rdev); | 51 | int r100_gpu_reset(struct radeon_device *rdev); |
| 52 | int r100_mc_init(struct radeon_device *rdev); | ||
| 53 | void r100_mc_fini(struct radeon_device *rdev); | ||
| 54 | u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); | 52 | u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); |
| 55 | int r100_wb_init(struct radeon_device *rdev); | ||
| 56 | void r100_wb_fini(struct radeon_device *rdev); | ||
| 57 | int r100_pci_gart_init(struct radeon_device *rdev); | ||
| 58 | void r100_pci_gart_fini(struct radeon_device *rdev); | ||
| 59 | int r100_pci_gart_enable(struct radeon_device *rdev); | ||
| 60 | void r100_pci_gart_disable(struct radeon_device *rdev); | ||
| 61 | void r100_pci_gart_tlb_flush(struct radeon_device *rdev); | 53 | void r100_pci_gart_tlb_flush(struct radeon_device *rdev); |
| 62 | int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 54 | int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
| 63 | int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); | ||
| 64 | void r100_cp_fini(struct radeon_device *rdev); | ||
| 65 | void r100_cp_disable(struct radeon_device *rdev); | ||
| 66 | void r100_cp_commit(struct radeon_device *rdev); | 55 | void r100_cp_commit(struct radeon_device *rdev); |
| 67 | void r100_ring_start(struct radeon_device *rdev); | 56 | void r100_ring_start(struct radeon_device *rdev); |
| 68 | int r100_irq_set(struct radeon_device *rdev); | 57 | int r100_irq_set(struct radeon_device *rdev); |
| @@ -83,33 +72,21 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg, | |||
| 83 | int r100_clear_surface_reg(struct radeon_device *rdev, int reg); | 72 | int r100_clear_surface_reg(struct radeon_device *rdev, int reg); |
| 84 | void r100_bandwidth_update(struct radeon_device *rdev); | 73 | void r100_bandwidth_update(struct radeon_device *rdev); |
| 85 | void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | 74 | void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
| 86 | int r100_ib_test(struct radeon_device *rdev); | ||
| 87 | int r100_ring_test(struct radeon_device *rdev); | 75 | int r100_ring_test(struct radeon_device *rdev); |
| 88 | 76 | ||
| 89 | static struct radeon_asic r100_asic = { | 77 | static struct radeon_asic r100_asic = { |
| 90 | .init = &r100_init, | 78 | .init = &r100_init, |
| 91 | .errata = &r100_errata, | 79 | .fini = &r100_fini, |
| 92 | .vram_info = &r100_vram_info, | 80 | .suspend = &r100_suspend, |
| 81 | .resume = &r100_resume, | ||
| 93 | .vga_set_state = &r100_vga_set_state, | 82 | .vga_set_state = &r100_vga_set_state, |
| 94 | .gpu_reset = &r100_gpu_reset, | 83 | .gpu_reset = &r100_gpu_reset, |
| 95 | .mc_init = &r100_mc_init, | ||
| 96 | .mc_fini = &r100_mc_fini, | ||
| 97 | .wb_init = &r100_wb_init, | ||
| 98 | .wb_fini = &r100_wb_fini, | ||
| 99 | .gart_init = &r100_pci_gart_init, | ||
| 100 | .gart_fini = &r100_pci_gart_fini, | ||
| 101 | .gart_enable = &r100_pci_gart_enable, | ||
| 102 | .gart_disable = &r100_pci_gart_disable, | ||
| 103 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, | 84 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, |
| 104 | .gart_set_page = &r100_pci_gart_set_page, | 85 | .gart_set_page = &r100_pci_gart_set_page, |
| 105 | .cp_init = &r100_cp_init, | ||
| 106 | .cp_fini = &r100_cp_fini, | ||
| 107 | .cp_disable = &r100_cp_disable, | ||
| 108 | .cp_commit = &r100_cp_commit, | 86 | .cp_commit = &r100_cp_commit, |
| 109 | .ring_start = &r100_ring_start, | 87 | .ring_start = &r100_ring_start, |
| 110 | .ring_test = &r100_ring_test, | 88 | .ring_test = &r100_ring_test, |
| 111 | .ring_ib_execute = &r100_ring_ib_execute, | 89 | .ring_ib_execute = &r100_ring_ib_execute, |
| 112 | .ib_test = &r100_ib_test, | ||
| 113 | .irq_set = &r100_irq_set, | 90 | .irq_set = &r100_irq_set, |
| 114 | .irq_process = &r100_irq_process, | 91 | .irq_process = &r100_irq_process, |
| 115 | .get_vblank_counter = &r100_get_vblank_counter, | 92 | .get_vblank_counter = &r100_get_vblank_counter, |
| @@ -131,55 +108,38 @@ static struct radeon_asic r100_asic = { | |||
| 131 | /* | 108 | /* |
| 132 | * r300,r350,rv350,rv380 | 109 | * r300,r350,rv350,rv380 |
| 133 | */ | 110 | */ |
| 134 | int r300_init(struct radeon_device *rdev); | 111 | extern int r300_init(struct radeon_device *rdev); |
| 135 | void r300_errata(struct radeon_device *rdev); | 112 | extern void r300_fini(struct radeon_device *rdev); |
| 136 | void r300_vram_info(struct radeon_device *rdev); | 113 | extern int r300_suspend(struct radeon_device *rdev); |
| 137 | int r300_gpu_reset(struct radeon_device *rdev); | 114 | extern int r300_resume(struct radeon_device *rdev); |
| 138 | int r300_mc_init(struct radeon_device *rdev); | 115 | extern int r300_gpu_reset(struct radeon_device *rdev); |
| 139 | void r300_mc_fini(struct radeon_device *rdev); | 116 | extern void r300_ring_start(struct radeon_device *rdev); |
| 140 | void r300_ring_start(struct radeon_device *rdev); | 117 | extern void r300_fence_ring_emit(struct radeon_device *rdev, |
| 141 | void r300_fence_ring_emit(struct radeon_device *rdev, | 118 | struct radeon_fence *fence); |
| 142 | struct radeon_fence *fence); | 119 | extern int r300_cs_parse(struct radeon_cs_parser *p); |
| 143 | int r300_cs_parse(struct radeon_cs_parser *p); | 120 | extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); |
| 144 | int rv370_pcie_gart_init(struct radeon_device *rdev); | 121 | extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
| 145 | void rv370_pcie_gart_fini(struct radeon_device *rdev); | 122 | extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); |
| 146 | int rv370_pcie_gart_enable(struct radeon_device *rdev); | 123 | extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
| 147 | void rv370_pcie_gart_disable(struct radeon_device *rdev); | 124 | extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); |
| 148 | void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); | 125 | extern int r300_copy_dma(struct radeon_device *rdev, |
| 149 | int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 126 | uint64_t src_offset, |
| 150 | uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); | 127 | uint64_t dst_offset, |
| 151 | void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 128 | unsigned num_pages, |
| 152 | void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); | 129 | struct radeon_fence *fence); |
| 153 | int r300_copy_dma(struct radeon_device *rdev, | ||
| 154 | uint64_t src_offset, | ||
| 155 | uint64_t dst_offset, | ||
| 156 | unsigned num_pages, | ||
| 157 | struct radeon_fence *fence); | ||
| 158 | |||
| 159 | static struct radeon_asic r300_asic = { | 130 | static struct radeon_asic r300_asic = { |
| 160 | .init = &r300_init, | 131 | .init = &r300_init, |
| 161 | .errata = &r300_errata, | 132 | .fini = &r300_fini, |
| 162 | .vram_info = &r300_vram_info, | 133 | .suspend = &r300_suspend, |
| 134 | .resume = &r300_resume, | ||
| 163 | .vga_set_state = &r100_vga_set_state, | 135 | .vga_set_state = &r100_vga_set_state, |
| 164 | .gpu_reset = &r300_gpu_reset, | 136 | .gpu_reset = &r300_gpu_reset, |
| 165 | .mc_init = &r300_mc_init, | ||
| 166 | .mc_fini = &r300_mc_fini, | ||
| 167 | .wb_init = &r100_wb_init, | ||
| 168 | .wb_fini = &r100_wb_fini, | ||
| 169 | .gart_init = &r100_pci_gart_init, | ||
| 170 | .gart_fini = &r100_pci_gart_fini, | ||
| 171 | .gart_enable = &r100_pci_gart_enable, | ||
| 172 | .gart_disable = &r100_pci_gart_disable, | ||
| 173 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, | 137 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, |
| 174 | .gart_set_page = &r100_pci_gart_set_page, | 138 | .gart_set_page = &r100_pci_gart_set_page, |
| 175 | .cp_init = &r100_cp_init, | ||
| 176 | .cp_fini = &r100_cp_fini, | ||
| 177 | .cp_disable = &r100_cp_disable, | ||
| 178 | .cp_commit = &r100_cp_commit, | 139 | .cp_commit = &r100_cp_commit, |
| 179 | .ring_start = &r300_ring_start, | 140 | .ring_start = &r300_ring_start, |
| 180 | .ring_test = &r100_ring_test, | 141 | .ring_test = &r100_ring_test, |
| 181 | .ring_ib_execute = &r100_ring_ib_execute, | 142 | .ring_ib_execute = &r100_ring_ib_execute, |
| 182 | .ib_test = &r100_ib_test, | ||
| 183 | .irq_set = &r100_irq_set, | 143 | .irq_set = &r100_irq_set, |
| 184 | .irq_process = &r100_irq_process, | 144 | .irq_process = &r100_irq_process, |
| 185 | .get_vblank_counter = &r100_get_vblank_counter, | 145 | .get_vblank_counter = &r100_get_vblank_counter, |
| @@ -209,26 +169,14 @@ static struct radeon_asic r420_asic = { | |||
| 209 | .fini = &r420_fini, | 169 | .fini = &r420_fini, |
| 210 | .suspend = &r420_suspend, | 170 | .suspend = &r420_suspend, |
| 211 | .resume = &r420_resume, | 171 | .resume = &r420_resume, |
| 212 | .errata = NULL, | ||
| 213 | .vram_info = NULL, | ||
| 214 | .vga_set_state = &r100_vga_set_state, | 172 | .vga_set_state = &r100_vga_set_state, |
| 215 | .gpu_reset = &r300_gpu_reset, | 173 | .gpu_reset = &r300_gpu_reset, |
| 216 | .mc_init = NULL, | ||
| 217 | .mc_fini = NULL, | ||
| 218 | .wb_init = NULL, | ||
| 219 | .wb_fini = NULL, | ||
| 220 | .gart_enable = NULL, | ||
| 221 | .gart_disable = NULL, | ||
| 222 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | 174 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, |
| 223 | .gart_set_page = &rv370_pcie_gart_set_page, | 175 | .gart_set_page = &rv370_pcie_gart_set_page, |
| 224 | .cp_init = NULL, | ||
| 225 | .cp_fini = NULL, | ||
| 226 | .cp_disable = NULL, | ||
| 227 | .cp_commit = &r100_cp_commit, | 176 | .cp_commit = &r100_cp_commit, |
| 228 | .ring_start = &r300_ring_start, | 177 | .ring_start = &r300_ring_start, |
| 229 | .ring_test = &r100_ring_test, | 178 | .ring_test = &r100_ring_test, |
| 230 | .ring_ib_execute = &r100_ring_ib_execute, | 179 | .ring_ib_execute = &r100_ring_ib_execute, |
| 231 | .ib_test = NULL, | ||
| 232 | .irq_set = &r100_irq_set, | 180 | .irq_set = &r100_irq_set, |
| 233 | .irq_process = &r100_irq_process, | 181 | .irq_process = &r100_irq_process, |
| 234 | .get_vblank_counter = &r100_get_vblank_counter, | 182 | .get_vblank_counter = &r100_get_vblank_counter, |
| @@ -250,42 +198,27 @@ static struct radeon_asic r420_asic = { | |||
| 250 | /* | 198 | /* |
| 251 | * rs400,rs480 | 199 | * rs400,rs480 |
| 252 | */ | 200 | */ |
| 253 | void rs400_errata(struct radeon_device *rdev); | 201 | extern int rs400_init(struct radeon_device *rdev); |
| 254 | void rs400_vram_info(struct radeon_device *rdev); | 202 | extern void rs400_fini(struct radeon_device *rdev); |
| 255 | int rs400_mc_init(struct radeon_device *rdev); | 203 | extern int rs400_suspend(struct radeon_device *rdev); |
| 256 | void rs400_mc_fini(struct radeon_device *rdev); | 204 | extern int rs400_resume(struct radeon_device *rdev); |
| 257 | int rs400_gart_init(struct radeon_device *rdev); | ||
| 258 | void rs400_gart_fini(struct radeon_device *rdev); | ||
| 259 | int rs400_gart_enable(struct radeon_device *rdev); | ||
| 260 | void rs400_gart_disable(struct radeon_device *rdev); | ||
| 261 | void rs400_gart_tlb_flush(struct radeon_device *rdev); | 205 | void rs400_gart_tlb_flush(struct radeon_device *rdev); |
| 262 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 206 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
| 263 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 207 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
| 264 | void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 208 | void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
| 265 | static struct radeon_asic rs400_asic = { | 209 | static struct radeon_asic rs400_asic = { |
| 266 | .init = &r300_init, | 210 | .init = &rs400_init, |
| 267 | .errata = &rs400_errata, | 211 | .fini = &rs400_fini, |
| 268 | .vram_info = &rs400_vram_info, | 212 | .suspend = &rs400_suspend, |
| 213 | .resume = &rs400_resume, | ||
| 269 | .vga_set_state = &r100_vga_set_state, | 214 | .vga_set_state = &r100_vga_set_state, |
| 270 | .gpu_reset = &r300_gpu_reset, | 215 | .gpu_reset = &r300_gpu_reset, |
| 271 | .mc_init = &rs400_mc_init, | ||
| 272 | .mc_fini = &rs400_mc_fini, | ||
| 273 | .wb_init = &r100_wb_init, | ||
| 274 | .wb_fini = &r100_wb_fini, | ||
| 275 | .gart_init = &rs400_gart_init, | ||
| 276 | .gart_fini = &rs400_gart_fini, | ||
| 277 | .gart_enable = &rs400_gart_enable, | ||
| 278 | .gart_disable = &rs400_gart_disable, | ||
| 279 | .gart_tlb_flush = &rs400_gart_tlb_flush, | 216 | .gart_tlb_flush = &rs400_gart_tlb_flush, |
| 280 | .gart_set_page = &rs400_gart_set_page, | 217 | .gart_set_page = &rs400_gart_set_page, |
| 281 | .cp_init = &r100_cp_init, | ||
| 282 | .cp_fini = &r100_cp_fini, | ||
| 283 | .cp_disable = &r100_cp_disable, | ||
| 284 | .cp_commit = &r100_cp_commit, | 218 | .cp_commit = &r100_cp_commit, |
| 285 | .ring_start = &r300_ring_start, | 219 | .ring_start = &r300_ring_start, |
| 286 | .ring_test = &r100_ring_test, | 220 | .ring_test = &r100_ring_test, |
| 287 | .ring_ib_execute = &r100_ring_ib_execute, | 221 | .ring_ib_execute = &r100_ring_ib_execute, |
| 288 | .ib_test = &r100_ib_test, | ||
| 289 | .irq_set = &r100_irq_set, | 222 | .irq_set = &r100_irq_set, |
| 290 | .irq_process = &r100_irq_process, | 223 | .irq_process = &r100_irq_process, |
| 291 | .get_vblank_counter = &r100_get_vblank_counter, | 224 | .get_vblank_counter = &r100_get_vblank_counter, |
| @@ -307,18 +240,13 @@ static struct radeon_asic rs400_asic = { | |||
| 307 | /* | 240 | /* |
| 308 | * rs600. | 241 | * rs600. |
| 309 | */ | 242 | */ |
| 310 | int rs600_init(struct radeon_device *rdev); | 243 | extern int rs600_init(struct radeon_device *rdev); |
| 311 | void rs600_errata(struct radeon_device *rdev); | 244 | extern void rs600_fini(struct radeon_device *rdev); |
| 312 | void rs600_vram_info(struct radeon_device *rdev); | 245 | extern int rs600_suspend(struct radeon_device *rdev); |
| 313 | int rs600_mc_init(struct radeon_device *rdev); | 246 | extern int rs600_resume(struct radeon_device *rdev); |
| 314 | void rs600_mc_fini(struct radeon_device *rdev); | ||
| 315 | int rs600_irq_set(struct radeon_device *rdev); | 247 | int rs600_irq_set(struct radeon_device *rdev); |
| 316 | int rs600_irq_process(struct radeon_device *rdev); | 248 | int rs600_irq_process(struct radeon_device *rdev); |
| 317 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); | 249 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); |
| 318 | int rs600_gart_init(struct radeon_device *rdev); | ||
| 319 | void rs600_gart_fini(struct radeon_device *rdev); | ||
| 320 | int rs600_gart_enable(struct radeon_device *rdev); | ||
| 321 | void rs600_gart_disable(struct radeon_device *rdev); | ||
| 322 | void rs600_gart_tlb_flush(struct radeon_device *rdev); | 250 | void rs600_gart_tlb_flush(struct radeon_device *rdev); |
| 323 | int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 251 | int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
| 324 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 252 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
| @@ -326,28 +254,17 @@ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | |||
| 326 | void rs600_bandwidth_update(struct radeon_device *rdev); | 254 | void rs600_bandwidth_update(struct radeon_device *rdev); |
| 327 | static struct radeon_asic rs600_asic = { | 255 | static struct radeon_asic rs600_asic = { |
| 328 | .init = &rs600_init, | 256 | .init = &rs600_init, |
| 329 | .errata = &rs600_errata, | 257 | .fini = &rs600_fini, |
| 330 | .vram_info = &rs600_vram_info, | 258 | .suspend = &rs600_suspend, |
| 259 | .resume = &rs600_resume, | ||
| 331 | .vga_set_state = &r100_vga_set_state, | 260 | .vga_set_state = &r100_vga_set_state, |
| 332 | .gpu_reset = &r300_gpu_reset, | 261 | .gpu_reset = &r300_gpu_reset, |
| 333 | .mc_init = &rs600_mc_init, | ||
| 334 | .mc_fini = &rs600_mc_fini, | ||
| 335 | .wb_init = &r100_wb_init, | ||
| 336 | .wb_fini = &r100_wb_fini, | ||
| 337 | .gart_init = &rs600_gart_init, | ||
| 338 | .gart_fini = &rs600_gart_fini, | ||
| 339 | .gart_enable = &rs600_gart_enable, | ||
| 340 | .gart_disable = &rs600_gart_disable, | ||
| 341 | .gart_tlb_flush = &rs600_gart_tlb_flush, | 262 | .gart_tlb_flush = &rs600_gart_tlb_flush, |
| 342 | .gart_set_page = &rs600_gart_set_page, | 263 | .gart_set_page = &rs600_gart_set_page, |
| 343 | .cp_init = &r100_cp_init, | ||
| 344 | .cp_fini = &r100_cp_fini, | ||
| 345 | .cp_disable = &r100_cp_disable, | ||
| 346 | .cp_commit = &r100_cp_commit, | 264 | .cp_commit = &r100_cp_commit, |
| 347 | .ring_start = &r300_ring_start, | 265 | .ring_start = &r300_ring_start, |
| 348 | .ring_test = &r100_ring_test, | 266 | .ring_test = &r100_ring_test, |
| 349 | .ring_ib_execute = &r100_ring_ib_execute, | 267 | .ring_ib_execute = &r100_ring_ib_execute, |
| 350 | .ib_test = &r100_ib_test, | ||
| 351 | .irq_set = &rs600_irq_set, | 268 | .irq_set = &rs600_irq_set, |
| 352 | .irq_process = &rs600_irq_process, | 269 | .irq_process = &rs600_irq_process, |
| 353 | .get_vblank_counter = &rs600_get_vblank_counter, | 270 | .get_vblank_counter = &rs600_get_vblank_counter, |
| @@ -367,37 +284,26 @@ static struct radeon_asic rs600_asic = { | |||
| 367 | /* | 284 | /* |
| 368 | * rs690,rs740 | 285 | * rs690,rs740 |
| 369 | */ | 286 | */ |
| 370 | void rs690_errata(struct radeon_device *rdev); | 287 | int rs690_init(struct radeon_device *rdev); |
| 371 | void rs690_vram_info(struct radeon_device *rdev); | 288 | void rs690_fini(struct radeon_device *rdev); |
| 372 | int rs690_mc_init(struct radeon_device *rdev); | 289 | int rs690_resume(struct radeon_device *rdev); |
| 373 | void rs690_mc_fini(struct radeon_device *rdev); | 290 | int rs690_suspend(struct radeon_device *rdev); |
| 374 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 291 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
| 375 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 292 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
| 376 | void rs690_bandwidth_update(struct radeon_device *rdev); | 293 | void rs690_bandwidth_update(struct radeon_device *rdev); |
| 377 | static struct radeon_asic rs690_asic = { | 294 | static struct radeon_asic rs690_asic = { |
| 378 | .init = &rs600_init, | 295 | .init = &rs690_init, |
| 379 | .errata = &rs690_errata, | 296 | .fini = &rs690_fini, |
| 380 | .vram_info = &rs690_vram_info, | 297 | .suspend = &rs690_suspend, |
| 298 | .resume = &rs690_resume, | ||
| 381 | .vga_set_state = &r100_vga_set_state, | 299 | .vga_set_state = &r100_vga_set_state, |
| 382 | .gpu_reset = &r300_gpu_reset, | 300 | .gpu_reset = &r300_gpu_reset, |
| 383 | .mc_init = &rs690_mc_init, | ||
| 384 | .mc_fini = &rs690_mc_fini, | ||
| 385 | .wb_init = &r100_wb_init, | ||
| 386 | .wb_fini = &r100_wb_fini, | ||
| 387 | .gart_init = &rs400_gart_init, | ||
| 388 | .gart_fini = &rs400_gart_fini, | ||
| 389 | .gart_enable = &rs400_gart_enable, | ||
| 390 | .gart_disable = &rs400_gart_disable, | ||
| 391 | .gart_tlb_flush = &rs400_gart_tlb_flush, | 301 | .gart_tlb_flush = &rs400_gart_tlb_flush, |
| 392 | .gart_set_page = &rs400_gart_set_page, | 302 | .gart_set_page = &rs400_gart_set_page, |
| 393 | .cp_init = &r100_cp_init, | ||
| 394 | .cp_fini = &r100_cp_fini, | ||
| 395 | .cp_disable = &r100_cp_disable, | ||
| 396 | .cp_commit = &r100_cp_commit, | 303 | .cp_commit = &r100_cp_commit, |
| 397 | .ring_start = &r300_ring_start, | 304 | .ring_start = &r300_ring_start, |
| 398 | .ring_test = &r100_ring_test, | 305 | .ring_test = &r100_ring_test, |
| 399 | .ring_ib_execute = &r100_ring_ib_execute, | 306 | .ring_ib_execute = &r100_ring_ib_execute, |
| 400 | .ib_test = &r100_ib_test, | ||
| 401 | .irq_set = &rs600_irq_set, | 307 | .irq_set = &rs600_irq_set, |
| 402 | .irq_process = &rs600_irq_process, | 308 | .irq_process = &rs600_irq_process, |
| 403 | .get_vblank_counter = &rs600_get_vblank_counter, | 309 | .get_vblank_counter = &rs600_get_vblank_counter, |
| @@ -435,28 +341,14 @@ static struct radeon_asic rv515_asic = { | |||
| 435 | .fini = &rv515_fini, | 341 | .fini = &rv515_fini, |
| 436 | .suspend = &rv515_suspend, | 342 | .suspend = &rv515_suspend, |
| 437 | .resume = &rv515_resume, | 343 | .resume = &rv515_resume, |
| 438 | .errata = NULL, | ||
| 439 | .vram_info = NULL, | ||
| 440 | .vga_set_state = &r100_vga_set_state, | 344 | .vga_set_state = &r100_vga_set_state, |
| 441 | .gpu_reset = &rv515_gpu_reset, | 345 | .gpu_reset = &rv515_gpu_reset, |
| 442 | .mc_init = NULL, | ||
| 443 | .mc_fini = NULL, | ||
| 444 | .wb_init = NULL, | ||
| 445 | .wb_fini = NULL, | ||
| 446 | .gart_init = &rv370_pcie_gart_init, | ||
| 447 | .gart_fini = &rv370_pcie_gart_fini, | ||
| 448 | .gart_enable = NULL, | ||
| 449 | .gart_disable = NULL, | ||
| 450 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | 346 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, |
| 451 | .gart_set_page = &rv370_pcie_gart_set_page, | 347 | .gart_set_page = &rv370_pcie_gart_set_page, |
| 452 | .cp_init = NULL, | ||
| 453 | .cp_fini = NULL, | ||
| 454 | .cp_disable = NULL, | ||
| 455 | .cp_commit = &r100_cp_commit, | 348 | .cp_commit = &r100_cp_commit, |
| 456 | .ring_start = &rv515_ring_start, | 349 | .ring_start = &rv515_ring_start, |
| 457 | .ring_test = &r100_ring_test, | 350 | .ring_test = &r100_ring_test, |
| 458 | .ring_ib_execute = &r100_ring_ib_execute, | 351 | .ring_ib_execute = &r100_ring_ib_execute, |
| 459 | .ib_test = NULL, | ||
| 460 | .irq_set = &rs600_irq_set, | 352 | .irq_set = &rs600_irq_set, |
| 461 | .irq_process = &rs600_irq_process, | 353 | .irq_process = &rs600_irq_process, |
| 462 | .get_vblank_counter = &rs600_get_vblank_counter, | 354 | .get_vblank_counter = &rs600_get_vblank_counter, |
| @@ -485,28 +377,14 @@ static struct radeon_asic r520_asic = { | |||
| 485 | .fini = &rv515_fini, | 377 | .fini = &rv515_fini, |
| 486 | .suspend = &rv515_suspend, | 378 | .suspend = &rv515_suspend, |
| 487 | .resume = &r520_resume, | 379 | .resume = &r520_resume, |
| 488 | .errata = NULL, | ||
| 489 | .vram_info = NULL, | ||
| 490 | .vga_set_state = &r100_vga_set_state, | 380 | .vga_set_state = &r100_vga_set_state, |
| 491 | .gpu_reset = &rv515_gpu_reset, | 381 | .gpu_reset = &rv515_gpu_reset, |
| 492 | .mc_init = NULL, | ||
| 493 | .mc_fini = NULL, | ||
| 494 | .wb_init = NULL, | ||
| 495 | .wb_fini = NULL, | ||
| 496 | .gart_init = NULL, | ||
| 497 | .gart_fini = NULL, | ||
| 498 | .gart_enable = NULL, | ||
| 499 | .gart_disable = NULL, | ||
| 500 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | 382 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, |
| 501 | .gart_set_page = &rv370_pcie_gart_set_page, | 383 | .gart_set_page = &rv370_pcie_gart_set_page, |
| 502 | .cp_init = NULL, | ||
| 503 | .cp_fini = NULL, | ||
| 504 | .cp_disable = NULL, | ||
| 505 | .cp_commit = &r100_cp_commit, | 384 | .cp_commit = &r100_cp_commit, |
| 506 | .ring_start = &rv515_ring_start, | 385 | .ring_start = &rv515_ring_start, |
| 507 | .ring_test = &r100_ring_test, | 386 | .ring_test = &r100_ring_test, |
| 508 | .ring_ib_execute = &r100_ring_ib_execute, | 387 | .ring_ib_execute = &r100_ring_ib_execute, |
| 509 | .ib_test = NULL, | ||
| 510 | .irq_set = &rs600_irq_set, | 388 | .irq_set = &rs600_irq_set, |
| 511 | .irq_process = &rs600_irq_process, | 389 | .irq_process = &rs600_irq_process, |
| 512 | .get_vblank_counter = &rs600_get_vblank_counter, | 390 | .get_vblank_counter = &rs600_get_vblank_counter, |
| @@ -554,37 +432,23 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg, | |||
| 554 | uint32_t offset, uint32_t obj_size); | 432 | uint32_t offset, uint32_t obj_size); |
| 555 | int r600_clear_surface_reg(struct radeon_device *rdev, int reg); | 433 | int r600_clear_surface_reg(struct radeon_device *rdev, int reg); |
| 556 | void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | 434 | void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
| 557 | int r600_ib_test(struct radeon_device *rdev); | ||
| 558 | int r600_ring_test(struct radeon_device *rdev); | 435 | int r600_ring_test(struct radeon_device *rdev); |
| 559 | int r600_copy_blit(struct radeon_device *rdev, | 436 | int r600_copy_blit(struct radeon_device *rdev, |
| 560 | uint64_t src_offset, uint64_t dst_offset, | 437 | uint64_t src_offset, uint64_t dst_offset, |
| 561 | unsigned num_pages, struct radeon_fence *fence); | 438 | unsigned num_pages, struct radeon_fence *fence); |
| 562 | 439 | ||
| 563 | static struct radeon_asic r600_asic = { | 440 | static struct radeon_asic r600_asic = { |
| 564 | .errata = NULL, | ||
| 565 | .init = &r600_init, | 441 | .init = &r600_init, |
| 566 | .fini = &r600_fini, | 442 | .fini = &r600_fini, |
| 567 | .suspend = &r600_suspend, | 443 | .suspend = &r600_suspend, |
| 568 | .resume = &r600_resume, | 444 | .resume = &r600_resume, |
| 569 | .cp_commit = &r600_cp_commit, | 445 | .cp_commit = &r600_cp_commit, |
| 570 | .vram_info = NULL, | ||
| 571 | .vga_set_state = &r600_vga_set_state, | 446 | .vga_set_state = &r600_vga_set_state, |
| 572 | .gpu_reset = &r600_gpu_reset, | 447 | .gpu_reset = &r600_gpu_reset, |
| 573 | .mc_init = NULL, | ||
| 574 | .mc_fini = NULL, | ||
| 575 | .wb_init = &r600_wb_init, | ||
| 576 | .wb_fini = &r600_wb_fini, | ||
| 577 | .gart_enable = NULL, | ||
| 578 | .gart_disable = NULL, | ||
| 579 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, | 448 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, |
| 580 | .gart_set_page = &rs600_gart_set_page, | 449 | .gart_set_page = &rs600_gart_set_page, |
| 581 | .cp_init = NULL, | ||
| 582 | .cp_fini = NULL, | ||
| 583 | .cp_disable = NULL, | ||
| 584 | .ring_start = NULL, | ||
| 585 | .ring_test = &r600_ring_test, | 450 | .ring_test = &r600_ring_test, |
| 586 | .ring_ib_execute = &r600_ring_ib_execute, | 451 | .ring_ib_execute = &r600_ring_ib_execute, |
| 587 | .ib_test = &r600_ib_test, | ||
| 588 | .irq_set = &r600_irq_set, | 452 | .irq_set = &r600_irq_set, |
| 589 | .irq_process = &r600_irq_process, | 453 | .irq_process = &r600_irq_process, |
| 590 | .fence_ring_emit = &r600_fence_ring_emit, | 454 | .fence_ring_emit = &r600_fence_ring_emit, |
| @@ -611,30 +475,17 @@ int rv770_resume(struct radeon_device *rdev); | |||
| 611 | int rv770_gpu_reset(struct radeon_device *rdev); | 475 | int rv770_gpu_reset(struct radeon_device *rdev); |
| 612 | 476 | ||
| 613 | static struct radeon_asic rv770_asic = { | 477 | static struct radeon_asic rv770_asic = { |
| 614 | .errata = NULL, | ||
| 615 | .init = &rv770_init, | 478 | .init = &rv770_init, |
| 616 | .fini = &rv770_fini, | 479 | .fini = &rv770_fini, |
| 617 | .suspend = &rv770_suspend, | 480 | .suspend = &rv770_suspend, |
| 618 | .resume = &rv770_resume, | 481 | .resume = &rv770_resume, |
| 619 | .cp_commit = &r600_cp_commit, | 482 | .cp_commit = &r600_cp_commit, |
| 620 | .vram_info = NULL, | ||
| 621 | .gpu_reset = &rv770_gpu_reset, | 483 | .gpu_reset = &rv770_gpu_reset, |
| 622 | .vga_set_state = &r600_vga_set_state, | 484 | .vga_set_state = &r600_vga_set_state, |
| 623 | .mc_init = NULL, | ||
| 624 | .mc_fini = NULL, | ||
| 625 | .wb_init = &r600_wb_init, | ||
| 626 | .wb_fini = &r600_wb_fini, | ||
| 627 | .gart_enable = NULL, | ||
| 628 | .gart_disable = NULL, | ||
| 629 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, | 485 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, |
| 630 | .gart_set_page = &rs600_gart_set_page, | 486 | .gart_set_page = &rs600_gart_set_page, |
| 631 | .cp_init = NULL, | ||
| 632 | .cp_fini = NULL, | ||
| 633 | .cp_disable = NULL, | ||
| 634 | .ring_start = NULL, | ||
| 635 | .ring_test = &r600_ring_test, | 487 | .ring_test = &r600_ring_test, |
| 636 | .ring_ib_execute = &r600_ring_ib_execute, | 488 | .ring_ib_execute = &r600_ring_ib_execute, |
| 637 | .ib_test = &r600_ib_test, | ||
| 638 | .irq_set = &r600_irq_set, | 489 | .irq_set = &r600_irq_set, |
| 639 | .irq_process = &r600_irq_process, | 490 | .irq_process = &r600_irq_process, |
| 640 | .fence_ring_emit = &r600_fence_ring_emit, | 491 | .fence_ring_emit = &r600_fence_ring_emit, |
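Taken together, the radeon_asic.h hunks above drop the fine-grained hooks (errata, vram_info, mc_*, wb_*, gart_init/fini/enable/disable, cp_init/fini/disable, ib_test) in favour of the per-family init/fini/suspend/resume callbacks. Below is a minimal sketch of the resulting vtable shape, restricted to fields that actually appear in this diff; the trimmed struct name is illustrative only, and the in-tree struct radeon_asic declares many more members.

/* Illustrative only: a trimmed-down view of the consolidated ASIC vtable. */
struct radeon_asic_sketch {
	int  (*init)(struct radeon_device *rdev);      /* full bring-up      */
	void (*fini)(struct radeon_device *rdev);      /* full teardown      */
	int  (*suspend)(struct radeon_device *rdev);
	int  (*resume)(struct radeon_device *rdev);
	int  (*gpu_reset)(struct radeon_device *rdev);
	void (*gart_tlb_flush)(struct radeon_device *rdev);
	int  (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
};

/* Each family now wires up its own lifetime callbacks, e.g. for rs400: */
static struct radeon_asic_sketch rs400_asic_sketch = {
	.init           = &rs400_init,
	.fini           = &rs400_fini,
	.suspend        = &rs400_suspend,
	.resume         = &rs400_resume,
	.gpu_reset      = &r300_gpu_reset,
	.gart_tlb_flush = &rs400_gart_tlb_flush,
	.gart_set_page  = &rs400_gart_set_page,
};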
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 96e37a6e7ce4..34a9b9119518 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c | |||
| @@ -33,12 +33,50 @@ | |||
| 33 | /* | 33 | /* |
| 34 | * BIOS. | 34 | * BIOS. |
| 35 | */ | 35 | */ |
| 36 | |||
| 37 | /* If you boot an IGP board with a discrete card as the primary, | ||
| 38 | * the IGP rom is not accessible via the rom bar as the IGP rom is | ||
| 39 | * part of the system bios. On boot, the system bios puts a | ||
| 40 | * copy of the igp rom at the start of vram if a discrete card is | ||
| 41 | * present. | ||
| 42 | */ | ||
| 43 | static bool igp_read_bios_from_vram(struct radeon_device *rdev) | ||
| 44 | { | ||
| 45 | uint8_t __iomem *bios; | ||
| 46 | resource_size_t vram_base; | ||
| 47 | resource_size_t size = 256 * 1024; /* ??? */ | ||
| 48 | |||
| 49 | rdev->bios = NULL; | ||
| 50 | vram_base = drm_get_resource_start(rdev->ddev, 0); | ||
| 51 | bios = ioremap(vram_base, size); | ||
| 52 | if (!bios) { | ||
| 53 | DRM_ERROR("Unable to mmap vram\n"); | ||
| 54 | return false; | ||
| 55 | } | ||
| 56 | |||
| 57 | if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { | ||
| 58 | iounmap(bios); | ||
| 59 | DRM_ERROR("bad rom signature\n"); | ||
| 60 | return false; | ||
| 61 | } | ||
| 62 | rdev->bios = kmalloc(size, GFP_KERNEL); | ||
| 63 | if (rdev->bios == NULL) { | ||
| 64 | iounmap(bios); | ||
| 65 | DRM_ERROR("kmalloc failed\n"); | ||
| 66 | return false; | ||
| 67 | } | ||
| 68 | memcpy(rdev->bios, bios, size); | ||
| 69 | iounmap(bios); | ||
| 70 | return true; | ||
| 71 | } | ||
| 72 | |||
| 36 | static bool radeon_read_bios(struct radeon_device *rdev) | 73 | static bool radeon_read_bios(struct radeon_device *rdev) |
| 37 | { | 74 | { |
| 38 | uint8_t __iomem *bios; | 75 | uint8_t __iomem *bios; |
| 39 | size_t size; | 76 | size_t size; |
| 40 | 77 | ||
| 41 | rdev->bios = NULL; | 78 | rdev->bios = NULL; |
| 79 | /* XXX: some cards may return 0 for rom size? ddx has a workaround */ | ||
| 42 | bios = pci_map_rom(rdev->pdev, &size); | 80 | bios = pci_map_rom(rdev->pdev, &size); |
| 43 | if (!bios) { | 81 | if (!bios) { |
| 44 | return false; | 82 | return false; |
| @@ -341,7 +379,9 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev) | |||
| 341 | 379 | ||
| 342 | static bool radeon_read_disabled_bios(struct radeon_device *rdev) | 380 | static bool radeon_read_disabled_bios(struct radeon_device *rdev) |
| 343 | { | 381 | { |
| 344 | if (rdev->family >= CHIP_RV770) | 382 | if (rdev->flags & RADEON_IS_IGP) |
| 383 | return igp_read_bios_from_vram(rdev); | ||
| 384 | else if (rdev->family >= CHIP_RV770) | ||
| 345 | return r700_read_disabled_bios(rdev); | 385 | return r700_read_disabled_bios(rdev); |
| 346 | else if (rdev->family >= CHIP_R600) | 386 | else if (rdev->family >= CHIP_R600) |
| 347 | return r600_read_disabled_bios(rdev); | 387 | return r600_read_disabled_bios(rdev); |
| @@ -356,7 +396,12 @@ bool radeon_get_bios(struct radeon_device *rdev) | |||
| 356 | bool r; | 396 | bool r; |
| 357 | uint16_t tmp; | 397 | uint16_t tmp; |
| 358 | 398 | ||
| 359 | r = radeon_read_bios(rdev); | 399 | if (rdev->flags & RADEON_IS_IGP) { |
| 400 | r = igp_read_bios_from_vram(rdev); | ||
| 401 | if (r == false) | ||
| 402 | r = radeon_read_bios(rdev); | ||
| 403 | } else | ||
| 404 | r = radeon_read_bios(rdev); | ||
| 360 | if (r == false) { | 405 | if (r == false) { |
| 361 | r = radeon_read_disabled_bios(rdev); | 406 | r = radeon_read_disabled_bios(rdev); |
| 362 | } | 407 | } |
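The radeon_bios.c changes make the BIOS fetch order explicit for IGP parts: try the copy staged at the start of VRAM first, fall back to the PCI ROM BAR, then to the family-specific disabled-BIOS paths. A hedged restatement of the resulting control flow follows; the function name is illustrative, and error handling plus the atom/combios split are omitted.

/* Simplified view of radeon_get_bios() after this patch. */
bool radeon_get_bios_sketch(struct radeon_device *rdev)
{
	bool r;

	if (rdev->flags & RADEON_IS_IGP) {
		/* The IGP ROM lives in the system BIOS; a copy is placed at
		 * the start of VRAM when a discrete card is primary. */
		r = igp_read_bios_from_vram(rdev);
		if (!r)
			r = radeon_read_bios(rdev);     /* PCI ROM BAR */
	} else {
		r = radeon_read_bios(rdev);
	}
	if (!r)
		r = radeon_read_disabled_bios(rdev);    /* family-specific */
	return r;
}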
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index 152eef13197a..f5c32a766b10 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c | |||
| @@ -411,7 +411,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) | |||
| 411 | R300_PIXCLK_TRANS_ALWAYS_ONb | | 411 | R300_PIXCLK_TRANS_ALWAYS_ONb | |
| 412 | R300_PIXCLK_TVO_ALWAYS_ONb | | 412 | R300_PIXCLK_TVO_ALWAYS_ONb | |
| 413 | R300_P2G2CLK_ALWAYS_ONb | | 413 | R300_P2G2CLK_ALWAYS_ONb | |
| 414 | R300_P2G2CLK_ALWAYS_ONb); | 414 | R300_P2G2CLK_DAC_ALWAYS_ONb); |
| 415 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); | 415 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); |
| 416 | } else if (rdev->family >= CHIP_RV350) { | 416 | } else if (rdev->family >= CHIP_RV350) { |
| 417 | tmp = RREG32_PLL(R300_SCLK_CNTL2); | 417 | tmp = RREG32_PLL(R300_SCLK_CNTL2); |
| @@ -464,7 +464,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) | |||
| 464 | R300_PIXCLK_TRANS_ALWAYS_ONb | | 464 | R300_PIXCLK_TRANS_ALWAYS_ONb | |
| 465 | R300_PIXCLK_TVO_ALWAYS_ONb | | 465 | R300_PIXCLK_TVO_ALWAYS_ONb | |
| 466 | R300_P2G2CLK_ALWAYS_ONb | | 466 | R300_P2G2CLK_ALWAYS_ONb | |
| 467 | R300_P2G2CLK_ALWAYS_ONb); | 467 | R300_P2G2CLK_DAC_ALWAYS_ONb); |
| 468 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); | 468 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); |
| 469 | 469 | ||
| 470 | tmp = RREG32_PLL(RADEON_MCLK_MISC); | 470 | tmp = RREG32_PLL(RADEON_MCLK_MISC); |
| @@ -654,7 +654,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) | |||
| 654 | R300_PIXCLK_TRANS_ALWAYS_ONb | | 654 | R300_PIXCLK_TRANS_ALWAYS_ONb | |
| 655 | R300_PIXCLK_TVO_ALWAYS_ONb | | 655 | R300_PIXCLK_TVO_ALWAYS_ONb | |
| 656 | R300_P2G2CLK_ALWAYS_ONb | | 656 | R300_P2G2CLK_ALWAYS_ONb | |
| 657 | R300_P2G2CLK_ALWAYS_ONb | | 657 | R300_P2G2CLK_DAC_ALWAYS_ONb | |
| 658 | R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF); | 658 | R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF); |
| 659 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); | 659 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); |
| 660 | } else if (rdev->family >= CHIP_RV350) { | 660 | } else if (rdev->family >= CHIP_RV350) { |
| @@ -705,7 +705,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) | |||
| 705 | R300_PIXCLK_TRANS_ALWAYS_ONb | | 705 | R300_PIXCLK_TRANS_ALWAYS_ONb | |
| 706 | R300_PIXCLK_TVO_ALWAYS_ONb | | 706 | R300_PIXCLK_TVO_ALWAYS_ONb | |
| 707 | R300_P2G2CLK_ALWAYS_ONb | | 707 | R300_P2G2CLK_ALWAYS_ONb | |
| 708 | R300_P2G2CLK_ALWAYS_ONb | | 708 | R300_P2G2CLK_DAC_ALWAYS_ONb | |
| 709 | R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF); | 709 | R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF); |
| 710 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); | 710 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); |
| 711 | } else { | 711 | } else { |
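Every radeon_clocks.c hunk is the same one-token fix: R300_P2G2CLK_ALWAYS_ONb appeared twice in the OR-ed mask, so R300_P2G2CLK_DAC_ALWAYS_ONb was missing from it entirely. A standalone illustration of why the duplicate OR was a silent no-op; the bit positions below are made up for the example, only the duplication matters.

#include <assert.h>
#include <stdint.h>

#define R300_P2G2CLK_ALWAYS_ONb      (1u << 3)  /* example value */
#define R300_P2G2CLK_DAC_ALWAYS_ONb  (1u << 4)  /* example value */

int main(void)
{
	/* Before the fix the mask OR-ed the same bit twice ... */
	uint32_t before = R300_P2G2CLK_ALWAYS_ONb | R300_P2G2CLK_ALWAYS_ONb;
	/* ... which is identical to naming the bit once, so the DAC bit
	 * was absent from every place the mask was applied. */
	assert(before == R300_P2G2CLK_ALWAYS_ONb);

	uint32_t after = R300_P2G2CLK_ALWAYS_ONb | R300_P2G2CLK_DAC_ALWAYS_ONb;
	assert(after != before);
	return 0;
}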
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index ec835d56d30a..3d667031de6e 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -322,10 +322,6 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
| 322 | case CHIP_RV380: | 322 | case CHIP_RV380: |
| 323 | rdev->asic = &r300_asic; | 323 | rdev->asic = &r300_asic; |
| 324 | if (rdev->flags & RADEON_IS_PCIE) { | 324 | if (rdev->flags & RADEON_IS_PCIE) { |
| 325 | rdev->asic->gart_init = &rv370_pcie_gart_init; | ||
| 326 | rdev->asic->gart_fini = &rv370_pcie_gart_fini; | ||
| 327 | rdev->asic->gart_enable = &rv370_pcie_gart_enable; | ||
| 328 | rdev->asic->gart_disable = &rv370_pcie_gart_disable; | ||
| 329 | rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; | 325 | rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; |
| 330 | rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; | 326 | rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; |
| 331 | } | 327 | } |
| @@ -485,7 +481,6 @@ void radeon_combios_fini(struct radeon_device *rdev) | |||
| 485 | static unsigned int radeon_vga_set_decode(void *cookie, bool state) | 481 | static unsigned int radeon_vga_set_decode(void *cookie, bool state) |
| 486 | { | 482 | { |
| 487 | struct radeon_device *rdev = cookie; | 483 | struct radeon_device *rdev = cookie; |
| 488 | |||
| 489 | radeon_vga_set_state(rdev, state); | 484 | radeon_vga_set_state(rdev, state); |
| 490 | if (state) | 485 | if (state) |
| 491 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | | 486 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | |
| @@ -493,6 +488,29 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state) | |||
| 493 | else | 488 | else |
| 494 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | 489 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
| 495 | } | 490 | } |
| 491 | |||
| 492 | void radeon_agp_disable(struct radeon_device *rdev) | ||
| 493 | { | ||
| 494 | rdev->flags &= ~RADEON_IS_AGP; | ||
| 495 | if (rdev->family >= CHIP_R600) { | ||
| 496 | DRM_INFO("Forcing AGP to PCIE mode\n"); | ||
| 497 | rdev->flags |= RADEON_IS_PCIE; | ||
| 498 | } else if (rdev->family >= CHIP_RV515 || | ||
| 499 | rdev->family == CHIP_RV380 || | ||
| 500 | rdev->family == CHIP_RV410 || | ||
| 501 | rdev->family == CHIP_R423) { | ||
| 502 | DRM_INFO("Forcing AGP to PCIE mode\n"); | ||
| 503 | rdev->flags |= RADEON_IS_PCIE; | ||
| 504 | rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; | ||
| 505 | rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; | ||
| 506 | } else { | ||
| 507 | DRM_INFO("Forcing AGP to PCI mode\n"); | ||
| 508 | rdev->flags |= RADEON_IS_PCI; | ||
| 509 | rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; | ||
| 510 | rdev->asic->gart_set_page = &r100_pci_gart_set_page; | ||
| 511 | } | ||
| 512 | } | ||
| 513 | |||
| 496 | /* | 514 | /* |
| 497 | * Radeon device. | 515 | * Radeon device. |
| 498 | */ | 516 | */ |
| @@ -531,32 +549,7 @@ int radeon_device_init(struct radeon_device *rdev, | |||
| 531 | } | 549 | } |
| 532 | 550 | ||
| 533 | if (radeon_agpmode == -1) { | 551 | if (radeon_agpmode == -1) { |
| 534 | rdev->flags &= ~RADEON_IS_AGP; | 552 | radeon_agp_disable(rdev); |
| 535 | if (rdev->family >= CHIP_R600) { | ||
| 536 | DRM_INFO("Forcing AGP to PCIE mode\n"); | ||
| 537 | rdev->flags |= RADEON_IS_PCIE; | ||
| 538 | } else if (rdev->family >= CHIP_RV515 || | ||
| 539 | rdev->family == CHIP_RV380 || | ||
| 540 | rdev->family == CHIP_RV410 || | ||
| 541 | rdev->family == CHIP_R423) { | ||
| 542 | DRM_INFO("Forcing AGP to PCIE mode\n"); | ||
| 543 | rdev->flags |= RADEON_IS_PCIE; | ||
| 544 | rdev->asic->gart_init = &rv370_pcie_gart_init; | ||
| 545 | rdev->asic->gart_fini = &rv370_pcie_gart_fini; | ||
| 546 | rdev->asic->gart_enable = &rv370_pcie_gart_enable; | ||
| 547 | rdev->asic->gart_disable = &rv370_pcie_gart_disable; | ||
| 548 | rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; | ||
| 549 | rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; | ||
| 550 | } else { | ||
| 551 | DRM_INFO("Forcing AGP to PCI mode\n"); | ||
| 552 | rdev->flags |= RADEON_IS_PCI; | ||
| 553 | rdev->asic->gart_init = &r100_pci_gart_init; | ||
| 554 | rdev->asic->gart_fini = &r100_pci_gart_fini; | ||
| 555 | rdev->asic->gart_enable = &r100_pci_gart_enable; | ||
| 556 | rdev->asic->gart_disable = &r100_pci_gart_disable; | ||
| 557 | rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; | ||
| 558 | rdev->asic->gart_set_page = &r100_pci_gart_set_page; | ||
| 559 | } | ||
| 560 | } | 553 | } |
| 561 | 554 | ||
| 562 | /* set DMA mask + need_dma32 flags. | 555 | /* set DMA mask + need_dma32 flags. |
| @@ -588,111 +581,27 @@ int radeon_device_init(struct radeon_device *rdev, | |||
| 588 | DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base); | 581 | DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base); |
| 589 | DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size); | 582 | DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size); |
| 590 | 583 | ||
| 591 | rdev->new_init_path = false; | ||
| 592 | r = radeon_init(rdev); | ||
| 593 | if (r) { | ||
| 594 | return r; | ||
| 595 | } | ||
| 596 | |||
| 597 | /* if we have > 1 VGA cards, then disable the radeon VGA resources */ | 584 | /* if we have > 1 VGA cards, then disable the radeon VGA resources */ |
| 598 | r = vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); | 585 | r = vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); |
| 599 | if (r) { | 586 | if (r) { |
| 600 | return -EINVAL; | 587 | return -EINVAL; |
| 601 | } | 588 | } |
| 602 | 589 | ||
| 603 | if (!rdev->new_init_path) { | 590 | r = radeon_init(rdev); |
| 604 | /* Setup errata flags */ | 591 | if (r) |
| 605 | radeon_errata(rdev); | 592 | return r; |
| 606 | /* Initialize scratch registers */ | ||
| 607 | radeon_scratch_init(rdev); | ||
| 608 | /* Initialize surface registers */ | ||
| 609 | radeon_surface_init(rdev); | ||
| 610 | |||
| 611 | /* BIOS*/ | ||
| 612 | if (!radeon_get_bios(rdev)) { | ||
| 613 | if (ASIC_IS_AVIVO(rdev)) | ||
| 614 | return -EINVAL; | ||
| 615 | } | ||
| 616 | if (rdev->is_atom_bios) { | ||
| 617 | r = radeon_atombios_init(rdev); | ||
| 618 | if (r) { | ||
| 619 | return r; | ||
| 620 | } | ||
| 621 | } else { | ||
| 622 | r = radeon_combios_init(rdev); | ||
| 623 | if (r) { | ||
| 624 | return r; | ||
| 625 | } | ||
| 626 | } | ||
| 627 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
| 628 | if (radeon_gpu_reset(rdev)) { | ||
| 629 | /* FIXME: what do we want to do here ? */ | ||
| 630 | } | ||
| 631 | /* check if cards are posted or not */ | ||
| 632 | if (!radeon_card_posted(rdev) && rdev->bios) { | ||
| 633 | DRM_INFO("GPU not posted. posting now...\n"); | ||
| 634 | if (rdev->is_atom_bios) { | ||
| 635 | atom_asic_init(rdev->mode_info.atom_context); | ||
| 636 | } else { | ||
| 637 | radeon_combios_asic_init(rdev->ddev); | ||
| 638 | } | ||
| 639 | } | ||
| 640 | /* Get clock & vram information */ | ||
| 641 | radeon_get_clock_info(rdev->ddev); | ||
| 642 | radeon_vram_info(rdev); | ||
| 643 | /* Initialize clocks */ | ||
| 644 | r = radeon_clocks_init(rdev); | ||
| 645 | if (r) { | ||
| 646 | return r; | ||
| 647 | } | ||
| 648 | 593 | ||
| 649 | /* Initialize memory controller (also test AGP) */ | 594 | if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) { |
| 650 | r = radeon_mc_init(rdev); | 595 | /* Acceleration not working on AGP card try again |
| 651 | if (r) { | 596 | * with fallback to PCI or PCIE GART |
| 652 | return r; | 597 | */ |
| 653 | } | 598 | radeon_gpu_reset(rdev); |
| 654 | /* Fence driver */ | 599 | radeon_fini(rdev); |
| 655 | r = radeon_fence_driver_init(rdev); | 600 | radeon_agp_disable(rdev); |
| 656 | if (r) { | 601 | r = radeon_init(rdev); |
| 657 | return r; | ||
| 658 | } | ||
| 659 | r = radeon_irq_kms_init(rdev); | ||
| 660 | if (r) { | ||
| 661 | return r; | ||
| 662 | } | ||
| 663 | /* Memory manager */ | ||
| 664 | r = radeon_object_init(rdev); | ||
| 665 | if (r) { | ||
| 666 | return r; | ||
| 667 | } | ||
| 668 | r = radeon_gpu_gart_init(rdev); | ||
| 669 | if (r) | 602 | if (r) |
| 670 | return r; | 603 | return r; |
| 671 | /* Initialize GART (initialize after TTM so we can allocate | ||
| 672 | * memory through TTM but finalize after TTM) */ | ||
| 673 | r = radeon_gart_enable(rdev); | ||
| 674 | if (r) | ||
| 675 | return 0; | ||
| 676 | r = radeon_gem_init(rdev); | ||
| 677 | if (r) | ||
| 678 | return 0; | ||
| 679 | |||
| 680 | /* 1M ring buffer */ | ||
| 681 | r = radeon_cp_init(rdev, 1024 * 1024); | ||
| 682 | if (r) | ||
| 683 | return 0; | ||
| 684 | r = radeon_wb_init(rdev); | ||
| 685 | if (r) | ||
| 686 | DRM_ERROR("radeon: failled initializing WB (%d).\n", r); | ||
| 687 | r = radeon_ib_pool_init(rdev); | ||
| 688 | if (r) | ||
| 689 | return 0; | ||
| 690 | r = radeon_ib_test(rdev); | ||
| 691 | if (r) | ||
| 692 | return 0; | ||
| 693 | rdev->accel_working = true; | ||
| 694 | } | 604 | } |
| 695 | DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); | ||
| 696 | if (radeon_testing) { | 605 | if (radeon_testing) { |
| 697 | radeon_test_moves(rdev); | 606 | radeon_test_moves(rdev); |
| 698 | } | 607 | } |
| @@ -706,32 +615,8 @@ void radeon_device_fini(struct radeon_device *rdev) | |||
| 706 | { | 615 | { |
| 707 | DRM_INFO("radeon: finishing device.\n"); | 616 | DRM_INFO("radeon: finishing device.\n"); |
| 708 | rdev->shutdown = true; | 617 | rdev->shutdown = true; |
| 709 | /* Order matter so becarefull if you rearrange anythings */ | 618 | radeon_fini(rdev); |
| 710 | if (!rdev->new_init_path) { | 619 | vga_client_register(rdev->pdev, NULL, NULL, NULL); |
| 711 | radeon_ib_pool_fini(rdev); | ||
| 712 | radeon_cp_fini(rdev); | ||
| 713 | radeon_wb_fini(rdev); | ||
| 714 | radeon_gpu_gart_fini(rdev); | ||
| 715 | radeon_gem_fini(rdev); | ||
| 716 | radeon_mc_fini(rdev); | ||
| 717 | #if __OS_HAS_AGP | ||
| 718 | radeon_agp_fini(rdev); | ||
| 719 | #endif | ||
| 720 | radeon_irq_kms_fini(rdev); | ||
| 721 | vga_client_register(rdev->pdev, NULL, NULL, NULL); | ||
| 722 | radeon_fence_driver_fini(rdev); | ||
| 723 | radeon_clocks_fini(rdev); | ||
| 724 | radeon_object_fini(rdev); | ||
| 725 | if (rdev->is_atom_bios) { | ||
| 726 | radeon_atombios_fini(rdev); | ||
| 727 | } else { | ||
| 728 | radeon_combios_fini(rdev); | ||
| 729 | } | ||
| 730 | kfree(rdev->bios); | ||
| 731 | rdev->bios = NULL; | ||
| 732 | } else { | ||
| 733 | radeon_fini(rdev); | ||
| 734 | } | ||
| 735 | iounmap(rdev->rmmio); | 620 | iounmap(rdev->rmmio); |
| 736 | rdev->rmmio = NULL; | 621 | rdev->rmmio = NULL; |
| 737 | } | 622 | } |
| @@ -771,14 +656,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
| 771 | 656 | ||
| 772 | radeon_save_bios_scratch_regs(rdev); | 657 | radeon_save_bios_scratch_regs(rdev); |
| 773 | 658 | ||
| 774 | if (!rdev->new_init_path) { | 659 | radeon_suspend(rdev); |
| 775 | radeon_cp_disable(rdev); | ||
| 776 | radeon_gart_disable(rdev); | ||
| 777 | rdev->irq.sw_int = false; | ||
| 778 | radeon_irq_set(rdev); | ||
| 779 | } else { | ||
| 780 | radeon_suspend(rdev); | ||
| 781 | } | ||
| 782 | /* evict remaining vram memory */ | 660 | /* evict remaining vram memory */ |
| 783 | radeon_object_evict_vram(rdev); | 661 | radeon_object_evict_vram(rdev); |
| 784 | 662 | ||
| @@ -797,7 +675,6 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
| 797 | int radeon_resume_kms(struct drm_device *dev) | 675 | int radeon_resume_kms(struct drm_device *dev) |
| 798 | { | 676 | { |
| 799 | struct radeon_device *rdev = dev->dev_private; | 677 | struct radeon_device *rdev = dev->dev_private; |
| 800 | int r; | ||
| 801 | 678 | ||
| 802 | acquire_console_sem(); | 679 | acquire_console_sem(); |
| 803 | pci_set_power_state(dev->pdev, PCI_D0); | 680 | pci_set_power_state(dev->pdev, PCI_D0); |
| @@ -807,43 +684,7 @@ int radeon_resume_kms(struct drm_device *dev) | |||
| 807 | return -1; | 684 | return -1; |
| 808 | } | 685 | } |
| 809 | pci_set_master(dev->pdev); | 686 | pci_set_master(dev->pdev); |
| 810 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | 687 | radeon_resume(rdev); |
| 811 | if (!rdev->new_init_path) { | ||
| 812 | if (radeon_gpu_reset(rdev)) { | ||
| 813 | /* FIXME: what do we want to do here ? */ | ||
| 814 | } | ||
| 815 | /* post card */ | ||
| 816 | if (rdev->is_atom_bios) { | ||
| 817 | atom_asic_init(rdev->mode_info.atom_context); | ||
| 818 | } else { | ||
| 819 | radeon_combios_asic_init(rdev->ddev); | ||
| 820 | } | ||
| 821 | /* Initialize clocks */ | ||
| 822 | r = radeon_clocks_init(rdev); | ||
| 823 | if (r) { | ||
| 824 | release_console_sem(); | ||
| 825 | return r; | ||
| 826 | } | ||
| 827 | /* Enable IRQ */ | ||
| 828 | rdev->irq.sw_int = true; | ||
| 829 | radeon_irq_set(rdev); | ||
| 830 | /* Initialize GPU Memory Controller */ | ||
| 831 | r = radeon_mc_init(rdev); | ||
| 832 | if (r) { | ||
| 833 | goto out; | ||
| 834 | } | ||
| 835 | r = radeon_gart_enable(rdev); | ||
| 836 | if (r) { | ||
| 837 | goto out; | ||
| 838 | } | ||
| 839 | r = radeon_cp_init(rdev, rdev->cp.ring_size); | ||
| 840 | if (r) { | ||
| 841 | goto out; | ||
| 842 | } | ||
| 843 | } else { | ||
| 844 | radeon_resume(rdev); | ||
| 845 | } | ||
| 846 | out: | ||
| 847 | radeon_restore_bios_scratch_regs(rdev); | 688 | radeon_restore_bios_scratch_regs(rdev); |
| 848 | fb_set_suspend(rdev->fbdev_info, 0); | 689 | fb_set_suspend(rdev->fbdev_info, 0); |
| 849 | release_console_sem(); | 690 | release_console_sem(); |
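With the new_init_path branches gone, radeon_device_init() delegates everything to the per-ASIC callbacks and only keeps the AGP fallback. A rough sketch of the streamlined tail of the init path, under the assumption that mmio setup, DMA-mask handling and VGA arbitration have already run; the helper name is illustrative.

/* Rough shape of the end of radeon_device_init() after this patch. */
static int radeon_device_init_tail_sketch(struct radeon_device *rdev)
{
	int r;

	r = radeon_init(rdev);          /* per-ASIC init via rdev->asic->init */
	if (r)
		return r;

	if ((rdev->flags & RADEON_IS_AGP) && !rdev->accel_working) {
		/* AGP acceleration failed: reset, tear down, and retry
		 * with the GART forced to PCI/PCIE mode. */
		radeon_gpu_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
	return 0;
}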
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 5d8141b13765..3655d91993a6 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
| @@ -106,24 +106,33 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc) | |||
| 106 | legacy_crtc_load_lut(crtc); | 106 | legacy_crtc_load_lut(crtc); |
| 107 | } | 107 | } |
| 108 | 108 | ||
| 109 | /** Sets the color ramps on behalf of RandR */ | 109 | /** Sets the color ramps on behalf of fbcon */ |
| 110 | void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | 110 | void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
| 111 | u16 blue, int regno) | 111 | u16 blue, int regno) |
| 112 | { | 112 | { |
| 113 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 113 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
| 114 | 114 | ||
| 115 | if (regno == 0) | ||
| 116 | DRM_DEBUG("gamma set %d\n", radeon_crtc->crtc_id); | ||
| 117 | radeon_crtc->lut_r[regno] = red >> 6; | 115 | radeon_crtc->lut_r[regno] = red >> 6; |
| 118 | radeon_crtc->lut_g[regno] = green >> 6; | 116 | radeon_crtc->lut_g[regno] = green >> 6; |
| 119 | radeon_crtc->lut_b[regno] = blue >> 6; | 117 | radeon_crtc->lut_b[regno] = blue >> 6; |
| 120 | } | 118 | } |
| 121 | 119 | ||
| 120 | /** Gets the color ramps on behalf of fbcon */ | ||
| 121 | void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
| 122 | u16 *blue, int regno) | ||
| 123 | { | ||
| 124 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
| 125 | |||
| 126 | *red = radeon_crtc->lut_r[regno] << 6; | ||
| 127 | *green = radeon_crtc->lut_g[regno] << 6; | ||
| 128 | *blue = radeon_crtc->lut_b[regno] << 6; | ||
| 129 | } | ||
| 130 | |||
| 122 | static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | 131 | static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, |
| 123 | u16 *blue, uint32_t size) | 132 | u16 *blue, uint32_t size) |
| 124 | { | 133 | { |
| 125 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 134 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
| 126 | int i, j; | 135 | int i; |
| 127 | 136 | ||
| 128 | if (size != 256) { | 137 | if (size != 256) { |
| 129 | return; | 138 | return; |
| @@ -132,23 +141,11 @@ static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | |||
| 132 | return; | 141 | return; |
| 133 | } | 142 | } |
| 134 | 143 | ||
| 135 | if (crtc->fb->depth == 16) { | 144 | /* userspace palettes are always correct as is */ |
| 136 | for (i = 0; i < 64; i++) { | 145 | for (i = 0; i < 256; i++) { |
| 137 | if (i <= 31) { | 146 | radeon_crtc->lut_r[i] = red[i] >> 6; |
| 138 | for (j = 0; j < 8; j++) { | 147 | radeon_crtc->lut_g[i] = green[i] >> 6; |
| 139 | radeon_crtc->lut_r[i * 8 + j] = red[i] >> 6; | 148 | radeon_crtc->lut_b[i] = blue[i] >> 6; |
| 140 | radeon_crtc->lut_b[i * 8 + j] = blue[i] >> 6; | ||
| 141 | } | ||
| 142 | } | ||
| 143 | for (j = 0; j < 4; j++) | ||
| 144 | radeon_crtc->lut_g[i * 4 + j] = green[i] >> 6; | ||
| 145 | } | ||
| 146 | } else { | ||
| 147 | for (i = 0; i < 256; i++) { | ||
| 148 | radeon_crtc->lut_r[i] = red[i] >> 6; | ||
| 149 | radeon_crtc->lut_g[i] = green[i] >> 6; | ||
| 150 | radeon_crtc->lut_b[i] = blue[i] >> 6; | ||
| 151 | } | ||
| 152 | } | 149 | } |
| 153 | 150 | ||
| 154 | radeon_crtc_load_lut(crtc); | 151 | radeon_crtc_load_lut(crtc); |
| @@ -724,7 +721,11 @@ int radeon_modeset_init(struct radeon_device *rdev) | |||
| 724 | if (ret) { | 721 | if (ret) { |
| 725 | return ret; | 722 | return ret; |
| 726 | } | 723 | } |
| 727 | /* allocate crtcs - TODO single crtc */ | 724 | |
| 725 | if (rdev->flags & RADEON_SINGLE_CRTC) | ||
| 726 | num_crtc = 1; | ||
| 727 | |||
| 728 | /* allocate crtcs */ | ||
| 728 | for (i = 0; i < num_crtc; i++) { | 729 | for (i = 0; i < num_crtc; i++) { |
| 729 | radeon_crtc_init(rdev->ddev, i); | 730 | radeon_crtc_init(rdev->ddev, i); |
| 730 | } | 731 | } |
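The gamma helpers in radeon_display.c store fbcon's 16-bit per-channel values in the 10-bit hardware LUT (>> 6 in radeon_crtc_fb_gamma_set) and expand them back on read (<< 6 in radeon_crtc_fb_gamma_get), so the round trip quantizes to the upper 10 bits. A quick standalone check of that, using the same shifts as the diff:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t red    = 0xABCD;      /* arbitrary 16-bit colour component */
	uint16_t stored = red >> 6;    /* 10-bit hardware LUT entry         */
	uint16_t back   = stored << 6; /* what gamma_get reports back       */

	assert(stored <= 0x3FF);            /* fits the 10-bit table        */
	assert(back == (red & 0xFFC0));     /* low 6 bits are quantized away */
	return 0;
}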
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 621646752cd2..a65ab1a0dad2 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
| @@ -1345,6 +1345,7 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) | |||
| 1345 | void | 1345 | void |
| 1346 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) | 1346 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) |
| 1347 | { | 1347 | { |
| 1348 | struct radeon_device *rdev = dev->dev_private; | ||
| 1348 | struct drm_encoder *encoder; | 1349 | struct drm_encoder *encoder; |
| 1349 | struct radeon_encoder *radeon_encoder; | 1350 | struct radeon_encoder *radeon_encoder; |
| 1350 | 1351 | ||
| @@ -1364,7 +1365,10 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su | |||
| 1364 | return; | 1365 | return; |
| 1365 | 1366 | ||
| 1366 | encoder = &radeon_encoder->base; | 1367 | encoder = &radeon_encoder->base; |
| 1367 | encoder->possible_crtcs = 0x3; | 1368 | if (rdev->flags & RADEON_SINGLE_CRTC) |
| 1369 | encoder->possible_crtcs = 0x1; | ||
| 1370 | else | ||
| 1371 | encoder->possible_crtcs = 0x3; | ||
| 1368 | encoder->possible_clones = 0; | 1372 | encoder->possible_clones = 0; |
| 1369 | 1373 | ||
| 1370 | radeon_encoder->enc_priv = NULL; | 1374 | radeon_encoder->enc_priv = NULL; |
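possible_crtcs is a bitmask of CRTC indices the encoder may be routed to, so the radeon_encoders.c hunk restricts single-CRTC parts to CRTC 0 while dual-CRTC parts keep both. A one-off illustration of the two masks used above:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Bit i set means the encoder can drive CRTC i. */
	uint32_t both_crtcs  = (1u << 0) | (1u << 1);  /* 0x3: CRTCs 0 and 1 */
	uint32_t single_crtc = (1u << 0);              /* 0x1: CRTC 0 only   */

	assert(both_crtcs == 0x3 && single_crtc == 0x1);
	return 0;
}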
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 1ba704eedefb..b38c4c8e2c61 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
| @@ -55,6 +55,7 @@ static struct fb_ops radeonfb_ops = { | |||
| 55 | .fb_imageblit = cfb_imageblit, | 55 | .fb_imageblit = cfb_imageblit, |
| 56 | .fb_pan_display = drm_fb_helper_pan_display, | 56 | .fb_pan_display = drm_fb_helper_pan_display, |
| 57 | .fb_blank = drm_fb_helper_blank, | 57 | .fb_blank = drm_fb_helper_blank, |
| 58 | .fb_setcmap = drm_fb_helper_setcmap, | ||
| 58 | }; | 59 | }; |
| 59 | 60 | ||
| 60 | /** | 61 | /** |
| @@ -123,6 +124,7 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo | |||
| 123 | 124 | ||
| 124 | static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { | 125 | static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { |
| 125 | .gamma_set = radeon_crtc_fb_gamma_set, | 126 | .gamma_set = radeon_crtc_fb_gamma_set, |
| 127 | .gamma_get = radeon_crtc_fb_gamma_get, | ||
| 126 | }; | 128 | }; |
| 127 | 129 | ||
| 128 | int radeonfb_create(struct drm_device *dev, | 130 | int radeonfb_create(struct drm_device *dev, |
| @@ -146,9 +148,15 @@ int radeonfb_create(struct drm_device *dev, | |||
| 146 | unsigned long tmp; | 148 | unsigned long tmp; |
| 147 | bool fb_tiled = false; /* useful for testing */ | 149 | bool fb_tiled = false; /* useful for testing */ |
| 148 | u32 tiling_flags = 0; | 150 | u32 tiling_flags = 0; |
| 151 | int crtc_count; | ||
| 149 | 152 | ||
| 150 | mode_cmd.width = surface_width; | 153 | mode_cmd.width = surface_width; |
| 151 | mode_cmd.height = surface_height; | 154 | mode_cmd.height = surface_height; |
| 155 | |||
| 156 | /* avivo can't scanout real 24bpp */ | ||
| 157 | if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev)) | ||
| 158 | surface_bpp = 32; | ||
| 159 | |||
| 152 | mode_cmd.bpp = surface_bpp; | 160 | mode_cmd.bpp = surface_bpp; |
| 153 | /* need to align pitch with crtc limits */ | 161 | /* need to align pitch with crtc limits */ |
| 154 | mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); | 162 | mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); |
| @@ -217,7 +225,11 @@ int radeonfb_create(struct drm_device *dev, | |||
| 217 | rfbdev = info->par; | 225 | rfbdev = info->par; |
| 218 | rfbdev->helper.funcs = &radeon_fb_helper_funcs; | 226 | rfbdev->helper.funcs = &radeon_fb_helper_funcs; |
| 219 | rfbdev->helper.dev = dev; | 227 | rfbdev->helper.dev = dev; |
| 220 | ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, 2, | 228 | if (rdev->flags & RADEON_SINGLE_CRTC) |
| 229 | crtc_count = 1; | ||
| 230 | else | ||
| 231 | crtc_count = 2; | ||
| 232 | ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, crtc_count, | ||
| 221 | RADEONFB_CONN_LIMIT); | 233 | RADEONFB_CONN_LIMIT); |
| 222 | if (ret) | 234 | if (ret) |
| 223 | goto out_unref; | 235 | goto out_unref; |
| @@ -234,7 +246,7 @@ int radeonfb_create(struct drm_device *dev, | |||
| 234 | 246 | ||
| 235 | strcpy(info->fix.id, "radeondrmfb"); | 247 | strcpy(info->fix.id, "radeondrmfb"); |
| 236 | 248 | ||
| 237 | drm_fb_helper_fill_fix(info, fb->pitch); | 249 | drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); |
| 238 | 250 | ||
| 239 | info->flags = FBINFO_DEFAULT; | 251 | info->flags = FBINFO_DEFAULT; |
| 240 | info->fbops = &radeonfb_ops; | 252 | info->fbops = &radeonfb_ops; |
| @@ -309,7 +321,7 @@ int radeon_parse_options(char *options) | |||
| 309 | 321 | ||
| 310 | int radeonfb_probe(struct drm_device *dev) | 322 | int radeonfb_probe(struct drm_device *dev) |
| 311 | { | 323 | { |
| 312 | return drm_fb_helper_single_fb_probe(dev, &radeonfb_create); | 324 | return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create); |
| 313 | } | 325 | } |
| 314 | 326 | ||
| 315 | int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) | 327 | int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) |
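The fbdev path above promotes 24 bpp to 32 bpp on AVIVO parts because those display engines cannot scan out packed 24-bit pixels, and the pitch is then the CRTC-aligned width times the pixel size in bytes. The (bpp + 1) / 8 term is what turns a bit depth into bytes, rounding 15 bpp up to 2 bytes; a minimal sketch of that arithmetic (hypothetical helper, not driver code):

    /* (bpp + 1) / 8: 8 -> 1, 15 -> 2, 16 -> 2, 24 -> 3, 32 -> 4 bytes */
    static int bytes_per_pixel(int bpp)
    {
            return (bpp + 1) / 8;
    }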
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 1841145a7c4f..8e0a8759e428 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c | |||
| @@ -83,8 +83,12 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) | |||
| 83 | int radeon_irq_kms_init(struct radeon_device *rdev) | 83 | int radeon_irq_kms_init(struct radeon_device *rdev) |
| 84 | { | 84 | { |
| 85 | int r = 0; | 85 | int r = 0; |
| 86 | int num_crtc = 2; | ||
| 86 | 87 | ||
| 87 | r = drm_vblank_init(rdev->ddev, 2); | 88 | if (rdev->flags & RADEON_SINGLE_CRTC) |
| 89 | num_crtc = 1; | ||
| 90 | |||
| 91 | r = drm_vblank_init(rdev->ddev, num_crtc); | ||
| 88 | if (r) { | 92 | if (r) { |
| 89 | return r; | 93 | return r; |
| 90 | } | 94 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 2b997a15fb1f..36410f85d705 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
| @@ -1053,6 +1053,7 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = { | |||
| 1053 | .mode_set_base = radeon_crtc_set_base, | 1053 | .mode_set_base = radeon_crtc_set_base, |
| 1054 | .prepare = radeon_crtc_prepare, | 1054 | .prepare = radeon_crtc_prepare, |
| 1055 | .commit = radeon_crtc_commit, | 1055 | .commit = radeon_crtc_commit, |
| 1056 | .load_lut = radeon_crtc_load_lut, | ||
| 1056 | }; | 1057 | }; |
| 1057 | 1058 | ||
| 1058 | 1059 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index b1547f700d73..6ceb958fd194 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
| @@ -881,7 +881,7 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, | |||
| 881 | R420_TV_DAC_DACADJ_MASK | | 881 | R420_TV_DAC_DACADJ_MASK | |
| 882 | R420_TV_DAC_RDACPD | | 882 | R420_TV_DAC_RDACPD | |
| 883 | R420_TV_DAC_GDACPD | | 883 | R420_TV_DAC_GDACPD | |
| 884 | R420_TV_DAC_GDACPD | | 884 | R420_TV_DAC_BDACPD | |
| 885 | R420_TV_DAC_TVENABLE); | 885 | R420_TV_DAC_TVENABLE); |
| 886 | } else { | 886 | } else { |
| 887 | tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | | 887 | tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | |
| @@ -889,7 +889,7 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, | |||
| 889 | RADEON_TV_DAC_DACADJ_MASK | | 889 | RADEON_TV_DAC_DACADJ_MASK | |
| 890 | RADEON_TV_DAC_RDACPD | | 890 | RADEON_TV_DAC_RDACPD | |
| 891 | RADEON_TV_DAC_GDACPD | | 891 | RADEON_TV_DAC_GDACPD | |
| 892 | RADEON_TV_DAC_GDACPD); | 892 | RADEON_TV_DAC_BDACPD); |
| 893 | } | 893 | } |
| 894 | 894 | ||
| 895 | /* FIXME TV */ | 895 | /* FIXME TV */ |
| @@ -1318,7 +1318,10 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t | |||
| 1318 | return; | 1318 | return; |
| 1319 | 1319 | ||
| 1320 | encoder = &radeon_encoder->base; | 1320 | encoder = &radeon_encoder->base; |
| 1321 | encoder->possible_crtcs = 0x3; | 1321 | if (rdev->flags & RADEON_SINGLE_CRTC) |
| 1322 | encoder->possible_crtcs = 0x1; | ||
| 1323 | else | ||
| 1324 | encoder->possible_crtcs = 0x3; | ||
| 1322 | encoder->possible_clones = 0; | 1325 | encoder->possible_clones = 0; |
| 1323 | 1326 | ||
| 1324 | radeon_encoder->enc_priv = NULL; | 1327 | radeon_encoder->enc_priv = NULL; |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 570a58729daf..e61226817ccf 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
| @@ -407,6 +407,8 @@ extern void | |||
| 407 | radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on); | 407 | radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on); |
| 408 | extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | 408 | extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
| 409 | u16 blue, int regno); | 409 | u16 blue, int regno); |
| 410 | extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
| 411 | u16 *blue, int regno); | ||
| 410 | struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev, | 412 | struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev, |
| 411 | struct drm_mode_fb_cmd *mode_cmd, | 413 | struct drm_mode_fb_cmd *mode_cmd, |
| 412 | struct drm_gem_object *obj); | 414 | struct drm_gem_object *obj); |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 73af463b7a59..1f056dadc5c2 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
| @@ -400,11 +400,9 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj, | |||
| 400 | int radeon_object_list_reserve(struct list_head *head) | 400 | int radeon_object_list_reserve(struct list_head *head) |
| 401 | { | 401 | { |
| 402 | struct radeon_object_list *lobj; | 402 | struct radeon_object_list *lobj; |
| 403 | struct list_head *i; | ||
| 404 | int r; | 403 | int r; |
| 405 | 404 | ||
| 406 | list_for_each(i, head) { | 405 | list_for_each_entry(lobj, head, list) { |
| 407 | lobj = list_entry(i, struct radeon_object_list, list); | ||
| 408 | if (!lobj->robj->pin_count) { | 406 | if (!lobj->robj->pin_count) { |
| 409 | r = radeon_object_reserve(lobj->robj, true); | 407 | r = radeon_object_reserve(lobj->robj, true); |
| 410 | if (unlikely(r != 0)) { | 408 | if (unlikely(r != 0)) { |
| @@ -420,13 +418,10 @@ int radeon_object_list_reserve(struct list_head *head) | |||
| 420 | void radeon_object_list_unreserve(struct list_head *head) | 418 | void radeon_object_list_unreserve(struct list_head *head) |
| 421 | { | 419 | { |
| 422 | struct radeon_object_list *lobj; | 420 | struct radeon_object_list *lobj; |
| 423 | struct list_head *i; | ||
| 424 | 421 | ||
| 425 | list_for_each(i, head) { | 422 | list_for_each_entry(lobj, head, list) { |
| 426 | lobj = list_entry(i, struct radeon_object_list, list); | ||
| 427 | if (!lobj->robj->pin_count) { | 423 | if (!lobj->robj->pin_count) { |
| 428 | radeon_object_unreserve(lobj->robj); | 424 | radeon_object_unreserve(lobj->robj); |
| 429 | } else { | ||
| 430 | } | 425 | } |
| 431 | } | 426 | } |
| 432 | } | 427 | } |
| @@ -436,7 +431,6 @@ int radeon_object_list_validate(struct list_head *head, void *fence) | |||
| 436 | struct radeon_object_list *lobj; | 431 | struct radeon_object_list *lobj; |
| 437 | struct radeon_object *robj; | 432 | struct radeon_object *robj; |
| 438 | struct radeon_fence *old_fence = NULL; | 433 | struct radeon_fence *old_fence = NULL; |
| 439 | struct list_head *i; | ||
| 440 | int r; | 434 | int r; |
| 441 | 435 | ||
| 442 | r = radeon_object_list_reserve(head); | 436 | r = radeon_object_list_reserve(head); |
| @@ -444,8 +438,7 @@ int radeon_object_list_validate(struct list_head *head, void *fence) | |||
| 444 | radeon_object_list_unreserve(head); | 438 | radeon_object_list_unreserve(head); |
| 445 | return r; | 439 | return r; |
| 446 | } | 440 | } |
| 447 | list_for_each(i, head) { | 441 | list_for_each_entry(lobj, head, list) { |
| 448 | lobj = list_entry(i, struct radeon_object_list, list); | ||
| 449 | robj = lobj->robj; | 442 | robj = lobj->robj; |
| 450 | if (!robj->pin_count) { | 443 | if (!robj->pin_count) { |
| 451 | if (lobj->wdomain) { | 444 | if (lobj->wdomain) { |
| @@ -482,10 +475,8 @@ void radeon_object_list_unvalidate(struct list_head *head) | |||
| 482 | { | 475 | { |
| 483 | struct radeon_object_list *lobj; | 476 | struct radeon_object_list *lobj; |
| 484 | struct radeon_fence *old_fence = NULL; | 477 | struct radeon_fence *old_fence = NULL; |
| 485 | struct list_head *i; | ||
| 486 | 478 | ||
| 487 | list_for_each(i, head) { | 479 | list_for_each_entry(lobj, head, list) { |
| 488 | lobj = list_entry(i, struct radeon_object_list, list); | ||
| 489 | old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj; | 480 | old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj; |
| 490 | lobj->robj->tobj.sync_obj = NULL; | 481 | lobj->robj->tobj.sync_obj = NULL; |
| 491 | if (old_fence) { | 482 | if (old_fence) { |
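The radeon_object.c hunks above are a pure cleanup: list_for_each() plus an explicit list_entry() lookup is replaced by list_for_each_entry(), which iterates and performs the container_of() step in one macro, so the temporary struct list_head cursor can be dropped. A minimal sketch of the idiom, with illustrative names only:

    struct radeon_object_list *lobj;

    /* before: list_for_each(i, head) { lobj = list_entry(i, struct radeon_object_list, list); ... } */
    list_for_each_entry(lobj, head, list) {
            /* use lobj directly */
    }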
diff --git a/drivers/gpu/drm/radeon/rs100d.h b/drivers/gpu/drm/radeon/rs100d.h new file mode 100644 index 000000000000..48a913a06cfd --- /dev/null +++ b/drivers/gpu/drm/radeon/rs100d.h | |||
| @@ -0,0 +1,40 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #ifndef __RS100D_H__ | ||
| 29 | #define __RS100D_H__ | ||
| 30 | |||
| 31 | /* Registers */ | ||
| 32 | #define R_00015C_NB_TOM 0x00015C | ||
| 33 | #define S_00015C_MC_FB_START(x) (((x) & 0xFFFF) << 0) | ||
| 34 | #define G_00015C_MC_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
| 35 | #define C_00015C_MC_FB_START 0xFFFF0000 | ||
| 36 | #define S_00015C_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) | ||
| 37 | #define G_00015C_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) | ||
| 38 | #define C_00015C_MC_FB_TOP 0x0000FFFF | ||
| 39 | |||
| 40 | #endif | ||
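The new rsXXXd.h headers all follow the same naming convention: R_<offset>_<NAME> is the register offset, S_<offset>_<FIELD>(x) shifts a value into the field, G_<offset>_<FIELD>(x) extracts it, and C_<offset>_<FIELD> is the AND-mask that clears it. A hedged sketch, purely to illustrate the convention, of a read-modify-write built from these macros and the driver's RREG32()/WREG32() accessors (variable names illustrative):

    u32 tmp = RREG32(R_00015C_NB_TOM);        /* read the register            */
    u32 top = G_00015C_MC_FB_TOP(tmp);        /* G_: extract the 16-bit field */
    tmp &= C_00015C_MC_FB_TOP;                /* C_: clear only that field    */
    tmp |= S_00015C_MC_FB_TOP(new_top);       /* S_: merge in the new value   */
    WREG32(R_00015C_NB_TOM, tmp);             /* write the result back        */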
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index a3fbdad938c7..a769c296f6a6 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
| @@ -27,27 +27,12 @@ | |||
| 27 | */ | 27 | */ |
| 28 | #include <linux/seq_file.h> | 28 | #include <linux/seq_file.h> |
| 29 | #include <drm/drmP.h> | 29 | #include <drm/drmP.h> |
| 30 | #include "radeon_reg.h" | ||
| 31 | #include "radeon.h" | 30 | #include "radeon.h" |
| 31 | #include "rs400d.h" | ||
| 32 | 32 | ||
| 33 | /* rs400,rs480 depends on : */ | 33 | /* This file gathers functions specific to rs400,rs480 */ |
| 34 | void r100_hdp_reset(struct radeon_device *rdev); | 34 | static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev); |
| 35 | void r100_mc_disable_clients(struct radeon_device *rdev); | ||
| 36 | int r300_mc_wait_for_idle(struct radeon_device *rdev); | ||
| 37 | void r420_pipes_init(struct radeon_device *rdev); | ||
| 38 | 35 | ||
| 39 | /* This files gather functions specifics to : | ||
| 40 | * rs400,rs480 | ||
| 41 | * | ||
| 42 | * Some of these functions might be used by newer ASICs. | ||
| 43 | */ | ||
| 44 | void rs400_gpu_init(struct radeon_device *rdev); | ||
| 45 | int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev); | ||
| 46 | |||
| 47 | |||
| 48 | /* | ||
| 49 | * GART functions. | ||
| 50 | */ | ||
| 51 | void rs400_gart_adjust_size(struct radeon_device *rdev) | 36 | void rs400_gart_adjust_size(struct radeon_device *rdev) |
| 52 | { | 37 | { |
| 53 | /* Check gart size */ | 38 | /* Check gart size */ |
| @@ -238,61 +223,6 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
| 238 | return 0; | 223 | return 0; |
| 239 | } | 224 | } |
| 240 | 225 | ||
| 241 | |||
| 242 | /* | ||
| 243 | * MC functions. | ||
| 244 | */ | ||
| 245 | int rs400_mc_init(struct radeon_device *rdev) | ||
| 246 | { | ||
| 247 | uint32_t tmp; | ||
| 248 | int r; | ||
| 249 | |||
| 250 | if (r100_debugfs_rbbm_init(rdev)) { | ||
| 251 | DRM_ERROR("Failed to register debugfs file for RBBM !\n"); | ||
| 252 | } | ||
| 253 | |||
| 254 | rs400_gpu_init(rdev); | ||
| 255 | rs400_gart_disable(rdev); | ||
| 256 | rdev->mc.gtt_location = rdev->mc.mc_vram_size; | ||
| 257 | rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); | ||
| 258 | rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); | ||
| 259 | r = radeon_mc_setup(rdev); | ||
| 260 | if (r) { | ||
| 261 | return r; | ||
| 262 | } | ||
| 263 | |||
| 264 | r100_mc_disable_clients(rdev); | ||
| 265 | if (r300_mc_wait_for_idle(rdev)) { | ||
| 266 | printk(KERN_WARNING "Failed to wait MC idle while " | ||
| 267 | "programming pipes. Bad things might happen.\n"); | ||
| 268 | } | ||
| 269 | |||
| 270 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; | ||
| 271 | tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); | ||
| 272 | tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); | ||
| 273 | WREG32(RADEON_MC_FB_LOCATION, tmp); | ||
| 274 | tmp = RREG32(RADEON_HOST_PATH_CNTL) | RADEON_HP_LIN_RD_CACHE_DIS; | ||
| 275 | WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE); | ||
| 276 | (void)RREG32(RADEON_HOST_PATH_CNTL); | ||
| 277 | WREG32(RADEON_HOST_PATH_CNTL, tmp); | ||
| 278 | (void)RREG32(RADEON_HOST_PATH_CNTL); | ||
| 279 | |||
| 280 | return 0; | ||
| 281 | } | ||
| 282 | |||
| 283 | void rs400_mc_fini(struct radeon_device *rdev) | ||
| 284 | { | ||
| 285 | } | ||
| 286 | |||
| 287 | |||
| 288 | /* | ||
| 289 | * Global GPU functions | ||
| 290 | */ | ||
| 291 | void rs400_errata(struct radeon_device *rdev) | ||
| 292 | { | ||
| 293 | rdev->pll_errata = 0; | ||
| 294 | } | ||
| 295 | |||
| 296 | void rs400_gpu_init(struct radeon_device *rdev) | 226 | void rs400_gpu_init(struct radeon_device *rdev) |
| 297 | { | 227 | { |
| 298 | /* FIXME: HDP same place on rs400 ? */ | 228 | /* FIXME: HDP same place on rs400 ? */ |
| @@ -305,10 +235,6 @@ void rs400_gpu_init(struct radeon_device *rdev) | |||
| 305 | } | 235 | } |
| 306 | } | 236 | } |
| 307 | 237 | ||
| 308 | |||
| 309 | /* | ||
| 310 | * VRAM info. | ||
| 311 | */ | ||
| 312 | void rs400_vram_info(struct radeon_device *rdev) | 238 | void rs400_vram_info(struct radeon_device *rdev) |
| 313 | { | 239 | { |
| 314 | rs400_gart_adjust_size(rdev); | 240 | rs400_gart_adjust_size(rdev); |
| @@ -319,10 +245,6 @@ void rs400_vram_info(struct radeon_device *rdev) | |||
| 319 | r100_vram_init_sizes(rdev); | 245 | r100_vram_init_sizes(rdev); |
| 320 | } | 246 | } |
| 321 | 247 | ||
| 322 | |||
| 323 | /* | ||
| 324 | * Indirect registers accessor | ||
| 325 | */ | ||
| 326 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) | 248 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
| 327 | { | 249 | { |
| 328 | uint32_t r; | 250 | uint32_t r; |
| @@ -340,10 +262,6 @@ void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | |||
| 340 | WREG32(RS480_NB_MC_INDEX, 0xff); | 262 | WREG32(RS480_NB_MC_INDEX, 0xff); |
| 341 | } | 263 | } |
| 342 | 264 | ||
| 343 | |||
| 344 | /* | ||
| 345 | * Debugfs info | ||
| 346 | */ | ||
| 347 | #if defined(CONFIG_DEBUG_FS) | 265 | #if defined(CONFIG_DEBUG_FS) |
| 348 | static int rs400_debugfs_gart_info(struct seq_file *m, void *data) | 266 | static int rs400_debugfs_gart_info(struct seq_file *m, void *data) |
| 349 | { | 267 | { |
| @@ -419,7 +337,7 @@ static struct drm_info_list rs400_gart_info_list[] = { | |||
| 419 | }; | 337 | }; |
| 420 | #endif | 338 | #endif |
| 421 | 339 | ||
| 422 | int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev) | 340 | static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev) |
| 423 | { | 341 | { |
| 424 | #if defined(CONFIG_DEBUG_FS) | 342 | #if defined(CONFIG_DEBUG_FS) |
| 425 | return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1); | 343 | return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1); |
| @@ -427,3 +345,188 @@ int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev) | |||
| 427 | return 0; | 345 | return 0; |
| 428 | #endif | 346 | #endif |
| 429 | } | 347 | } |
| 348 | |||
| 349 | static int rs400_mc_init(struct radeon_device *rdev) | ||
| 350 | { | ||
| 351 | int r; | ||
| 352 | u32 tmp; | ||
| 353 | |||
| 354 | /* Setup GPU memory space */ | ||
| 355 | tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM)); | ||
| 356 | rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16; | ||
| 357 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | ||
| 358 | r = radeon_mc_setup(rdev); | ||
| 359 | if (r) | ||
| 360 | return r; | ||
| 361 | return 0; | ||
| 362 | } | ||
| 363 | |||
| 364 | void rs400_mc_program(struct radeon_device *rdev) | ||
| 365 | { | ||
| 366 | struct r100_mc_save save; | ||
| 367 | |||
| 368 | /* Stops all mc clients */ | ||
| 369 | r100_mc_stop(rdev, &save); | ||
| 370 | |||
| 371 | /* Wait for mc idle */ | ||
| 372 | if (r300_mc_wait_for_idle(rdev)) | ||
| 373 | dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); | ||
| 374 | WREG32(R_000148_MC_FB_LOCATION, | ||
| 375 | S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | | ||
| 376 | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); | ||
| 377 | |||
| 378 | r100_mc_resume(rdev, &save); | ||
| 379 | } | ||
| 380 | |||
| 381 | static int rs400_startup(struct radeon_device *rdev) | ||
| 382 | { | ||
| 383 | int r; | ||
| 384 | |||
| 385 | rs400_mc_program(rdev); | ||
| 386 | /* Resume clock */ | ||
| 387 | r300_clock_startup(rdev); | ||
| 388 | /* Initialize GPU configuration (# pipes, ...) */ | ||
| 389 | rs400_gpu_init(rdev); | ||
| 390 | /* Initialize GART (initialize after TTM so we can allocate | ||
| 391 | * memory through TTM but finalize after TTM) */ | ||
| 392 | r = rs400_gart_enable(rdev); | ||
| 393 | if (r) | ||
| 394 | return r; | ||
| 395 | /* Enable IRQ */ | ||
| 396 | rdev->irq.sw_int = true; | ||
| 397 | r100_irq_set(rdev); | ||
| 398 | /* 1M ring buffer */ | ||
| 399 | r = r100_cp_init(rdev, 1024 * 1024); | ||
| 400 | if (r) { | ||
| 401 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); | ||
| 402 | return r; | ||
| 403 | } | ||
| 404 | r = r100_wb_init(rdev); | ||
| 405 | if (r) | ||
| 406 | dev_err(rdev->dev, "failed initializing WB (%d).\n", r); | ||
| 407 | r = r100_ib_init(rdev); | ||
| 408 | if (r) { | ||
| 409 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); | ||
| 410 | return r; | ||
| 411 | } | ||
| 412 | return 0; | ||
| 413 | } | ||
| 414 | |||
| 415 | int rs400_resume(struct radeon_device *rdev) | ||
| 416 | { | ||
| 417 | /* Make sure GART is not working */ | ||
| 418 | rs400_gart_disable(rdev); | ||
| 419 | /* Resume clock before doing reset */ | ||
| 420 | r300_clock_startup(rdev); | ||
| 421 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
| 422 | if (radeon_gpu_reset(rdev)) { | ||
| 423 | dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
| 424 | RREG32(R_000E40_RBBM_STATUS), | ||
| 425 | RREG32(R_0007C0_CP_STAT)); | ||
| 426 | } | ||
| 427 | /* post */ | ||
| 428 | radeon_combios_asic_init(rdev->ddev); | ||
| 429 | /* Resume clock after posting */ | ||
| 430 | r300_clock_startup(rdev); | ||
| 431 | return rs400_startup(rdev); | ||
| 432 | } | ||
| 433 | |||
| 434 | int rs400_suspend(struct radeon_device *rdev) | ||
| 435 | { | ||
| 436 | r100_cp_disable(rdev); | ||
| 437 | r100_wb_disable(rdev); | ||
| 438 | r100_irq_disable(rdev); | ||
| 439 | rs400_gart_disable(rdev); | ||
| 440 | return 0; | ||
| 441 | } | ||
| 442 | |||
| 443 | void rs400_fini(struct radeon_device *rdev) | ||
| 444 | { | ||
| 445 | rs400_suspend(rdev); | ||
| 446 | r100_cp_fini(rdev); | ||
| 447 | r100_wb_fini(rdev); | ||
| 448 | r100_ib_fini(rdev); | ||
| 449 | radeon_gem_fini(rdev); | ||
| 450 | rs400_gart_fini(rdev); | ||
| 451 | radeon_irq_kms_fini(rdev); | ||
| 452 | radeon_fence_driver_fini(rdev); | ||
| 453 | radeon_object_fini(rdev); | ||
| 454 | radeon_atombios_fini(rdev); | ||
| 455 | kfree(rdev->bios); | ||
| 456 | rdev->bios = NULL; | ||
| 457 | } | ||
| 458 | |||
| 459 | int rs400_init(struct radeon_device *rdev) | ||
| 460 | { | ||
| 461 | int r; | ||
| 462 | |||
| 463 | /* Disable VGA */ | ||
| 464 | r100_vga_render_disable(rdev); | ||
| 465 | /* Initialize scratch registers */ | ||
| 466 | radeon_scratch_init(rdev); | ||
| 467 | /* Initialize surface registers */ | ||
| 468 | radeon_surface_init(rdev); | ||
| 469 | /* TODO: disable VGA need to use VGA request */ | ||
| 470 | /* BIOS*/ | ||
| 471 | if (!radeon_get_bios(rdev)) { | ||
| 472 | if (ASIC_IS_AVIVO(rdev)) | ||
| 473 | return -EINVAL; | ||
| 474 | } | ||
| 475 | if (rdev->is_atom_bios) { | ||
| 476 | dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n"); | ||
| 477 | return -EINVAL; | ||
| 478 | } else { | ||
| 479 | r = radeon_combios_init(rdev); | ||
| 480 | if (r) | ||
| 481 | return r; | ||
| 482 | } | ||
| 483 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
| 484 | if (radeon_gpu_reset(rdev)) { | ||
| 485 | dev_warn(rdev->dev, | ||
| 486 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
| 487 | RREG32(R_000E40_RBBM_STATUS), | ||
| 488 | RREG32(R_0007C0_CP_STAT)); | ||
| 489 | } | ||
| 490 | /* check if cards are posted or not */ | ||
| 491 | if (!radeon_card_posted(rdev) && rdev->bios) { | ||
| 492 | DRM_INFO("GPU not posted. posting now...\n"); | ||
| 493 | radeon_combios_asic_init(rdev->ddev); | ||
| 494 | } | ||
| 495 | /* Initialize clocks */ | ||
| 496 | radeon_get_clock_info(rdev->ddev); | ||
| 497 | /* Get vram information */ | ||
| 498 | rs400_vram_info(rdev); | ||
| 499 | /* Initialize memory controller (also test AGP) */ | ||
| 500 | r = rs400_mc_init(rdev); | ||
| 501 | if (r) | ||
| 502 | return r; | ||
| 503 | /* Fence driver */ | ||
| 504 | r = radeon_fence_driver_init(rdev); | ||
| 505 | if (r) | ||
| 506 | return r; | ||
| 507 | r = radeon_irq_kms_init(rdev); | ||
| 508 | if (r) | ||
| 509 | return r; | ||
| 510 | /* Memory manager */ | ||
| 511 | r = radeon_object_init(rdev); | ||
| 512 | if (r) | ||
| 513 | return r; | ||
| 514 | r = rs400_gart_init(rdev); | ||
| 515 | if (r) | ||
| 516 | return r; | ||
| 517 | r300_set_reg_safe(rdev); | ||
| 518 | rdev->accel_working = true; | ||
| 519 | r = rs400_startup(rdev); | ||
| 520 | if (r) { | ||
| 521 | /* Something went wrong with the accel init, stop accel */ | ||
| 522 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | ||
| 523 | rs400_suspend(rdev); | ||
| 524 | r100_cp_fini(rdev); | ||
| 525 | r100_wb_fini(rdev); | ||
| 526 | r100_ib_fini(rdev); | ||
| 527 | rs400_gart_fini(rdev); | ||
| 528 | radeon_irq_kms_fini(rdev); | ||
| 529 | rdev->accel_working = false; | ||
| 530 | } | ||
| 531 | return 0; | ||
| 532 | } | ||
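rs400_mc_program() above packs the framebuffer aperture into the two 16-bit halves of MC_FB_LOCATION, with both fields expressed in 64 KB units (hence the >> 16). A worked example, assuming a 128 MB aperture based at address 0:

    /* Illustrative values only: a 128 MB aperture based at 0              */
    /* vram_start = 0x00000000, vram_end = 0x07FFFFFF                      */
    /* S_000148_MC_FB_START(0x00000000 >> 16) = 0x0000                     */
    /* S_000148_MC_FB_TOP(0x07FFFFFF >> 16)   = 0x07FF << 16 = 0x07FF0000  */
    /* so MC_FB_LOCATION is programmed with 0x07FF0000                     */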
diff --git a/drivers/gpu/drm/radeon/rs400d.h b/drivers/gpu/drm/radeon/rs400d.h new file mode 100644 index 000000000000..6d8bac58ced9 --- /dev/null +++ b/drivers/gpu/drm/radeon/rs400d.h | |||
| @@ -0,0 +1,160 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #ifndef __RS400D_H__ | ||
| 29 | #define __RS400D_H__ | ||
| 30 | |||
| 31 | /* Registers */ | ||
| 32 | #define R_000148_MC_FB_LOCATION 0x000148 | ||
| 33 | #define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0) | ||
| 34 | #define G_000148_MC_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
| 35 | #define C_000148_MC_FB_START 0xFFFF0000 | ||
| 36 | #define S_000148_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) | ||
| 37 | #define G_000148_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) | ||
| 38 | #define C_000148_MC_FB_TOP 0x0000FFFF | ||
| 39 | #define R_00015C_NB_TOM 0x00015C | ||
| 40 | #define S_00015C_MC_FB_START(x) (((x) & 0xFFFF) << 0) | ||
| 41 | #define G_00015C_MC_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
| 42 | #define C_00015C_MC_FB_START 0xFFFF0000 | ||
| 43 | #define S_00015C_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) | ||
| 44 | #define G_00015C_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) | ||
| 45 | #define C_00015C_MC_FB_TOP 0x0000FFFF | ||
| 46 | #define R_0007C0_CP_STAT 0x0007C0 | ||
| 47 | #define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) | ||
| 48 | #define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) | ||
| 49 | #define C_0007C0_MRU_BUSY 0xFFFFFFFE | ||
| 50 | #define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) | ||
| 51 | #define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) | ||
| 52 | #define C_0007C0_MWU_BUSY 0xFFFFFFFD | ||
| 53 | #define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) | ||
| 54 | #define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) | ||
| 55 | #define C_0007C0_RSIU_BUSY 0xFFFFFFFB | ||
| 56 | #define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) | ||
| 57 | #define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) | ||
| 58 | #define C_0007C0_RCIU_BUSY 0xFFFFFFF7 | ||
| 59 | #define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) | ||
| 60 | #define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) | ||
| 61 | #define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF | ||
| 62 | #define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) | ||
| 63 | #define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) | ||
| 64 | #define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF | ||
| 65 | #define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) | ||
| 66 | #define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) | ||
| 67 | #define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF | ||
| 68 | #define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) | ||
| 69 | #define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) | ||
| 70 | #define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF | ||
| 71 | #define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) | ||
| 72 | #define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) | ||
| 73 | #define C_0007C0_CSI_BUSY 0xFFFFDFFF | ||
| 74 | #define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14) | ||
| 75 | #define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1) | ||
| 76 | #define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF | ||
| 77 | #define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15) | ||
| 78 | #define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1) | ||
| 79 | #define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF | ||
| 80 | #define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) | ||
| 81 | #define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) | ||
| 82 | #define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF | ||
| 83 | #define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) | ||
| 84 | #define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) | ||
| 85 | #define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF | ||
| 86 | #define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) | ||
| 87 | #define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) | ||
| 88 | #define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF | ||
| 89 | #define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) | ||
| 90 | #define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) | ||
| 91 | #define C_0007C0_CP_BUSY 0x7FFFFFFF | ||
| 92 | #define R_000E40_RBBM_STATUS 0x000E40 | ||
| 93 | #define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) | ||
| 94 | #define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) | ||
| 95 | #define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 | ||
| 96 | #define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) | ||
| 97 | #define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) | ||
| 98 | #define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF | ||
| 99 | #define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) | ||
| 100 | #define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) | ||
| 101 | #define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF | ||
| 102 | #define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) | ||
| 103 | #define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) | ||
| 104 | #define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF | ||
| 105 | #define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) | ||
| 106 | #define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) | ||
| 107 | #define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF | ||
| 108 | #define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) | ||
| 109 | #define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) | ||
| 110 | #define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF | ||
| 111 | #define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) | ||
| 112 | #define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) | ||
| 113 | #define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF | ||
| 114 | #define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) | ||
| 115 | #define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) | ||
| 116 | #define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF | ||
| 117 | #define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) | ||
| 118 | #define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) | ||
| 119 | #define C_000E40_ENG_EV_BUSY 0xFFFF7FFF | ||
| 120 | #define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) | ||
| 121 | #define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) | ||
| 122 | #define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF | ||
| 123 | #define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) | ||
| 124 | #define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) | ||
| 125 | #define C_000E40_E2_BUSY 0xFFFDFFFF | ||
| 126 | #define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) | ||
| 127 | #define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) | ||
| 128 | #define C_000E40_RB2D_BUSY 0xFFFBFFFF | ||
| 129 | #define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) | ||
| 130 | #define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) | ||
| 131 | #define C_000E40_RB3D_BUSY 0xFFF7FFFF | ||
| 132 | #define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20) | ||
| 133 | #define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1) | ||
| 134 | #define C_000E40_VAP_BUSY 0xFFEFFFFF | ||
| 135 | #define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) | ||
| 136 | #define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) | ||
| 137 | #define C_000E40_RE_BUSY 0xFFDFFFFF | ||
| 138 | #define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) | ||
| 139 | #define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) | ||
| 140 | #define C_000E40_TAM_BUSY 0xFFBFFFFF | ||
| 141 | #define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) | ||
| 142 | #define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) | ||
| 143 | #define C_000E40_TDM_BUSY 0xFF7FFFFF | ||
| 144 | #define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) | ||
| 145 | #define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) | ||
| 146 | #define C_000E40_PB_BUSY 0xFEFFFFFF | ||
| 147 | #define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25) | ||
| 148 | #define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1) | ||
| 149 | #define C_000E40_TIM_BUSY 0xFDFFFFFF | ||
| 150 | #define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26) | ||
| 151 | #define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1) | ||
| 152 | #define C_000E40_GA_BUSY 0xFBFFFFFF | ||
| 153 | #define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27) | ||
| 154 | #define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1) | ||
| 155 | #define C_000E40_CBA2D_BUSY 0xF7FFFFFF | ||
| 156 | #define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) | ||
| 157 | #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) | ||
| 158 | #define C_000E40_GUI_ACTIVE 0x7FFFFFFF | ||
| 159 | |||
| 160 | #endif | ||
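Most of rs400d.h consists of single-bit busy flags for CP_STAT and RBBM_STATUS; they are consumed with the same polling pattern that rs600_mc_wait_for_idle() uses below. A hedged sketch, assuming the usual rdev->usec_timeout bound and the G_ accessor for the GUI_ACTIVE bit:

    unsigned i;

    for (i = 0; i < rdev->usec_timeout; i++) {
            if (!G_000E40_GUI_ACTIVE(RREG32(R_000E40_RBBM_STATUS)))
                    return 0;          /* engine idle */
            udelay(1);
    }
    return -1;                         /* timed out   */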
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 4a4fe1cb131c..10dfa78762da 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
| @@ -25,27 +25,26 @@ | |||
| 25 | * Alex Deucher | 25 | * Alex Deucher |
| 26 | * Jerome Glisse | 26 | * Jerome Glisse |
| 27 | */ | 27 | */ |
| 28 | /* RS600 / Radeon X1250/X1270 integrated GPU | ||
| 29 | * | ||
| 30 | * This file gathers functions specific to RS600, which is the IGP of | ||
| 31 | * the X1250/X1270 family supporting Intel CPUs (while RS690/RS740 | ||
| 32 | * is the X1250/X1270 part supporting AMD CPUs). The display engine is | ||
| 33 | * the avivo one, the bios is an atombios, and the 3D block is the one of the | ||
| 34 | * R4XX family. The GART is different from the RS400 one and is very | ||
| 35 | * close to the one of the R600 family (R600 likely being an evolution | ||
| 36 | * of the RS600 GART block). | ||
| 37 | */ | ||
| 28 | #include "drmP.h" | 38 | #include "drmP.h" |
| 29 | #include "radeon_reg.h" | ||
| 30 | #include "radeon.h" | 39 | #include "radeon.h" |
| 40 | #include "atom.h" | ||
| 41 | #include "rs600d.h" | ||
| 31 | 42 | ||
| 32 | #include "rs600_reg_safe.h" | 43 | #include "rs600_reg_safe.h" |
| 33 | 44 | ||
| 34 | /* rs600 depends on : */ | ||
| 35 | void r100_hdp_reset(struct radeon_device *rdev); | ||
| 36 | int r100_gui_wait_for_idle(struct radeon_device *rdev); | ||
| 37 | int r300_mc_wait_for_idle(struct radeon_device *rdev); | ||
| 38 | void r420_pipes_init(struct radeon_device *rdev); | ||
| 39 | |||
| 40 | /* This files gather functions specifics to : | ||
| 41 | * rs600 | ||
| 42 | * | ||
| 43 | * Some of these functions might be used by newer ASICs. | ||
| 44 | */ | ||
| 45 | void rs600_gpu_init(struct radeon_device *rdev); | 45 | void rs600_gpu_init(struct radeon_device *rdev); |
| 46 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); | 46 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); |
| 47 | 47 | ||
| 48 | |||
| 49 | /* | 48 | /* |
| 50 | * GART. | 49 | * GART. |
| 51 | */ | 50 | */ |
| @@ -53,18 +52,18 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev) | |||
| 53 | { | 52 | { |
| 54 | uint32_t tmp; | 53 | uint32_t tmp; |
| 55 | 54 | ||
| 56 | tmp = RREG32_MC(RS600_MC_PT0_CNTL); | 55 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
| 57 | tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE); | 56 | tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE; |
| 58 | WREG32_MC(RS600_MC_PT0_CNTL, tmp); | 57 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); |
| 59 | 58 | ||
| 60 | tmp = RREG32_MC(RS600_MC_PT0_CNTL); | 59 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
| 61 | tmp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE; | 60 | tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1); |
| 62 | WREG32_MC(RS600_MC_PT0_CNTL, tmp); | 61 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); |
| 63 | 62 | ||
| 64 | tmp = RREG32_MC(RS600_MC_PT0_CNTL); | 63 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
| 65 | tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE); | 64 | tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE; |
| 66 | WREG32_MC(RS600_MC_PT0_CNTL, tmp); | 65 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); |
| 67 | tmp = RREG32_MC(RS600_MC_PT0_CNTL); | 66 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
| 68 | } | 67 | } |
| 69 | 68 | ||
| 70 | int rs600_gart_init(struct radeon_device *rdev) | 69 | int rs600_gart_init(struct radeon_device *rdev) |
| @@ -86,7 +85,7 @@ int rs600_gart_init(struct radeon_device *rdev) | |||
| 86 | 85 | ||
| 87 | int rs600_gart_enable(struct radeon_device *rdev) | 86 | int rs600_gart_enable(struct radeon_device *rdev) |
| 88 | { | 87 | { |
| 89 | uint32_t tmp; | 88 | u32 tmp; |
| 90 | int r, i; | 89 | int r, i; |
| 91 | 90 | ||
| 92 | if (rdev->gart.table.vram.robj == NULL) { | 91 | if (rdev->gart.table.vram.robj == NULL) { |
| @@ -96,46 +95,50 @@ int rs600_gart_enable(struct radeon_device *rdev) | |||
| 96 | r = radeon_gart_table_vram_pin(rdev); | 95 | r = radeon_gart_table_vram_pin(rdev); |
| 97 | if (r) | 96 | if (r) |
| 98 | return r; | 97 | return r; |
| 98 | /* Enable bus master */ | ||
| 99 | tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS; | ||
| 100 | WREG32(R_00004C_BUS_CNTL, tmp); | ||
| 99 | /* FIXME: setup default page */ | 101 | /* FIXME: setup default page */ |
| 100 | WREG32_MC(RS600_MC_PT0_CNTL, | 102 | WREG32_MC(R_000100_MC_PT0_CNTL, |
| 101 | (RS600_EFFECTIVE_L2_CACHE_SIZE(6) | | 103 | (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) | |
| 102 | RS600_EFFECTIVE_L2_QUEUE_SIZE(6))); | 104 | S_000100_EFFECTIVE_L2_QUEUE_SIZE(6))); |
| 103 | for (i = 0; i < 19; i++) { | 105 | for (i = 0; i < 19; i++) { |
| 104 | WREG32_MC(RS600_MC_PT0_CLIENT0_CNTL + i, | 106 | WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i, |
| 105 | (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE | | 107 | S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) | |
| 106 | RS600_SYSTEM_ACCESS_MODE_IN_SYS | | 108 | S_00016C_SYSTEM_ACCESS_MODE_MASK( |
| 107 | RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE | | 109 | V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) | |
| 108 | RS600_EFFECTIVE_L1_CACHE_SIZE(3) | | 110 | S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS( |
| 109 | RS600_ENABLE_FRAGMENT_PROCESSING | | 111 | V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) | |
| 110 | RS600_EFFECTIVE_L1_QUEUE_SIZE(3))); | 112 | S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) | |
| 113 | S_00016C_ENABLE_FRAGMENT_PROCESSING(1) | | ||
| 114 | S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1)); | ||
| 111 | } | 115 | } |
| 112 | 116 | ||
| 113 | /* System context map to GART space */ | 117 | /* System context map to GART space */ |
| 114 | WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_location); | 118 | WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start); |
| 115 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; | 119 | WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end); |
| 116 | WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, tmp); | ||
| 117 | 120 | ||
| 118 | /* enable first context */ | 121 | /* enable first context */ |
| 119 | WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_location); | 122 | WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start); |
| 120 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; | 123 | WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end); |
| 121 | WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR, tmp); | 124 | WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL, |
| 122 | WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL, | 125 | S_000102_ENABLE_PAGE_TABLE(1) | |
| 123 | (RS600_ENABLE_PAGE_TABLE | RS600_PAGE_TABLE_TYPE_FLAT)); | 126 | S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT)); |
| 124 | /* disable all other contexts */ | 127 | /* disable all other contexts */ |
| 125 | for (i = 1; i < 8; i++) { | 128 | for (i = 1; i < 8; i++) { |
| 126 | WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL + i, 0); | 129 | WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0); |
| 127 | } | 130 | } |
| 128 | 131 | ||
| 129 | /* setup the page table */ | 132 | /* setup the page table */ |
| 130 | WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, | 133 | WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, |
| 131 | rdev->gart.table_addr); | 134 | rdev->gart.table_addr); |
| 132 | WREG32_MC(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); | 135 | WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); |
| 133 | 136 | ||
| 134 | /* enable page tables */ | 137 | /* enable page tables */ |
| 135 | tmp = RREG32_MC(RS600_MC_PT0_CNTL); | 138 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
| 136 | WREG32_MC(RS600_MC_PT0_CNTL, (tmp | RS600_ENABLE_PT)); | 139 | WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1))); |
| 137 | tmp = RREG32_MC(RS600_MC_CNTL1); | 140 | tmp = RREG32_MC(R_000009_MC_CNTL1); |
| 138 | WREG32_MC(RS600_MC_CNTL1, (tmp | RS600_ENABLE_PAGE_TABLES)); | 141 | WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1))); |
| 139 | rs600_gart_tlb_flush(rdev); | 142 | rs600_gart_tlb_flush(rdev); |
| 140 | rdev->gart.ready = true; | 143 | rdev->gart.ready = true; |
| 141 | return 0; | 144 | return 0; |
| @@ -146,10 +149,9 @@ void rs600_gart_disable(struct radeon_device *rdev) | |||
| 146 | uint32_t tmp; | 149 | uint32_t tmp; |
| 147 | 150 | ||
| 148 | /* FIXME: disable out of gart access */ | 151 | /* FIXME: disable out of gart access */ |
| 149 | WREG32_MC(RS600_MC_PT0_CNTL, 0); | 152 | WREG32_MC(R_000100_MC_PT0_CNTL, 0); |
| 150 | tmp = RREG32_MC(RS600_MC_CNTL1); | 153 | tmp = RREG32_MC(R_000009_MC_CNTL1); |
| 151 | tmp &= ~RS600_ENABLE_PAGE_TABLES; | 154 | WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES); |
| 152 | WREG32_MC(RS600_MC_CNTL1, tmp); | ||
| 153 | if (rdev->gart.table.vram.robj) { | 155 | if (rdev->gart.table.vram.robj) { |
| 154 | radeon_object_kunmap(rdev->gart.table.vram.robj); | 156 | radeon_object_kunmap(rdev->gart.table.vram.robj); |
| 155 | radeon_object_unpin(rdev->gart.table.vram.robj); | 157 | radeon_object_unpin(rdev->gart.table.vram.robj); |
| @@ -183,129 +185,61 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
| 183 | return 0; | 185 | return 0; |
| 184 | } | 186 | } |
| 185 | 187 | ||
| 186 | |||
| 187 | /* | ||
| 188 | * MC. | ||
| 189 | */ | ||
| 190 | void rs600_mc_disable_clients(struct radeon_device *rdev) | ||
| 191 | { | ||
| 192 | unsigned tmp; | ||
| 193 | |||
| 194 | if (r100_gui_wait_for_idle(rdev)) { | ||
| 195 | printk(KERN_WARNING "Failed to wait GUI idle while " | ||
| 196 | "programming pipes. Bad things might happen.\n"); | ||
| 197 | } | ||
| 198 | |||
| 199 | rv515_vga_render_disable(rdev); | ||
| 200 | |||
| 201 | tmp = RREG32(AVIVO_D1VGA_CONTROL); | ||
| 202 | WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE); | ||
| 203 | tmp = RREG32(AVIVO_D2VGA_CONTROL); | ||
| 204 | WREG32(AVIVO_D2VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE); | ||
| 205 | |||
| 206 | tmp = RREG32(AVIVO_D1CRTC_CONTROL); | ||
| 207 | WREG32(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN); | ||
| 208 | tmp = RREG32(AVIVO_D2CRTC_CONTROL); | ||
| 209 | WREG32(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN); | ||
| 210 | |||
| 211 | /* make sure all previous write got through */ | ||
| 212 | tmp = RREG32(AVIVO_D2CRTC_CONTROL); | ||
| 213 | |||
| 214 | mdelay(1); | ||
| 215 | } | ||
| 216 | |||
| 217 | int rs600_mc_init(struct radeon_device *rdev) | ||
| 218 | { | ||
| 219 | uint32_t tmp; | ||
| 220 | int r; | ||
| 221 | |||
| 222 | if (r100_debugfs_rbbm_init(rdev)) { | ||
| 223 | DRM_ERROR("Failed to register debugfs file for RBBM !\n"); | ||
| 224 | } | ||
| 225 | |||
| 226 | rs600_gpu_init(rdev); | ||
| 227 | rs600_gart_disable(rdev); | ||
| 228 | |||
| 229 | /* Setup GPU memory space */ | ||
| 230 | rdev->mc.vram_location = 0xFFFFFFFFUL; | ||
| 231 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | ||
| 232 | r = radeon_mc_setup(rdev); | ||
| 233 | if (r) { | ||
| 234 | return r; | ||
| 235 | } | ||
| 236 | |||
| 237 | /* Program GPU memory space */ | ||
| 238 | /* Enable bus master */ | ||
| 239 | tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS; | ||
| 240 | WREG32(RADEON_BUS_CNTL, tmp); | ||
| 241 | /* FIXME: What does AGP means for such chipset ? */ | ||
| 242 | WREG32_MC(RS600_MC_AGP_LOCATION, 0x0FFFFFFF); | ||
| 243 | /* FIXME: are this AGP reg in indirect MC range ? */ | ||
| 244 | WREG32_MC(RS600_MC_AGP_BASE, 0); | ||
| 245 | WREG32_MC(RS600_MC_AGP_BASE_2, 0); | ||
| 246 | rs600_mc_disable_clients(rdev); | ||
| 247 | if (rs600_mc_wait_for_idle(rdev)) { | ||
| 248 | printk(KERN_WARNING "Failed to wait MC idle while " | ||
| 249 | "programming pipes. Bad things might happen.\n"); | ||
| 250 | } | ||
| 251 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; | ||
| 252 | tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16); | ||
| 253 | tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16); | ||
| 254 | WREG32_MC(RS600_MC_FB_LOCATION, tmp); | ||
| 255 | WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16); | ||
| 256 | return 0; | ||
| 257 | } | ||
| 258 | |||
| 259 | void rs600_mc_fini(struct radeon_device *rdev) | ||
| 260 | { | ||
| 261 | } | ||
| 262 | |||
| 263 | |||
| 264 | /* | ||
| 265 | * Interrupts | ||
| 266 | */ | ||
| 267 | int rs600_irq_set(struct radeon_device *rdev) | 188 | int rs600_irq_set(struct radeon_device *rdev) |
| 268 | { | 189 | { |
| 269 | uint32_t tmp = 0; | 190 | uint32_t tmp = 0; |
| 270 | uint32_t mode_int = 0; | 191 | uint32_t mode_int = 0; |
| 271 | 192 | ||
| 272 | if (rdev->irq.sw_int) { | 193 | if (rdev->irq.sw_int) { |
| 273 | tmp |= RADEON_SW_INT_ENABLE; | 194 | tmp |= S_000040_SW_INT_EN(1); |
| 274 | } | 195 | } |
| 275 | if (rdev->irq.crtc_vblank_int[0]) { | 196 | if (rdev->irq.crtc_vblank_int[0]) { |
| 276 | mode_int |= AVIVO_D1MODE_INT_MASK; | 197 | mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); |
| 277 | } | 198 | } |
| 278 | if (rdev->irq.crtc_vblank_int[1]) { | 199 | if (rdev->irq.crtc_vblank_int[1]) { |
| 279 | mode_int |= AVIVO_D2MODE_INT_MASK; | 200 | mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1); |
| 280 | } | 201 | } |
| 281 | WREG32(RADEON_GEN_INT_CNTL, tmp); | 202 | WREG32(R_000040_GEN_INT_CNTL, tmp); |
| 282 | WREG32(AVIVO_DxMODE_INT_MASK, mode_int); | 203 | WREG32(R_006540_DxMODE_INT_MASK, mode_int); |
| 283 | return 0; | 204 | return 0; |
| 284 | } | 205 | } |
| 285 | 206 | ||
| 286 | static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) | 207 | static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) |
| 287 | { | 208 | { |
| 288 | uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); | 209 | uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); |
| 289 | uint32_t irq_mask = RADEON_SW_INT_TEST; | 210 | uint32_t irq_mask = ~C_000044_SW_INT; |
| 290 | 211 | ||
| 291 | if (irqs & AVIVO_DISPLAY_INT_STATUS) { | 212 | if (G_000044_DISPLAY_INT_STAT(irqs)) { |
| 292 | *r500_disp_int = RREG32(AVIVO_DISP_INTERRUPT_STATUS); | 213 | *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); |
| 293 | if (*r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) { | 214 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) { |
| 294 | WREG32(AVIVO_D1MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK); | 215 | WREG32(R_006534_D1MODE_VBLANK_STATUS, |
| 216 | S_006534_D1MODE_VBLANK_ACK(1)); | ||
| 295 | } | 217 | } |
| 296 | if (*r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) { | 218 | if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) { |
| 297 | WREG32(AVIVO_D2MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK); | 219 | WREG32(R_006D34_D2MODE_VBLANK_STATUS, |
| 220 | S_006D34_D2MODE_VBLANK_ACK(1)); | ||
| 298 | } | 221 | } |
| 299 | } else { | 222 | } else { |
| 300 | *r500_disp_int = 0; | 223 | *r500_disp_int = 0; |
| 301 | } | 224 | } |
| 302 | 225 | ||
| 303 | if (irqs) { | 226 | if (irqs) { |
| 304 | WREG32(RADEON_GEN_INT_STATUS, irqs); | 227 | WREG32(R_000044_GEN_INT_STATUS, irqs); |
| 305 | } | 228 | } |
| 306 | return irqs & irq_mask; | 229 | return irqs & irq_mask; |
| 307 | } | 230 | } |
| 308 | 231 | ||
| 232 | void rs600_irq_disable(struct radeon_device *rdev) | ||
| 233 | { | ||
| 234 | u32 tmp; | ||
| 235 | |||
| 236 | WREG32(R_000040_GEN_INT_CNTL, 0); | ||
| 237 | WREG32(R_006540_DxMODE_INT_MASK, 0); | ||
| 238 | /* Wait and acknowledge irq */ | ||
| 239 | mdelay(1); | ||
| 240 | rs600_irq_ack(rdev, &tmp); | ||
| 241 | } | ||
| 242 | |||
| 309 | int rs600_irq_process(struct radeon_device *rdev) | 243 | int rs600_irq_process(struct radeon_device *rdev) |
| 310 | { | 244 | { |
| 311 | uint32_t status; | 245 | uint32_t status; |
| @@ -317,16 +251,13 @@ int rs600_irq_process(struct radeon_device *rdev) | |||
| 317 | } | 251 | } |
| 318 | while (status || r500_disp_int) { | 252 | while (status || r500_disp_int) { |
| 319 | /* SW interrupt */ | 253 | /* SW interrupt */ |
| 320 | if (status & RADEON_SW_INT_TEST) { | 254 | if (G_000040_SW_INT_EN(status)) |
| 321 | radeon_fence_process(rdev); | 255 | radeon_fence_process(rdev); |
| 322 | } | ||
| 323 | /* Vertical blank interrupts */ | 256 | /* Vertical blank interrupts */ |
| 324 | if (r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) { | 257 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) |
| 325 | drm_handle_vblank(rdev->ddev, 0); | 258 | drm_handle_vblank(rdev->ddev, 0); |
| 326 | } | 259 | if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) |
| 327 | if (r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) { | ||
| 328 | drm_handle_vblank(rdev->ddev, 1); | 260 | drm_handle_vblank(rdev->ddev, 1); |
| 329 | } | ||
| 330 | status = rs600_irq_ack(rdev, &r500_disp_int); | 261 | status = rs600_irq_ack(rdev, &r500_disp_int); |
| 331 | } | 262 | } |
| 332 | return IRQ_HANDLED; | 263 | return IRQ_HANDLED; |
| @@ -335,53 +266,34 @@ int rs600_irq_process(struct radeon_device *rdev) | |||
| 335 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc) | 266 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc) |
| 336 | { | 267 | { |
| 337 | if (crtc == 0) | 268 | if (crtc == 0) |
| 338 | return RREG32(AVIVO_D1CRTC_FRAME_COUNT); | 269 | return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT); |
| 339 | else | 270 | else |
| 340 | return RREG32(AVIVO_D2CRTC_FRAME_COUNT); | 271 | return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT); |
| 341 | } | 272 | } |
| 342 | 273 | ||
| 343 | |||
| 344 | /* | ||
| 345 | * Global GPU functions | ||
| 346 | */ | ||
| 347 | int rs600_mc_wait_for_idle(struct radeon_device *rdev) | 274 | int rs600_mc_wait_for_idle(struct radeon_device *rdev) |
| 348 | { | 275 | { |
| 349 | unsigned i; | 276 | unsigned i; |
| 350 | uint32_t tmp; | ||
| 351 | 277 | ||
| 352 | for (i = 0; i < rdev->usec_timeout; i++) { | 278 | for (i = 0; i < rdev->usec_timeout; i++) { |
| 353 | /* read MC_STATUS */ | 279 | if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS))) |
| 354 | tmp = RREG32_MC(RS600_MC_STATUS); | ||
| 355 | if (tmp & RS600_MC_STATUS_IDLE) { | ||
| 356 | return 0; | 280 | return 0; |
| 357 | } | 281 | udelay(1); |
| 358 | DRM_UDELAY(1); | ||
| 359 | } | 282 | } |
| 360 | return -1; | 283 | return -1; |
| 361 | } | 284 | } |
| 362 | 285 | ||
| 363 | void rs600_errata(struct radeon_device *rdev) | ||
| 364 | { | ||
| 365 | rdev->pll_errata = 0; | ||
| 366 | } | ||
| 367 | |||
| 368 | void rs600_gpu_init(struct radeon_device *rdev) | 286 | void rs600_gpu_init(struct radeon_device *rdev) |
| 369 | { | 287 | { |
| 370 | /* FIXME: HDP same place on rs600 ? */ | 288 | /* FIXME: HDP same place on rs600 ? */ |
| 371 | r100_hdp_reset(rdev); | 289 | r100_hdp_reset(rdev); |
| 372 | rv515_vga_render_disable(rdev); | ||
| 373 | /* FIXME: is this correct ? */ | 290 | /* FIXME: is this correct ? */ |
| 374 | r420_pipes_init(rdev); | 291 | r420_pipes_init(rdev); |
| 375 | if (rs600_mc_wait_for_idle(rdev)) { | 292 | /* Wait for mc idle */ |
| 376 | printk(KERN_WARNING "Failed to wait MC idle while " | 293 | if (rs600_mc_wait_for_idle(rdev)) |
| 377 | "programming pipes. Bad things might happen.\n"); | 294 | dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); |
| 378 | } | ||
| 379 | } | 295 | } |
| 380 | 296 | ||
| 381 | |||
| 382 | /* | ||
| 383 | * VRAM info. | ||
| 384 | */ | ||
| 385 | void rs600_vram_info(struct radeon_device *rdev) | 297 | void rs600_vram_info(struct radeon_device *rdev) |
| 386 | { | 298 | { |
| 387 | /* FIXME: to do or is these values sane ? */ | 299 | /* FIXME: to do or is these values sane ? */ |
| @@ -394,31 +306,206 @@ void rs600_bandwidth_update(struct radeon_device *rdev) | |||
| 394 | /* FIXME: implement, should this be like rs690 ? */ | 306 | /* FIXME: implement, should this be like rs690 ? */ |
| 395 | } | 307 | } |
| 396 | 308 | ||
| 397 | |||
| 398 | /* | ||
| 399 | * Indirect registers accessor | ||
| 400 | */ | ||
| 401 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) | 309 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
| 402 | { | 310 | { |
| 403 | uint32_t r; | 311 | WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | |
| 404 | 312 | S_000070_MC_IND_CITF_ARB0(1)); | |
| 405 | WREG32(RS600_MC_INDEX, | 313 | return RREG32(R_000074_MC_IND_DATA); |
| 406 | ((reg & RS600_MC_ADDR_MASK) | RS600_MC_IND_CITF_ARB0)); | ||
| 407 | r = RREG32(RS600_MC_DATA); | ||
| 408 | return r; | ||
| 409 | } | 314 | } |
| 410 | 315 | ||
| 411 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | 316 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
| 412 | { | 317 | { |
| 413 | WREG32(RS600_MC_INDEX, | 318 | WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | |
| 414 | RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 | | 319 | S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1)); |
| 415 | ((reg) & RS600_MC_ADDR_MASK)); | 320 | WREG32(R_000074_MC_IND_DATA, v); |
| 416 | WREG32(RS600_MC_DATA, v); | ||
| 417 | } | 321 | } |
| 418 | 322 | ||
| 419 | int rs600_init(struct radeon_device *rdev) | 323 | void rs600_debugfs(struct radeon_device *rdev) |
| 324 | { | ||
| 325 | if (r100_debugfs_rbbm_init(rdev)) | ||
| 326 | DRM_ERROR("Failed to register debugfs file for RBBM !\n"); | ||
| 327 | } | ||
| 328 | |||
| 329 | void rs600_set_safe_registers(struct radeon_device *rdev) | ||
| 420 | { | 330 | { |
| 421 | rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm; | 331 | rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm; |
| 422 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm); | 332 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm); |
| 333 | } | ||
| 334 | |||
| 335 | static void rs600_mc_program(struct radeon_device *rdev) | ||
| 336 | { | ||
| 337 | struct rv515_mc_save save; | ||
| 338 | |||
| 339 | /* Stops all mc clients */ | ||
| 340 | rv515_mc_stop(rdev, &save); | ||
| 341 | |||
| 342 | /* Wait for mc idle */ | ||
| 343 | if (rs600_mc_wait_for_idle(rdev)) | ||
| 344 | dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); | ||
| 345 | |||
| 346 | /* FIXME: What does AGP mean for such a chipset? */ | ||
| 347 | WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF); | ||
| 348 | WREG32_MC(R_000006_AGP_BASE, 0); | ||
| 349 | WREG32_MC(R_000007_AGP_BASE_2, 0); | ||
| 350 | /* Program MC */ | ||
| 351 | WREG32_MC(R_000004_MC_FB_LOCATION, | ||
| 352 | S_000004_MC_FB_START(rdev->mc.vram_start >> 16) | | ||
| 353 | S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16)); | ||
| 354 | WREG32(R_000134_HDP_FB_LOCATION, | ||
| 355 | S_000134_HDP_FB_START(rdev->mc.vram_start >> 16)); | ||
| 356 | |||
| 357 | rv515_mc_resume(rdev, &save); | ||
| 358 | } | ||
| 359 | |||
| 360 | static int rs600_startup(struct radeon_device *rdev) | ||
| 361 | { | ||
| 362 | int r; | ||
| 363 | |||
| 364 | rs600_mc_program(rdev); | ||
| 365 | /* Resume clock */ | ||
| 366 | rv515_clock_startup(rdev); | ||
| 367 | /* Initialize GPU configuration (# pipes, ...) */ | ||
| 368 | rs600_gpu_init(rdev); | ||
| 369 | /* Initialize GART (initialize after TTM so we can allocate | ||
| 370 | * memory through TTM but finalize after TTM) */ | ||
| 371 | r = rs600_gart_enable(rdev); | ||
| 372 | if (r) | ||
| 373 | return r; | ||
| 374 | /* Enable IRQ */ | ||
| 375 | rdev->irq.sw_int = true; | ||
| 376 | rs600_irq_set(rdev); | ||
| 377 | /* 1M ring buffer */ | ||
| 378 | r = r100_cp_init(rdev, 1024 * 1024); | ||
| 379 | if (r) { | ||
| 380 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | ||
| 381 | return r; | ||
| 382 | } | ||
| 383 | r = r100_wb_init(rdev); | ||
| 384 | if (r) | ||
| 385 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
| 386 | r = r100_ib_init(rdev); | ||
| 387 | if (r) { | ||
| 388 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | ||
| 389 | return r; | ||
| 390 | } | ||
| 391 | return 0; | ||
| 392 | } | ||
| 393 | |||
| 394 | int rs600_resume(struct radeon_device *rdev) | ||
| 395 | { | ||
| 396 | /* Make sure GART is not working */ | ||
| 397 | rs600_gart_disable(rdev); | ||
| 398 | /* Resume clock before doing reset */ | ||
| 399 | rv515_clock_startup(rdev); | ||
| 400 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
| 401 | if (radeon_gpu_reset(rdev)) { | ||
| 402 | dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
| 403 | RREG32(R_000E40_RBBM_STATUS), | ||
| 404 | RREG32(R_0007C0_CP_STAT)); | ||
| 405 | } | ||
| 406 | /* post */ | ||
| 407 | atom_asic_init(rdev->mode_info.atom_context); | ||
| 408 | /* Resume clock after posting */ | ||
| 409 | rv515_clock_startup(rdev); | ||
| 410 | return rs600_startup(rdev); | ||
| 411 | } | ||
| 412 | |||
| 413 | int rs600_suspend(struct radeon_device *rdev) | ||
| 414 | { | ||
| 415 | r100_cp_disable(rdev); | ||
| 416 | r100_wb_disable(rdev); | ||
| 417 | rs600_irq_disable(rdev); | ||
| 418 | rs600_gart_disable(rdev); | ||
| 419 | return 0; | ||
| 420 | } | ||
| 421 | |||
| 422 | void rs600_fini(struct radeon_device *rdev) | ||
| 423 | { | ||
| 424 | rs600_suspend(rdev); | ||
| 425 | r100_cp_fini(rdev); | ||
| 426 | r100_wb_fini(rdev); | ||
| 427 | r100_ib_fini(rdev); | ||
| 428 | radeon_gem_fini(rdev); | ||
| 429 | rs600_gart_fini(rdev); | ||
| 430 | radeon_irq_kms_fini(rdev); | ||
| 431 | radeon_fence_driver_fini(rdev); | ||
| 432 | radeon_object_fini(rdev); | ||
| 433 | radeon_atombios_fini(rdev); | ||
| 434 | kfree(rdev->bios); | ||
| 435 | rdev->bios = NULL; | ||
| 436 | } | ||
| 437 | |||
| 438 | int rs600_init(struct radeon_device *rdev) | ||
| 439 | { | ||
| 440 | int r; | ||
| 441 | |||
| 442 | /* Disable VGA */ | ||
| 443 | rv515_vga_render_disable(rdev); | ||
| 444 | /* Initialize scratch registers */ | ||
| 445 | radeon_scratch_init(rdev); | ||
| 446 | /* Initialize surface registers */ | ||
| 447 | radeon_surface_init(rdev); | ||
| 448 | /* BIOS */ | ||
| 449 | if (!radeon_get_bios(rdev)) { | ||
| 450 | if (ASIC_IS_AVIVO(rdev)) | ||
| 451 | return -EINVAL; | ||
| 452 | } | ||
| 453 | if (rdev->is_atom_bios) { | ||
| 454 | r = radeon_atombios_init(rdev); | ||
| 455 | if (r) | ||
| 456 | return r; | ||
| 457 | } else { | ||
| 458 | dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n"); | ||
| 459 | return -EINVAL; | ||
| 460 | } | ||
| 461 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
| 462 | if (radeon_gpu_reset(rdev)) { | ||
| 463 | dev_warn(rdev->dev, | ||
| 464 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
| 465 | RREG32(R_000E40_RBBM_STATUS), | ||
| 466 | RREG32(R_0007C0_CP_STAT)); | ||
| 467 | } | ||
| 468 | /* check if cards are posted or not */ | ||
| 469 | if (!radeon_card_posted(rdev) && rdev->bios) { | ||
| 470 | DRM_INFO("GPU not posted. posting now...\n"); | ||
| 471 | atom_asic_init(rdev->mode_info.atom_context); | ||
| 472 | } | ||
| 473 | /* Initialize clocks */ | ||
| 474 | radeon_get_clock_info(rdev->ddev); | ||
| 475 | /* Get vram information */ | ||
| 476 | rs600_vram_info(rdev); | ||
| 477 | /* Initialize memory controller (also test AGP) */ | ||
| 478 | r = r420_mc_init(rdev); | ||
| 479 | if (r) | ||
| 480 | return r; | ||
| 481 | rs600_debugfs(rdev); | ||
| 482 | /* Fence driver */ | ||
| 483 | r = radeon_fence_driver_init(rdev); | ||
| 484 | if (r) | ||
| 485 | return r; | ||
| 486 | r = radeon_irq_kms_init(rdev); | ||
| 487 | if (r) | ||
| 488 | return r; | ||
| 489 | /* Memory manager */ | ||
| 490 | r = radeon_object_init(rdev); | ||
| 491 | if (r) | ||
| 492 | return r; | ||
| 493 | r = rs600_gart_init(rdev); | ||
| 494 | if (r) | ||
| 495 | return r; | ||
| 496 | rs600_set_safe_registers(rdev); | ||
| 497 | rdev->accel_working = true; | ||
| 498 | r = rs600_startup(rdev); | ||
| 499 | if (r) { | ||
| 500 | /* Something went wrong with the accel init, stop accel */ | ||
| 501 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | ||
| 502 | rs600_suspend(rdev); | ||
| 503 | r100_cp_fini(rdev); | ||
| 504 | r100_wb_fini(rdev); | ||
| 505 | r100_ib_fini(rdev); | ||
| 506 | rs600_gart_fini(rdev); | ||
| 507 | radeon_irq_kms_fini(rdev); | ||
| 508 | rdev->accel_working = false; | ||
| 509 | } | ||
| 423 | return 0; | 510 | return 0; |
| 424 | } | 511 | } |
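The rs600_mc_rreg()/rs600_mc_wreg() accessors above implement the index/data style of indirect MC register access. Below is a minimal sketch of how a caller might do a read-modify-write through the RREG32_MC()/WREG32_MC() wrappers used elsewhere in this patch, assuming the rs600d.h field macros introduced in the next file; the helper name is illustrative only and not part of the patch.

/* Sketch only: read-modify-write of an indirectly addressed MC register,
 * assuming RREG32_MC()/WREG32_MC() dispatch to the accessors above. */
static void example_rs600_set_page_tables(struct radeon_device *rdev, bool enable)
{
	uint32_t tmp;

	tmp = RREG32_MC(R_000009_MC_CNTL1);		/* index write + data read */
	tmp &= C_000009_ENABLE_PAGE_TABLES;		/* clear the field */
	if (enable)
		tmp |= S_000009_ENABLE_PAGE_TABLES(1);	/* set the new value */
	WREG32_MC(R_000009_MC_CNTL1, tmp);		/* index write + data write */
}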
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h new file mode 100644 index 000000000000..81308924859a --- /dev/null +++ b/drivers/gpu/drm/radeon/rs600d.h | |||
| @@ -0,0 +1,470 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #ifndef __RS600D_H__ | ||
| 29 | #define __RS600D_H__ | ||
| 30 | |||
| 31 | /* Registers */ | ||
| 32 | #define R_000040_GEN_INT_CNTL 0x000040 | ||
| 33 | #define S_000040_DISPLAY_INT_STATUS(x) (((x) & 0x1) << 0) | ||
| 34 | #define G_000040_DISPLAY_INT_STATUS(x) (((x) >> 0) & 0x1) | ||
| 35 | #define C_000040_DISPLAY_INT_STATUS 0xFFFFFFFE | ||
| 36 | #define S_000040_DMA_VIPH0_INT_EN(x) (((x) & 0x1) << 12) | ||
| 37 | #define G_000040_DMA_VIPH0_INT_EN(x) (((x) >> 12) & 0x1) | ||
| 38 | #define C_000040_DMA_VIPH0_INT_EN 0xFFFFEFFF | ||
| 39 | #define S_000040_CRTC2_VSYNC(x) (((x) & 0x1) << 6) | ||
| 40 | #define G_000040_CRTC2_VSYNC(x) (((x) >> 6) & 0x1) | ||
| 41 | #define C_000040_CRTC2_VSYNC 0xFFFFFFBF | ||
| 42 | #define S_000040_SNAPSHOT2(x) (((x) & 0x1) << 7) | ||
| 43 | #define G_000040_SNAPSHOT2(x) (((x) >> 7) & 0x1) | ||
| 44 | #define C_000040_SNAPSHOT2 0xFFFFFF7F | ||
| 45 | #define S_000040_CRTC2_VBLANK(x) (((x) & 0x1) << 9) | ||
| 46 | #define G_000040_CRTC2_VBLANK(x) (((x) >> 9) & 0x1) | ||
| 47 | #define C_000040_CRTC2_VBLANK 0xFFFFFDFF | ||
| 48 | #define S_000040_FP2_DETECT(x) (((x) & 0x1) << 10) | ||
| 49 | #define G_000040_FP2_DETECT(x) (((x) >> 10) & 0x1) | ||
| 50 | #define C_000040_FP2_DETECT 0xFFFFFBFF | ||
| 51 | #define S_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) & 0x1) << 11) | ||
| 52 | #define G_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) >> 11) & 0x1) | ||
| 53 | #define C_000040_VSYNC_DIFF_OVER_LIMIT 0xFFFFF7FF | ||
| 54 | #define S_000040_DMA_VIPH1_INT_EN(x) (((x) & 0x1) << 13) | ||
| 55 | #define G_000040_DMA_VIPH1_INT_EN(x) (((x) >> 13) & 0x1) | ||
| 56 | #define C_000040_DMA_VIPH1_INT_EN 0xFFFFDFFF | ||
| 57 | #define S_000040_DMA_VIPH2_INT_EN(x) (((x) & 0x1) << 14) | ||
| 58 | #define G_000040_DMA_VIPH2_INT_EN(x) (((x) >> 14) & 0x1) | ||
| 59 | #define C_000040_DMA_VIPH2_INT_EN 0xFFFFBFFF | ||
| 60 | #define S_000040_DMA_VIPH3_INT_EN(x) (((x) & 0x1) << 15) | ||
| 61 | #define G_000040_DMA_VIPH3_INT_EN(x) (((x) >> 15) & 0x1) | ||
| 62 | #define C_000040_DMA_VIPH3_INT_EN 0xFFFF7FFF | ||
| 63 | #define S_000040_I2C_INT_EN(x) (((x) & 0x1) << 17) | ||
| 64 | #define G_000040_I2C_INT_EN(x) (((x) >> 17) & 0x1) | ||
| 65 | #define C_000040_I2C_INT_EN 0xFFFDFFFF | ||
| 66 | #define S_000040_GUI_IDLE(x) (((x) & 0x1) << 19) | ||
| 67 | #define G_000040_GUI_IDLE(x) (((x) >> 19) & 0x1) | ||
| 68 | #define C_000040_GUI_IDLE 0xFFF7FFFF | ||
| 69 | #define S_000040_VIPH_INT_EN(x) (((x) & 0x1) << 24) | ||
| 70 | #define G_000040_VIPH_INT_EN(x) (((x) >> 24) & 0x1) | ||
| 71 | #define C_000040_VIPH_INT_EN 0xFEFFFFFF | ||
| 72 | #define S_000040_SW_INT_EN(x) (((x) & 0x1) << 25) | ||
| 73 | #define G_000040_SW_INT_EN(x) (((x) >> 25) & 0x1) | ||
| 74 | #define C_000040_SW_INT_EN 0xFDFFFFFF | ||
| 75 | #define S_000040_GEYSERVILLE(x) (((x) & 0x1) << 27) | ||
| 76 | #define G_000040_GEYSERVILLE(x) (((x) >> 27) & 0x1) | ||
| 77 | #define C_000040_GEYSERVILLE 0xF7FFFFFF | ||
| 78 | #define S_000040_HDCP_AUTHORIZED_INT(x) (((x) & 0x1) << 28) | ||
| 79 | #define G_000040_HDCP_AUTHORIZED_INT(x) (((x) >> 28) & 0x1) | ||
| 80 | #define C_000040_HDCP_AUTHORIZED_INT 0xEFFFFFFF | ||
| 81 | #define S_000040_DVI_I2C_INT(x) (((x) & 0x1) << 29) | ||
| 82 | #define G_000040_DVI_I2C_INT(x) (((x) >> 29) & 0x1) | ||
| 83 | #define C_000040_DVI_I2C_INT 0xDFFFFFFF | ||
| 84 | #define S_000040_GUIDMA(x) (((x) & 0x1) << 30) | ||
| 85 | #define G_000040_GUIDMA(x) (((x) >> 30) & 0x1) | ||
| 86 | #define C_000040_GUIDMA 0xBFFFFFFF | ||
| 87 | #define S_000040_VIDDMA(x) (((x) & 0x1) << 31) | ||
| 88 | #define G_000040_VIDDMA(x) (((x) >> 31) & 0x1) | ||
| 89 | #define C_000040_VIDDMA 0x7FFFFFFF | ||
| 90 | #define R_000044_GEN_INT_STATUS 0x000044 | ||
| 91 | #define S_000044_DISPLAY_INT_STAT(x) (((x) & 0x1) << 0) | ||
| 92 | #define G_000044_DISPLAY_INT_STAT(x) (((x) >> 0) & 0x1) | ||
| 93 | #define C_000044_DISPLAY_INT_STAT 0xFFFFFFFE | ||
| 94 | #define S_000044_VGA_INT_STAT(x) (((x) & 0x1) << 1) | ||
| 95 | #define G_000044_VGA_INT_STAT(x) (((x) >> 1) & 0x1) | ||
| 96 | #define C_000044_VGA_INT_STAT 0xFFFFFFFD | ||
| 97 | #define S_000044_CAP0_INT_ACTIVE(x) (((x) & 0x1) << 8) | ||
| 98 | #define G_000044_CAP0_INT_ACTIVE(x) (((x) >> 8) & 0x1) | ||
| 99 | #define C_000044_CAP0_INT_ACTIVE 0xFFFFFEFF | ||
| 100 | #define S_000044_DMA_VIPH0_INT(x) (((x) & 0x1) << 12) | ||
| 101 | #define G_000044_DMA_VIPH0_INT(x) (((x) >> 12) & 0x1) | ||
| 102 | #define C_000044_DMA_VIPH0_INT 0xFFFFEFFF | ||
| 103 | #define S_000044_DMA_VIPH1_INT(x) (((x) & 0x1) << 13) | ||
| 104 | #define G_000044_DMA_VIPH1_INT(x) (((x) >> 13) & 0x1) | ||
| 105 | #define C_000044_DMA_VIPH1_INT 0xFFFFDFFF | ||
| 106 | #define S_000044_DMA_VIPH2_INT(x) (((x) & 0x1) << 14) | ||
| 107 | #define G_000044_DMA_VIPH2_INT(x) (((x) >> 14) & 0x1) | ||
| 108 | #define C_000044_DMA_VIPH2_INT 0xFFFFBFFF | ||
| 109 | #define S_000044_DMA_VIPH3_INT(x) (((x) & 0x1) << 15) | ||
| 110 | #define G_000044_DMA_VIPH3_INT(x) (((x) >> 15) & 0x1) | ||
| 111 | #define C_000044_DMA_VIPH3_INT 0xFFFF7FFF | ||
| 112 | #define S_000044_MC_PROBE_FAULT_STAT(x) (((x) & 0x1) << 16) | ||
| 113 | #define G_000044_MC_PROBE_FAULT_STAT(x) (((x) >> 16) & 0x1) | ||
| 114 | #define C_000044_MC_PROBE_FAULT_STAT 0xFFFEFFFF | ||
| 115 | #define S_000044_I2C_INT(x) (((x) & 0x1) << 17) | ||
| 116 | #define G_000044_I2C_INT(x) (((x) >> 17) & 0x1) | ||
| 117 | #define C_000044_I2C_INT 0xFFFDFFFF | ||
| 118 | #define S_000044_SCRATCH_INT_STAT(x) (((x) & 0x1) << 18) | ||
| 119 | #define G_000044_SCRATCH_INT_STAT(x) (((x) >> 18) & 0x1) | ||
| 120 | #define C_000044_SCRATCH_INT_STAT 0xFFFBFFFF | ||
| 121 | #define S_000044_GUI_IDLE_STAT(x) (((x) & 0x1) << 19) | ||
| 122 | #define G_000044_GUI_IDLE_STAT(x) (((x) >> 19) & 0x1) | ||
| 123 | #define C_000044_GUI_IDLE_STAT 0xFFF7FFFF | ||
| 124 | #define S_000044_ATI_OVERDRIVE_INT_STAT(x) (((x) & 0x1) << 20) | ||
| 125 | #define G_000044_ATI_OVERDRIVE_INT_STAT(x) (((x) >> 20) & 0x1) | ||
| 126 | #define C_000044_ATI_OVERDRIVE_INT_STAT 0xFFEFFFFF | ||
| 127 | #define S_000044_MC_PROTECTION_FAULT_STAT(x) (((x) & 0x1) << 21) | ||
| 128 | #define G_000044_MC_PROTECTION_FAULT_STAT(x) (((x) >> 21) & 0x1) | ||
| 129 | #define C_000044_MC_PROTECTION_FAULT_STAT 0xFFDFFFFF | ||
| 130 | #define S_000044_RBBM_READ_INT_STAT(x) (((x) & 0x1) << 22) | ||
| 131 | #define G_000044_RBBM_READ_INT_STAT(x) (((x) >> 22) & 0x1) | ||
| 132 | #define C_000044_RBBM_READ_INT_STAT 0xFFBFFFFF | ||
| 133 | #define S_000044_CB_CONTEXT_SWITCH_STAT(x) (((x) & 0x1) << 23) | ||
| 134 | #define G_000044_CB_CONTEXT_SWITCH_STAT(x) (((x) >> 23) & 0x1) | ||
| 135 | #define C_000044_CB_CONTEXT_SWITCH_STAT 0xFF7FFFFF | ||
| 136 | #define S_000044_VIPH_INT(x) (((x) & 0x1) << 24) | ||
| 137 | #define G_000044_VIPH_INT(x) (((x) >> 24) & 0x1) | ||
| 138 | #define C_000044_VIPH_INT 0xFEFFFFFF | ||
| 139 | #define S_000044_SW_INT(x) (((x) & 0x1) << 25) | ||
| 140 | #define G_000044_SW_INT(x) (((x) >> 25) & 0x1) | ||
| 141 | #define C_000044_SW_INT 0xFDFFFFFF | ||
| 142 | #define S_000044_SW_INT_SET(x) (((x) & 0x1) << 26) | ||
| 143 | #define G_000044_SW_INT_SET(x) (((x) >> 26) & 0x1) | ||
| 144 | #define C_000044_SW_INT_SET 0xFBFFFFFF | ||
| 145 | #define S_000044_IDCT_INT_STAT(x) (((x) & 0x1) << 27) | ||
| 146 | #define G_000044_IDCT_INT_STAT(x) (((x) >> 27) & 0x1) | ||
| 147 | #define C_000044_IDCT_INT_STAT 0xF7FFFFFF | ||
| 148 | #define S_000044_GUIDMA_STAT(x) (((x) & 0x1) << 30) | ||
| 149 | #define G_000044_GUIDMA_STAT(x) (((x) >> 30) & 0x1) | ||
| 150 | #define C_000044_GUIDMA_STAT 0xBFFFFFFF | ||
| 151 | #define S_000044_VIDDMA_STAT(x) (((x) & 0x1) << 31) | ||
| 152 | #define G_000044_VIDDMA_STAT(x) (((x) >> 31) & 0x1) | ||
| 153 | #define C_000044_VIDDMA_STAT 0x7FFFFFFF | ||
| 154 | #define R_00004C_BUS_CNTL 0x00004C | ||
| 155 | #define S_00004C_BUS_MASTER_DIS(x) (((x) & 0x1) << 14) | ||
| 156 | #define G_00004C_BUS_MASTER_DIS(x) (((x) >> 14) & 0x1) | ||
| 157 | #define C_00004C_BUS_MASTER_DIS 0xFFFFBFFF | ||
| 158 | #define S_00004C_BUS_MSI_REARM(x) (((x) & 0x1) << 20) | ||
| 159 | #define G_00004C_BUS_MSI_REARM(x) (((x) >> 20) & 0x1) | ||
| 160 | #define C_00004C_BUS_MSI_REARM 0xFFEFFFFF | ||
| 161 | #define R_000070_MC_IND_INDEX 0x000070 | ||
| 162 | #define S_000070_MC_IND_ADDR(x) (((x) & 0xFFFF) << 0) | ||
| 163 | #define G_000070_MC_IND_ADDR(x) (((x) >> 0) & 0xFFFF) | ||
| 164 | #define C_000070_MC_IND_ADDR 0xFFFF0000 | ||
| 165 | #define S_000070_MC_IND_SEQ_RBS_0(x) (((x) & 0x1) << 16) | ||
| 166 | #define G_000070_MC_IND_SEQ_RBS_0(x) (((x) >> 16) & 0x1) | ||
| 167 | #define C_000070_MC_IND_SEQ_RBS_0 0xFFFEFFFF | ||
| 168 | #define S_000070_MC_IND_SEQ_RBS_1(x) (((x) & 0x1) << 17) | ||
| 169 | #define G_000070_MC_IND_SEQ_RBS_1(x) (((x) >> 17) & 0x1) | ||
| 170 | #define C_000070_MC_IND_SEQ_RBS_1 0xFFFDFFFF | ||
| 171 | #define S_000070_MC_IND_SEQ_RBS_2(x) (((x) & 0x1) << 18) | ||
| 172 | #define G_000070_MC_IND_SEQ_RBS_2(x) (((x) >> 18) & 0x1) | ||
| 173 | #define C_000070_MC_IND_SEQ_RBS_2 0xFFFBFFFF | ||
| 174 | #define S_000070_MC_IND_SEQ_RBS_3(x) (((x) & 0x1) << 19) | ||
| 175 | #define G_000070_MC_IND_SEQ_RBS_3(x) (((x) >> 19) & 0x1) | ||
| 176 | #define C_000070_MC_IND_SEQ_RBS_3 0xFFF7FFFF | ||
| 177 | #define S_000070_MC_IND_AIC_RBS(x) (((x) & 0x1) << 20) | ||
| 178 | #define G_000070_MC_IND_AIC_RBS(x) (((x) >> 20) & 0x1) | ||
| 179 | #define C_000070_MC_IND_AIC_RBS 0xFFEFFFFF | ||
| 180 | #define S_000070_MC_IND_CITF_ARB0(x) (((x) & 0x1) << 21) | ||
| 181 | #define G_000070_MC_IND_CITF_ARB0(x) (((x) >> 21) & 0x1) | ||
| 182 | #define C_000070_MC_IND_CITF_ARB0 0xFFDFFFFF | ||
| 183 | #define S_000070_MC_IND_CITF_ARB1(x) (((x) & 0x1) << 22) | ||
| 184 | #define G_000070_MC_IND_CITF_ARB1(x) (((x) >> 22) & 0x1) | ||
| 185 | #define C_000070_MC_IND_CITF_ARB1 0xFFBFFFFF | ||
| 186 | #define S_000070_MC_IND_WR_EN(x) (((x) & 0x1) << 23) | ||
| 187 | #define G_000070_MC_IND_WR_EN(x) (((x) >> 23) & 0x1) | ||
| 188 | #define C_000070_MC_IND_WR_EN 0xFF7FFFFF | ||
| 189 | #define S_000070_MC_IND_RD_INV(x) (((x) & 0x1) << 24) | ||
| 190 | #define G_000070_MC_IND_RD_INV(x) (((x) >> 24) & 0x1) | ||
| 191 | #define C_000070_MC_IND_RD_INV 0xFEFFFFFF | ||
| 192 | #define R_000074_MC_IND_DATA 0x000074 | ||
| 193 | #define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0) | ||
| 194 | #define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF) | ||
| 195 | #define C_000074_MC_IND_DATA 0x00000000 | ||
| 196 | #define R_000134_HDP_FB_LOCATION 0x000134 | ||
| 197 | #define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0) | ||
| 198 | #define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
| 199 | #define C_000134_HDP_FB_START 0xFFFF0000 | ||
| 200 | #define R_0007C0_CP_STAT 0x0007C0 | ||
| 201 | #define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) | ||
| 202 | #define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) | ||
| 203 | #define C_0007C0_MRU_BUSY 0xFFFFFFFE | ||
| 204 | #define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) | ||
| 205 | #define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) | ||
| 206 | #define C_0007C0_MWU_BUSY 0xFFFFFFFD | ||
| 207 | #define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) | ||
| 208 | #define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) | ||
| 209 | #define C_0007C0_RSIU_BUSY 0xFFFFFFFB | ||
| 210 | #define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) | ||
| 211 | #define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) | ||
| 212 | #define C_0007C0_RCIU_BUSY 0xFFFFFFF7 | ||
| 213 | #define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) | ||
| 214 | #define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) | ||
| 215 | #define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF | ||
| 216 | #define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) | ||
| 217 | #define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) | ||
| 218 | #define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF | ||
| 219 | #define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) | ||
| 220 | #define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) | ||
| 221 | #define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF | ||
| 222 | #define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) | ||
| 223 | #define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) | ||
| 224 | #define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF | ||
| 225 | #define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) | ||
| 226 | #define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) | ||
| 227 | #define C_0007C0_CSI_BUSY 0xFFFFDFFF | ||
| 228 | #define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14) | ||
| 229 | #define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1) | ||
| 230 | #define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF | ||
| 231 | #define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15) | ||
| 232 | #define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1) | ||
| 233 | #define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF | ||
| 234 | #define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) | ||
| 235 | #define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) | ||
| 236 | #define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF | ||
| 237 | #define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) | ||
| 238 | #define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) | ||
| 239 | #define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF | ||
| 240 | #define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) | ||
| 241 | #define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) | ||
| 242 | #define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF | ||
| 243 | #define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) | ||
| 244 | #define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) | ||
| 245 | #define C_0007C0_CP_BUSY 0x7FFFFFFF | ||
| 246 | #define R_000E40_RBBM_STATUS 0x000E40 | ||
| 247 | #define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) | ||
| 248 | #define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) | ||
| 249 | #define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 | ||
| 250 | #define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) | ||
| 251 | #define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) | ||
| 252 | #define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF | ||
| 253 | #define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) | ||
| 254 | #define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) | ||
| 255 | #define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF | ||
| 256 | #define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) | ||
| 257 | #define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) | ||
| 258 | #define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF | ||
| 259 | #define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) | ||
| 260 | #define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) | ||
| 261 | #define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF | ||
| 262 | #define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) | ||
| 263 | #define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) | ||
| 264 | #define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF | ||
| 265 | #define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) | ||
| 266 | #define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) | ||
| 267 | #define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF | ||
| 268 | #define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) | ||
| 269 | #define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) | ||
| 270 | #define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF | ||
| 271 | #define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) | ||
| 272 | #define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) | ||
| 273 | #define C_000E40_ENG_EV_BUSY 0xFFFF7FFF | ||
| 274 | #define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) | ||
| 275 | #define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) | ||
| 276 | #define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF | ||
| 277 | #define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) | ||
| 278 | #define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) | ||
| 279 | #define C_000E40_E2_BUSY 0xFFFDFFFF | ||
| 280 | #define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) | ||
| 281 | #define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) | ||
| 282 | #define C_000E40_RB2D_BUSY 0xFFFBFFFF | ||
| 283 | #define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) | ||
| 284 | #define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) | ||
| 285 | #define C_000E40_RB3D_BUSY 0xFFF7FFFF | ||
| 286 | #define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20) | ||
| 287 | #define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1) | ||
| 288 | #define C_000E40_VAP_BUSY 0xFFEFFFFF | ||
| 289 | #define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) | ||
| 290 | #define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) | ||
| 291 | #define C_000E40_RE_BUSY 0xFFDFFFFF | ||
| 292 | #define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) | ||
| 293 | #define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) | ||
| 294 | #define C_000E40_TAM_BUSY 0xFFBFFFFF | ||
| 295 | #define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) | ||
| 296 | #define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) | ||
| 297 | #define C_000E40_TDM_BUSY 0xFF7FFFFF | ||
| 298 | #define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) | ||
| 299 | #define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) | ||
| 300 | #define C_000E40_PB_BUSY 0xFEFFFFFF | ||
| 301 | #define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25) | ||
| 302 | #define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1) | ||
| 303 | #define C_000E40_TIM_BUSY 0xFDFFFFFF | ||
| 304 | #define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26) | ||
| 305 | #define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1) | ||
| 306 | #define C_000E40_GA_BUSY 0xFBFFFFFF | ||
| 307 | #define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27) | ||
| 308 | #define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1) | ||
| 309 | #define C_000E40_CBA2D_BUSY 0xF7FFFFFF | ||
| 310 | #define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) | ||
| 311 | #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) | ||
| 312 | #define C_000E40_GUI_ACTIVE 0x7FFFFFFF | ||
| 313 | #define R_0060A4_D1CRTC_STATUS_FRAME_COUNT 0x0060A4 | ||
| 314 | #define S_0060A4_D1CRTC_FRAME_COUNT(x) (((x) & 0xFFFFFF) << 0) | ||
| 315 | #define G_0060A4_D1CRTC_FRAME_COUNT(x) (((x) >> 0) & 0xFFFFFF) | ||
| 316 | #define C_0060A4_D1CRTC_FRAME_COUNT 0xFF000000 | ||
| 317 | #define R_006534_D1MODE_VBLANK_STATUS 0x006534 | ||
| 318 | #define S_006534_D1MODE_VBLANK_OCCURRED(x) (((x) & 0x1) << 0) | ||
| 319 | #define G_006534_D1MODE_VBLANK_OCCURRED(x) (((x) >> 0) & 0x1) | ||
| 320 | #define C_006534_D1MODE_VBLANK_OCCURRED 0xFFFFFFFE | ||
| 321 | #define S_006534_D1MODE_VBLANK_ACK(x) (((x) & 0x1) << 4) | ||
| 322 | #define G_006534_D1MODE_VBLANK_ACK(x) (((x) >> 4) & 0x1) | ||
| 323 | #define C_006534_D1MODE_VBLANK_ACK 0xFFFFFFEF | ||
| 324 | #define S_006534_D1MODE_VBLANK_STAT(x) (((x) & 0x1) << 12) | ||
| 325 | #define G_006534_D1MODE_VBLANK_STAT(x) (((x) >> 12) & 0x1) | ||
| 326 | #define C_006534_D1MODE_VBLANK_STAT 0xFFFFEFFF | ||
| 327 | #define S_006534_D1MODE_VBLANK_INTERRUPT(x) (((x) & 0x1) << 16) | ||
| 328 | #define G_006534_D1MODE_VBLANK_INTERRUPT(x) (((x) >> 16) & 0x1) | ||
| 329 | #define C_006534_D1MODE_VBLANK_INTERRUPT 0xFFFEFFFF | ||
| 330 | #define R_006540_DxMODE_INT_MASK 0x006540 | ||
| 331 | #define S_006540_D1MODE_VBLANK_INT_MASK(x) (((x) & 0x1) << 0) | ||
| 332 | #define G_006540_D1MODE_VBLANK_INT_MASK(x) (((x) >> 0) & 0x1) | ||
| 333 | #define C_006540_D1MODE_VBLANK_INT_MASK 0xFFFFFFFE | ||
| 334 | #define S_006540_D1MODE_VLINE_INT_MASK(x) (((x) & 0x1) << 4) | ||
| 335 | #define G_006540_D1MODE_VLINE_INT_MASK(x) (((x) >> 4) & 0x1) | ||
| 336 | #define C_006540_D1MODE_VLINE_INT_MASK 0xFFFFFFEF | ||
| 337 | #define S_006540_D2MODE_VBLANK_INT_MASK(x) (((x) & 0x1) << 8) | ||
| 338 | #define G_006540_D2MODE_VBLANK_INT_MASK(x) (((x) >> 8) & 0x1) | ||
| 339 | #define C_006540_D2MODE_VBLANK_INT_MASK 0xFFFFFEFF | ||
| 340 | #define S_006540_D2MODE_VLINE_INT_MASK(x) (((x) & 0x1) << 12) | ||
| 341 | #define G_006540_D2MODE_VLINE_INT_MASK(x) (((x) >> 12) & 0x1) | ||
| 342 | #define C_006540_D2MODE_VLINE_INT_MASK 0xFFFFEFFF | ||
| 343 | #define S_006540_D1MODE_VBLANK_CP_SEL(x) (((x) & 0x1) << 30) | ||
| 344 | #define G_006540_D1MODE_VBLANK_CP_SEL(x) (((x) >> 30) & 0x1) | ||
| 345 | #define C_006540_D1MODE_VBLANK_CP_SEL 0xBFFFFFFF | ||
| 346 | #define S_006540_D2MODE_VBLANK_CP_SEL(x) (((x) & 0x1) << 31) | ||
| 347 | #define G_006540_D2MODE_VBLANK_CP_SEL(x) (((x) >> 31) & 0x1) | ||
| 348 | #define C_006540_D2MODE_VBLANK_CP_SEL 0x7FFFFFFF | ||
| 349 | #define R_0068A4_D2CRTC_STATUS_FRAME_COUNT 0x0068A4 | ||
| 350 | #define S_0068A4_D2CRTC_FRAME_COUNT(x) (((x) & 0xFFFFFF) << 0) | ||
| 351 | #define G_0068A4_D2CRTC_FRAME_COUNT(x) (((x) >> 0) & 0xFFFFFF) | ||
| 352 | #define C_0068A4_D2CRTC_FRAME_COUNT 0xFF000000 | ||
| 353 | #define R_006D34_D2MODE_VBLANK_STATUS 0x006D34 | ||
| 354 | #define S_006D34_D2MODE_VBLANK_OCCURRED(x) (((x) & 0x1) << 0) | ||
| 355 | #define G_006D34_D2MODE_VBLANK_OCCURRED(x) (((x) >> 0) & 0x1) | ||
| 356 | #define C_006D34_D2MODE_VBLANK_OCCURRED 0xFFFFFFFE | ||
| 357 | #define S_006D34_D2MODE_VBLANK_ACK(x) (((x) & 0x1) << 4) | ||
| 358 | #define G_006D34_D2MODE_VBLANK_ACK(x) (((x) >> 4) & 0x1) | ||
| 359 | #define C_006D34_D2MODE_VBLANK_ACK 0xFFFFFFEF | ||
| 360 | #define S_006D34_D2MODE_VBLANK_STAT(x) (((x) & 0x1) << 12) | ||
| 361 | #define G_006D34_D2MODE_VBLANK_STAT(x) (((x) >> 12) & 0x1) | ||
| 362 | #define C_006D34_D2MODE_VBLANK_STAT 0xFFFFEFFF | ||
| 363 | #define S_006D34_D2MODE_VBLANK_INTERRUPT(x) (((x) & 0x1) << 16) | ||
| 364 | #define G_006D34_D2MODE_VBLANK_INTERRUPT(x) (((x) >> 16) & 0x1) | ||
| 365 | #define C_006D34_D2MODE_VBLANK_INTERRUPT 0xFFFEFFFF | ||
| 366 | #define R_007EDC_DISP_INTERRUPT_STATUS 0x007EDC | ||
| 367 | #define S_007EDC_LB_D1_VBLANK_INTERRUPT(x) (((x) & 0x1) << 4) | ||
| 368 | #define G_007EDC_LB_D1_VBLANK_INTERRUPT(x) (((x) >> 4) & 0x1) | ||
| 369 | #define C_007EDC_LB_D1_VBLANK_INTERRUPT 0xFFFFFFEF | ||
| 370 | #define S_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) & 0x1) << 5) | ||
| 371 | #define G_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) >> 5) & 0x1) | ||
| 372 | #define C_007EDC_LB_D2_VBLANK_INTERRUPT 0xFFFFFFDF | ||
| 373 | |||
| 374 | |||
| 375 | /* MC registers */ | ||
| 376 | #define R_000000_MC_STATUS 0x000000 | ||
| 377 | #define S_000000_MC_IDLE(x) (((x) & 0x1) << 0) | ||
| 378 | #define G_000000_MC_IDLE(x) (((x) >> 0) & 0x1) | ||
| 379 | #define C_000000_MC_IDLE 0xFFFFFFFE | ||
| 380 | #define R_000004_MC_FB_LOCATION 0x000004 | ||
| 381 | #define S_000004_MC_FB_START(x) (((x) & 0xFFFF) << 0) | ||
| 382 | #define G_000004_MC_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
| 383 | #define C_000004_MC_FB_START 0xFFFF0000 | ||
| 384 | #define S_000004_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) | ||
| 385 | #define G_000004_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) | ||
| 386 | #define C_000004_MC_FB_TOP 0x0000FFFF | ||
| 387 | #define R_000005_MC_AGP_LOCATION 0x000005 | ||
| 388 | #define S_000005_MC_AGP_START(x) (((x) & 0xFFFF) << 0) | ||
| 389 | #define G_000005_MC_AGP_START(x) (((x) >> 0) & 0xFFFF) | ||
| 390 | #define C_000005_MC_AGP_START 0xFFFF0000 | ||
| 391 | #define S_000005_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16) | ||
| 392 | #define G_000005_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF) | ||
| 393 | #define C_000005_MC_AGP_TOP 0x0000FFFF | ||
| 394 | #define R_000006_AGP_BASE 0x000006 | ||
| 395 | #define S_000006_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) | ||
| 396 | #define G_000006_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) | ||
| 397 | #define C_000006_AGP_BASE_ADDR 0x00000000 | ||
| 398 | #define R_000007_AGP_BASE_2 0x000007 | ||
| 399 | #define S_000007_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0) | ||
| 400 | #define G_000007_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF) | ||
| 401 | #define C_000007_AGP_BASE_ADDR_2 0xFFFFFFF0 | ||
| 402 | #define R_000009_MC_CNTL1 0x000009 | ||
| 403 | #define S_000009_ENABLE_PAGE_TABLES(x) (((x) & 0x1) << 26) | ||
| 404 | #define G_000009_ENABLE_PAGE_TABLES(x) (((x) >> 26) & 0x1) | ||
| 405 | #define C_000009_ENABLE_PAGE_TABLES 0xFBFFFFFF | ||
| 406 | /* FIXME: don't know the various field sizes, need feedback from AMD */ | ||
| 407 | #define R_000100_MC_PT0_CNTL 0x000100 | ||
| 408 | #define S_000100_ENABLE_PT(x) (((x) & 0x1) << 0) | ||
| 409 | #define G_000100_ENABLE_PT(x) (((x) >> 0) & 0x1) | ||
| 410 | #define C_000100_ENABLE_PT 0xFFFFFFFE | ||
| 411 | #define S_000100_EFFECTIVE_L2_CACHE_SIZE(x) (((x) & 0x7) << 15) | ||
| 412 | #define G_000100_EFFECTIVE_L2_CACHE_SIZE(x) (((x) >> 15) & 0x7) | ||
| 413 | #define C_000100_EFFECTIVE_L2_CACHE_SIZE 0xFFFC7FFF | ||
| 414 | #define S_000100_EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 0x7) << 21) | ||
| 415 | #define G_000100_EFFECTIVE_L2_QUEUE_SIZE(x) (((x) >> 21) & 0x7) | ||
| 416 | #define C_000100_EFFECTIVE_L2_QUEUE_SIZE 0xFF1FFFFF | ||
| 417 | #define S_000100_INVALIDATE_ALL_L1_TLBS(x) (((x) & 0x1) << 28) | ||
| 418 | #define G_000100_INVALIDATE_ALL_L1_TLBS(x) (((x) >> 28) & 0x1) | ||
| 419 | #define C_000100_INVALIDATE_ALL_L1_TLBS 0xEFFFFFFF | ||
| 420 | #define S_000100_INVALIDATE_L2_CACHE(x) (((x) & 0x1) << 29) | ||
| 421 | #define G_000100_INVALIDATE_L2_CACHE(x) (((x) >> 29) & 0x1) | ||
| 422 | #define C_000100_INVALIDATE_L2_CACHE 0xDFFFFFFF | ||
| 423 | #define R_000102_MC_PT0_CONTEXT0_CNTL 0x000102 | ||
| 424 | #define S_000102_ENABLE_PAGE_TABLE(x) (((x) & 0x1) << 0) | ||
| 425 | #define G_000102_ENABLE_PAGE_TABLE(x) (((x) >> 0) & 0x1) | ||
| 426 | #define C_000102_ENABLE_PAGE_TABLE 0xFFFFFFFE | ||
| 427 | #define S_000102_PAGE_TABLE_DEPTH(x) (((x) & 0x3) << 1) | ||
| 428 | #define G_000102_PAGE_TABLE_DEPTH(x) (((x) >> 1) & 0x3) | ||
| 429 | #define C_000102_PAGE_TABLE_DEPTH 0xFFFFFFF9 | ||
| 430 | #define V_000102_PAGE_TABLE_FLAT 0 | ||
| 431 | /* R600 documentation suggests that this should be a number of pages */ | ||
| 432 | #define R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR 0x000112 | ||
| 433 | #define R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR 0x000114 | ||
| 434 | #define R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x00011C | ||
| 435 | #define R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR 0x00012C | ||
| 436 | #define R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR 0x00013C | ||
| 437 | #define R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR 0x00014C | ||
| 438 | #define R_00016C_MC_PT0_CLIENT0_CNTL 0x00016C | ||
| 439 | #define S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 0) | ||
| 440 | #define G_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 0) & 0x1) | ||
| 441 | #define C_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE 0xFFFFFFFE | ||
| 442 | #define S_00016C_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 1) | ||
| 443 | #define G_00016C_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 1) & 0x1) | ||
| 444 | #define C_00016C_TRANSLATION_MODE_OVERRIDE 0xFFFFFFFD | ||
| 445 | #define S_00016C_SYSTEM_ACCESS_MODE_MASK(x) (((x) & 0x3) << 8) | ||
| 446 | #define G_00016C_SYSTEM_ACCESS_MODE_MASK(x) (((x) >> 8) & 0x3) | ||
| 447 | #define C_00016C_SYSTEM_ACCESS_MODE_MASK 0xFFFFFCFF | ||
| 448 | #define V_00016C_SYSTEM_ACCESS_MODE_PA_ONLY 0 | ||
| 449 | #define V_00016C_SYSTEM_ACCESS_MODE_USE_SYS_MAP 1 | ||
| 450 | #define V_00016C_SYSTEM_ACCESS_MODE_IN_SYS 2 | ||
| 451 | #define V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS 3 | ||
| 452 | #define S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x) (((x) & 0x1) << 10) | ||
| 453 | #define G_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x) (((x) >> 10) & 0x1) | ||
| 454 | #define C_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS 0xFFFFFBFF | ||
| 455 | #define V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH 0 | ||
| 456 | #define V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE 1 | ||
| 457 | #define S_00016C_EFFECTIVE_L1_CACHE_SIZE(x) (((x) & 0x7) << 11) | ||
| 458 | #define G_00016C_EFFECTIVE_L1_CACHE_SIZE(x) (((x) >> 11) & 0x7) | ||
| 459 | #define C_00016C_EFFECTIVE_L1_CACHE_SIZE 0xFFFFC7FF | ||
| 460 | #define S_00016C_ENABLE_FRAGMENT_PROCESSING(x) (((x) & 0x1) << 14) | ||
| 461 | #define G_00016C_ENABLE_FRAGMENT_PROCESSING(x) (((x) >> 14) & 0x1) | ||
| 462 | #define C_00016C_ENABLE_FRAGMENT_PROCESSING 0xFFFFBFFF | ||
| 463 | #define S_00016C_EFFECTIVE_L1_QUEUE_SIZE(x) (((x) & 0x7) << 15) | ||
| 464 | #define G_00016C_EFFECTIVE_L1_QUEUE_SIZE(x) (((x) >> 15) & 0x7) | ||
| 465 | #define C_00016C_EFFECTIVE_L1_QUEUE_SIZE 0xFFFC7FFF | ||
| 466 | #define S_00016C_INVALIDATE_L1_TLB(x) (((x) & 0x1) << 20) | ||
| 467 | #define G_00016C_INVALIDATE_L1_TLB(x) (((x) >> 20) & 0x1) | ||
| 468 | #define C_00016C_INVALIDATE_L1_TLB 0xFFEFFFFF | ||
| 469 | |||
| 470 | #endif | ||
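The header above follows a uniform field-macro convention: for every field, S_<reg>_<FIELD>(x) shifts a value into position, G_<reg>_<FIELD>(x) extracts it, and C_<reg>_<FIELD> is the complement mask used to clear it. A hedged sketch of how the three compose, using the GEN_INT_CNTL definitions above; the helper function is illustrative, not part of the patch.

/* Sketch only: the S_/G_/C_ macros compose into a standard
 * clear-then-set update of a single register field. */
static uint32_t example_update_sw_int_en(uint32_t gen_int_cntl, int enable)
{
	/* G_000040_SW_INT_EN(gen_int_cntl) would read the current field value */
	gen_int_cntl &= C_000040_SW_INT_EN;		/* clear SW_INT_EN */
	gen_int_cntl |= S_000040_SW_INT_EN(!!enable);	/* write the new value */
	return gen_int_cntl;
}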
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 7a0098ddf977..025e3225346c 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
| @@ -26,105 +26,29 @@ | |||
| 26 | * Jerome Glisse | 26 | * Jerome Glisse |
| 27 | */ | 27 | */ |
| 28 | #include "drmP.h" | 28 | #include "drmP.h" |
| 29 | #include "radeon_reg.h" | ||
| 30 | #include "radeon.h" | 29 | #include "radeon.h" |
| 31 | #include "rs690r.h" | ||
| 32 | #include "atom.h" | 30 | #include "atom.h" |
| 33 | #include "atom-bits.h" | 31 | #include "rs690d.h" |
| 34 | |||
| 35 | /* rs690,rs740 depends on : */ | ||
| 36 | void r100_hdp_reset(struct radeon_device *rdev); | ||
| 37 | int r300_mc_wait_for_idle(struct radeon_device *rdev); | ||
| 38 | void r420_pipes_init(struct radeon_device *rdev); | ||
| 39 | void rs400_gart_disable(struct radeon_device *rdev); | ||
| 40 | int rs400_gart_enable(struct radeon_device *rdev); | ||
| 41 | void rs400_gart_adjust_size(struct radeon_device *rdev); | ||
| 42 | void rs600_mc_disable_clients(struct radeon_device *rdev); | ||
| 43 | |||
| 44 | /* This files gather functions specifics to : | ||
| 45 | * rs690,rs740 | ||
| 46 | * | ||
| 47 | * Some of these functions might be used by newer ASICs. | ||
| 48 | */ | ||
| 49 | void rs690_gpu_init(struct radeon_device *rdev); | ||
| 50 | int rs690_mc_wait_for_idle(struct radeon_device *rdev); | ||
| 51 | |||
| 52 | |||
| 53 | /* | ||
| 54 | * MC functions. | ||
| 55 | */ | ||
| 56 | int rs690_mc_init(struct radeon_device *rdev) | ||
| 57 | { | ||
| 58 | uint32_t tmp; | ||
| 59 | int r; | ||
| 60 | |||
| 61 | if (r100_debugfs_rbbm_init(rdev)) { | ||
| 62 | DRM_ERROR("Failed to register debugfs file for RBBM !\n"); | ||
| 63 | } | ||
| 64 | |||
| 65 | rs690_gpu_init(rdev); | ||
| 66 | rs400_gart_disable(rdev); | ||
| 67 | |||
| 68 | /* Setup GPU memory space */ | ||
| 69 | rdev->mc.gtt_location = rdev->mc.mc_vram_size; | ||
| 70 | rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); | ||
| 71 | rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); | ||
| 72 | rdev->mc.vram_location = 0xFFFFFFFFUL; | ||
| 73 | r = radeon_mc_setup(rdev); | ||
| 74 | if (r) { | ||
| 75 | return r; | ||
| 76 | } | ||
| 77 | |||
| 78 | /* Program GPU memory space */ | ||
| 79 | rs600_mc_disable_clients(rdev); | ||
| 80 | if (rs690_mc_wait_for_idle(rdev)) { | ||
| 81 | printk(KERN_WARNING "Failed to wait MC idle while " | ||
| 82 | "programming pipes. Bad things might happen.\n"); | ||
| 83 | } | ||
| 84 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; | ||
| 85 | tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16); | ||
| 86 | tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16); | ||
| 87 | WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp); | ||
| 88 | /* FIXME: Does this reg exist on RS480,RS740 ? */ | ||
| 89 | WREG32(0x310, rdev->mc.vram_location); | ||
| 90 | WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16); | ||
| 91 | return 0; | ||
| 92 | } | ||
| 93 | |||
| 94 | void rs690_mc_fini(struct radeon_device *rdev) | ||
| 95 | { | ||
| 96 | } | ||
| 97 | |||
| 98 | 32 | ||
| 99 | /* | 33 | static int rs690_mc_wait_for_idle(struct radeon_device *rdev) |
| 100 | * Global GPU functions | ||
| 101 | */ | ||
| 102 | int rs690_mc_wait_for_idle(struct radeon_device *rdev) | ||
| 103 | { | 34 | { |
| 104 | unsigned i; | 35 | unsigned i; |
| 105 | uint32_t tmp; | 36 | uint32_t tmp; |
| 106 | 37 | ||
| 107 | for (i = 0; i < rdev->usec_timeout; i++) { | 38 | for (i = 0; i < rdev->usec_timeout; i++) { |
| 108 | /* read MC_STATUS */ | 39 | /* read MC_STATUS */ |
| 109 | tmp = RREG32_MC(RS690_MC_STATUS); | 40 | tmp = RREG32_MC(R_000090_MC_SYSTEM_STATUS); |
| 110 | if (tmp & RS690_MC_STATUS_IDLE) { | 41 | if (G_000090_MC_SYSTEM_IDLE(tmp)) |
| 111 | return 0; | 42 | return 0; |
| 112 | } | 43 | udelay(1); |
| 113 | DRM_UDELAY(1); | ||
| 114 | } | 44 | } |
| 115 | return -1; | 45 | return -1; |
| 116 | } | 46 | } |
| 117 | 47 | ||
| 118 | void rs690_errata(struct radeon_device *rdev) | 48 | static void rs690_gpu_init(struct radeon_device *rdev) |
| 119 | { | ||
| 120 | rdev->pll_errata = 0; | ||
| 121 | } | ||
| 122 | |||
| 123 | void rs690_gpu_init(struct radeon_device *rdev) | ||
| 124 | { | 49 | { |
| 125 | /* FIXME: HDP same place on rs690 ? */ | 50 | /* FIXME: HDP same place on rs690 ? */ |
| 126 | r100_hdp_reset(rdev); | 51 | r100_hdp_reset(rdev); |
| 127 | rv515_vga_render_disable(rdev); | ||
| 128 | /* FIXME: is this correct ? */ | 52 | /* FIXME: is this correct ? */ |
| 129 | r420_pipes_init(rdev); | 53 | r420_pipes_init(rdev); |
| 130 | if (rs690_mc_wait_for_idle(rdev)) { | 54 | if (rs690_mc_wait_for_idle(rdev)) { |
| @@ -133,10 +57,6 @@ void rs690_gpu_init(struct radeon_device *rdev) | |||
| 133 | } | 57 | } |
| 134 | } | 58 | } |
| 135 | 59 | ||
| 136 | |||
| 137 | /* | ||
| 138 | * VRAM info. | ||
| 139 | */ | ||
| 140 | void rs690_pm_info(struct radeon_device *rdev) | 60 | void rs690_pm_info(struct radeon_device *rdev) |
| 141 | { | 61 | { |
| 142 | int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); | 62 | int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); |
| @@ -250,39 +170,39 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev, | |||
| 250 | /* | 170 | /* |
| 251 | * Line Buffer Setup | 171 | * Line Buffer Setup |
| 252 | * There is a single line buffer shared by both display controllers. | 172 | * There is a single line buffer shared by both display controllers. |
| 253 | * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between | 173 | * R_006520_DC_LB_MEMORY_SPLIT controls how that line buffer is shared between |
| 254 | * the display controllers. The partitioning can either be done | 174 | * the display controllers. The partitioning can either be done |
| 255 | * manually or via one of four preset allocations specified in bits 1:0: | 175 | * manually or via one of four preset allocations specified in bits 1:0: |
| 256 | * 0 - line buffer is divided in half and shared between crtc | 176 | * 0 - line buffer is divided in half and shared between crtc |
| 257 | * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4 | 177 | * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4 |
| 258 | * 2 - D1 gets the whole buffer | 178 | * 2 - D1 gets the whole buffer |
| 259 | * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4 | 179 | * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4 |
| 260 | * Setting bit 2 of DC_LB_MEMORY_SPLIT controls switches to manual | 180 | * Setting bit 2 of R_006520_DC_LB_MEMORY_SPLIT switches to manual |
| 261 | * allocation mode. In manual allocation mode, D1 always starts at 0, | 181 | * allocation mode. In manual allocation mode, D1 always starts at 0, |
| 262 | * D1 end/2 is specified in bits 14:4; D2 allocation follows D1. | 182 | * D1 end/2 is specified in bits 14:4; D2 allocation follows D1. |
| 263 | */ | 183 | */ |
| 264 | tmp = RREG32(DC_LB_MEMORY_SPLIT) & ~DC_LB_MEMORY_SPLIT_MASK; | 184 | tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT) & C_006520_DC_LB_MEMORY_SPLIT; |
| 265 | tmp &= ~DC_LB_MEMORY_SPLIT_SHIFT_MODE; | 185 | tmp &= ~C_006520_DC_LB_MEMORY_SPLIT_MODE; |
| 266 | /* auto */ | 186 | /* auto */ |
| 267 | if (mode1 && mode2) { | 187 | if (mode1 && mode2) { |
| 268 | if (mode1->hdisplay > mode2->hdisplay) { | 188 | if (mode1->hdisplay > mode2->hdisplay) { |
| 269 | if (mode1->hdisplay > 2560) | 189 | if (mode1->hdisplay > 2560) |
| 270 | tmp |= DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; | 190 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; |
| 271 | else | 191 | else |
| 272 | tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | 192 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; |
| 273 | } else if (mode2->hdisplay > mode1->hdisplay) { | 193 | } else if (mode2->hdisplay > mode1->hdisplay) { |
| 274 | if (mode2->hdisplay > 2560) | 194 | if (mode2->hdisplay > 2560) |
| 275 | tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; | 195 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; |
| 276 | else | 196 | else |
| 277 | tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | 197 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; |
| 278 | } else | 198 | } else |
| 279 | tmp |= AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | 199 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; |
| 280 | } else if (mode1) { | 200 | } else if (mode1) { |
| 281 | tmp |= DC_LB_MEMORY_SPLIT_D1_ONLY; | 201 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY; |
| 282 | } else if (mode2) { | 202 | } else if (mode2) { |
| 283 | tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; | 203 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; |
| 284 | } | 204 | } |
| 285 | WREG32(DC_LB_MEMORY_SPLIT, tmp); | 205 | WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp); |
| 286 | } | 206 | } |
| 287 | 207 | ||
| 288 | struct rs690_watermark { | 208 | struct rs690_watermark { |
| @@ -487,28 +407,28 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
| 487 | * option. | 407 | * option. |
| 488 | */ | 408 | */ |
| 489 | if (rdev->disp_priority == 2) { | 409 | if (rdev->disp_priority == 2) { |
| 490 | tmp = RREG32_MC(MC_INIT_MISC_LAT_TIMER); | 410 | tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER); |
| 491 | tmp &= ~MC_DISP1R_INIT_LAT_MASK; | 411 | tmp &= C_000104_MC_DISP0R_INIT_LAT; |
| 492 | tmp &= ~MC_DISP0R_INIT_LAT_MASK; | 412 | tmp &= C_000104_MC_DISP1R_INIT_LAT; |
| 493 | if (mode1) | ||
| 494 | tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT); | ||
| 495 | if (mode0) | 413 | if (mode0) |
| 496 | tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT); | 414 | tmp |= S_000104_MC_DISP0R_INIT_LAT(1); |
| 497 | WREG32_MC(MC_INIT_MISC_LAT_TIMER, tmp); | 415 | if (mode1) |
| 416 | tmp |= S_000104_MC_DISP1R_INIT_LAT(1); | ||
| 417 | WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp); | ||
| 498 | } | 418 | } |
| 499 | rs690_line_buffer_adjust(rdev, mode0, mode1); | 419 | rs690_line_buffer_adjust(rdev, mode0, mode1); |
| 500 | 420 | ||
| 501 | if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) | 421 | if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) |
| 502 | WREG32(DCP_CONTROL, 0); | 422 | WREG32(R_006C9C_DCP_CONTROL, 0); |
| 503 | if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) | 423 | if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) |
| 504 | WREG32(DCP_CONTROL, 2); | 424 | WREG32(R_006C9C_DCP_CONTROL, 2); |
| 505 | 425 | ||
| 506 | rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); | 426 | rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); |
| 507 | rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); | 427 | rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); |
| 508 | 428 | ||
| 509 | tmp = (wm0.lb_request_fifo_depth - 1); | 429 | tmp = (wm0.lb_request_fifo_depth - 1); |
| 510 | tmp |= (wm1.lb_request_fifo_depth - 1) << 16; | 430 | tmp |= (wm1.lb_request_fifo_depth - 1) << 16; |
| 511 | WREG32(LB_MAX_REQ_OUTSTANDING, tmp); | 431 | WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp); |
| 512 | 432 | ||
| 513 | if (mode0 && mode1) { | 433 | if (mode0 && mode1) { |
| 514 | if (rfixed_trunc(wm0.dbpp) > 64) | 434 | if (rfixed_trunc(wm0.dbpp) > 64) |
| @@ -561,10 +481,10 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
| 561 | priority_mark12.full = 0; | 481 | priority_mark12.full = 0; |
| 562 | if (wm1.priority_mark_max.full > priority_mark12.full) | 482 | if (wm1.priority_mark_max.full > priority_mark12.full) |
| 563 | priority_mark12.full = wm1.priority_mark_max.full; | 483 | priority_mark12.full = wm1.priority_mark_max.full; |
| 564 | WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | 484 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); |
| 565 | WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | 485 | WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); |
| 566 | WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | 486 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); |
| 567 | WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | 487 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); |
| 568 | } else if (mode0) { | 488 | } else if (mode0) { |
| 569 | if (rfixed_trunc(wm0.dbpp) > 64) | 489 | if (rfixed_trunc(wm0.dbpp) > 64) |
| 570 | a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); | 490 | a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); |
| @@ -591,10 +511,12 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
| 591 | priority_mark02.full = 0; | 511 | priority_mark02.full = 0; |
| 592 | if (wm0.priority_mark_max.full > priority_mark02.full) | 512 | if (wm0.priority_mark_max.full > priority_mark02.full) |
| 593 | priority_mark02.full = wm0.priority_mark_max.full; | 513 | priority_mark02.full = wm0.priority_mark_max.full; |
| 594 | WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | 514 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); |
| 595 | WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | 515 | WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); |
| 596 | WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | 516 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, |
| 597 | WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | 517 | S_006D48_D2MODE_PRIORITY_A_OFF(1)); |
| 518 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, | ||
| 519 | S_006D4C_D2MODE_PRIORITY_B_OFF(1)); | ||
| 598 | } else { | 520 | } else { |
| 599 | if (rfixed_trunc(wm1.dbpp) > 64) | 521 | if (rfixed_trunc(wm1.dbpp) > 64) |
| 600 | a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); | 522 | a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); |
| @@ -621,30 +543,203 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
| 621 | priority_mark12.full = 0; | 543 | priority_mark12.full = 0; |
| 622 | if (wm1.priority_mark_max.full > priority_mark12.full) | 544 | if (wm1.priority_mark_max.full > priority_mark12.full) |
| 623 | priority_mark12.full = wm1.priority_mark_max.full; | 545 | priority_mark12.full = wm1.priority_mark_max.full; |
| 624 | WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | 546 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, |
| 625 | WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | 547 | S_006548_D1MODE_PRIORITY_A_OFF(1)); |
| 626 | WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | 548 | WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, |
| 627 | WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | 549 | S_00654C_D1MODE_PRIORITY_B_OFF(1)); |
| 550 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | ||
| 551 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | ||
| 628 | } | 552 | } |
| 629 | } | 553 | } |
| 630 | 554 | ||
| 631 | /* | ||
| 632 | * Indirect registers accessor | ||
| 633 | */ | ||
| 634 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) | 555 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
| 635 | { | 556 | { |
| 636 | uint32_t r; | 557 | uint32_t r; |
| 637 | 558 | ||
| 638 | WREG32(RS690_MC_INDEX, (reg & RS690_MC_INDEX_MASK)); | 559 | WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg)); |
| 639 | r = RREG32(RS690_MC_DATA); | 560 | r = RREG32(R_00007C_MC_DATA); |
| 640 | WREG32(RS690_MC_INDEX, RS690_MC_INDEX_MASK); | 561 | WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR); |
| 641 | return r; | 562 | return r; |
| 642 | } | 563 | } |
| 643 | 564 | ||
| 644 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | 565 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
| 645 | { | 566 | { |
| 646 | WREG32(RS690_MC_INDEX, | 567 | WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) | |
| 647 | RS690_MC_INDEX_WR_EN | ((reg) & RS690_MC_INDEX_MASK)); | 568 | S_000078_MC_IND_WR_EN(1)); |
| 648 | WREG32(RS690_MC_DATA, v); | 569 | WREG32(R_00007C_MC_DATA, v); |
| 649 | WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); | 570 | WREG32(R_000078_MC_INDEX, 0x7F); |
| 571 | } | ||
| 572 | |||
| 573 | void rs690_mc_program(struct radeon_device *rdev) | ||
| 574 | { | ||
| 575 | struct rv515_mc_save save; | ||
| 576 | |||
| 577 | /* Stops all mc clients */ | ||
| 578 | rv515_mc_stop(rdev, &save); | ||
| 579 | |||
| 580 | /* Wait for mc idle */ | ||
| 581 | if (rs690_mc_wait_for_idle(rdev)) | ||
| 582 | dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); | ||
| 583 | /* Program MC, should be a 32bits limited address space */ | ||
| 584 | WREG32_MC(R_000100_MCCFG_FB_LOCATION, | ||
| 585 | S_000100_MC_FB_START(rdev->mc.vram_start >> 16) | | ||
| 586 | S_000100_MC_FB_TOP(rdev->mc.vram_end >> 16)); | ||
| 587 | WREG32(R_000134_HDP_FB_LOCATION, | ||
| 588 | S_000134_HDP_FB_START(rdev->mc.vram_start >> 16)); | ||
| 589 | |||
| 590 | rv515_mc_resume(rdev, &save); | ||
| 591 | } | ||
| 592 | |||
| 593 | static int rs690_startup(struct radeon_device *rdev) | ||
| 594 | { | ||
| 595 | int r; | ||
| 596 | |||
| 597 | rs690_mc_program(rdev); | ||
| 598 | /* Resume clock */ | ||
| 599 | rv515_clock_startup(rdev); | ||
| 600 | /* Initialize GPU configuration (# pipes, ...) */ | ||
| 601 | rs690_gpu_init(rdev); | ||
| 602 | /* Initialize GART (initialize after TTM so we can allocate | ||
| 603 | * memory through TTM but finalize after TTM) */ | ||
| 604 | r = rs400_gart_enable(rdev); | ||
| 605 | if (r) | ||
| 606 | return r; | ||
| 607 | /* Enable IRQ */ | ||
| 608 | rdev->irq.sw_int = true; | ||
| 609 | rs600_irq_set(rdev); | ||
| 610 | /* 1M ring buffer */ | ||
| 611 | r = r100_cp_init(rdev, 1024 * 1024); | ||
| 612 | if (r) { | ||
| 613 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | ||
| 614 | return r; | ||
| 615 | } | ||
| 616 | r = r100_wb_init(rdev); | ||
| 617 | if (r) | ||
| 618 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
| 619 | r = r100_ib_init(rdev); | ||
| 620 | if (r) { | ||
| 621 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | ||
| 622 | return r; | ||
| 623 | } | ||
| 624 | return 0; | ||
| 625 | } | ||
| 626 | |||
| 627 | int rs690_resume(struct radeon_device *rdev) | ||
| 628 | { | ||
| 629 | /* Make sure GART is not working */ | ||
| 630 | rs400_gart_disable(rdev); | ||
| 631 | /* Resume clock before doing reset */ | ||
| 632 | rv515_clock_startup(rdev); | ||
| 633 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
| 634 | if (radeon_gpu_reset(rdev)) { | ||
| 635 | dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
| 636 | RREG32(R_000E40_RBBM_STATUS), | ||
| 637 | RREG32(R_0007C0_CP_STAT)); | ||
| 638 | } | ||
| 639 | /* post */ | ||
| 640 | atom_asic_init(rdev->mode_info.atom_context); | ||
| 641 | /* Resume clock after posting */ | ||
| 642 | rv515_clock_startup(rdev); | ||
| 643 | return rs690_startup(rdev); | ||
| 644 | } | ||
| 645 | |||
| 646 | int rs690_suspend(struct radeon_device *rdev) | ||
| 647 | { | ||
| 648 | r100_cp_disable(rdev); | ||
| 649 | r100_wb_disable(rdev); | ||
| 650 | rs600_irq_disable(rdev); | ||
| 651 | rs400_gart_disable(rdev); | ||
| 652 | return 0; | ||
| 653 | } | ||
| 654 | |||
| 655 | void rs690_fini(struct radeon_device *rdev) | ||
| 656 | { | ||
| 657 | rs690_suspend(rdev); | ||
| 658 | r100_cp_fini(rdev); | ||
| 659 | r100_wb_fini(rdev); | ||
| 660 | r100_ib_fini(rdev); | ||
| 661 | radeon_gem_fini(rdev); | ||
| 662 | rs400_gart_fini(rdev); | ||
| 663 | radeon_irq_kms_fini(rdev); | ||
| 664 | radeon_fence_driver_fini(rdev); | ||
| 665 | radeon_object_fini(rdev); | ||
| 666 | radeon_atombios_fini(rdev); | ||
| 667 | kfree(rdev->bios); | ||
| 668 | rdev->bios = NULL; | ||
| 669 | } | ||
| 670 | |||
| 671 | int rs690_init(struct radeon_device *rdev) | ||
| 672 | { | ||
| 673 | int r; | ||
| 674 | |||
| 675 | /* Disable VGA */ | ||
| 676 | rv515_vga_render_disable(rdev); | ||
| 677 | /* Initialize scratch registers */ | ||
| 678 | radeon_scratch_init(rdev); | ||
| 679 | /* Initialize surface registers */ | ||
| 680 | radeon_surface_init(rdev); | ||
| 681 | /* TODO: disable VGA, need to use VGA request */ | ||
| 682 | /* BIOS*/ | ||
| 683 | if (!radeon_get_bios(rdev)) { | ||
| 684 | if (ASIC_IS_AVIVO(rdev)) | ||
| 685 | return -EINVAL; | ||
| 686 | } | ||
| 687 | if (rdev->is_atom_bios) { | ||
| 688 | r = radeon_atombios_init(rdev); | ||
| 689 | if (r) | ||
| 690 | return r; | ||
| 691 | } else { | ||
| 692 | dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n"); | ||
| 693 | return -EINVAL; | ||
| 694 | } | ||
| 695 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
| 696 | if (radeon_gpu_reset(rdev)) { | ||
| 697 | dev_warn(rdev->dev, | ||
| 698 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
| 699 | RREG32(R_000E40_RBBM_STATUS), | ||
| 700 | RREG32(R_0007C0_CP_STAT)); | ||
| 701 | } | ||
| 702 | /* check if cards are posted or not */ | ||
| 703 | if (!radeon_card_posted(rdev) && rdev->bios) { | ||
| 704 | DRM_INFO("GPU not posted. posting now...\n"); | ||
| 705 | atom_asic_init(rdev->mode_info.atom_context); | ||
| 706 | } | ||
| 707 | /* Initialize clocks */ | ||
| 708 | radeon_get_clock_info(rdev->ddev); | ||
| 709 | /* Get vram information */ | ||
| 710 | rs690_vram_info(rdev); | ||
| 711 | /* Initialize memory controller (also test AGP) */ | ||
| 712 | r = r420_mc_init(rdev); | ||
| 713 | if (r) | ||
| 714 | return r; | ||
| 715 | rv515_debugfs(rdev); | ||
| 716 | /* Fence driver */ | ||
| 717 | r = radeon_fence_driver_init(rdev); | ||
| 718 | if (r) | ||
| 719 | return r; | ||
| 720 | r = radeon_irq_kms_init(rdev); | ||
| 721 | if (r) | ||
| 722 | return r; | ||
| 723 | /* Memory manager */ | ||
| 724 | r = radeon_object_init(rdev); | ||
| 725 | if (r) | ||
| 726 | return r; | ||
| 727 | r = rs400_gart_init(rdev); | ||
| 728 | if (r) | ||
| 729 | return r; | ||
| 730 | rs600_set_safe_registers(rdev); | ||
| 731 | rdev->accel_working = true; | ||
| 732 | r = rs690_startup(rdev); | ||
| 733 | if (r) { | ||
| 734 | /* Something went wrong with the accel init, stop accel */ | ||
| 735 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | ||
| 736 | rs690_suspend(rdev); | ||
| 737 | r100_cp_fini(rdev); | ||
| 738 | r100_wb_fini(rdev); | ||
| 739 | r100_ib_fini(rdev); | ||
| 740 | rs400_gart_fini(rdev); | ||
| 741 | radeon_irq_kms_fini(rdev); | ||
| 742 | rdev->accel_working = false; | ||
| 743 | } | ||
| 744 | return 0; | ||
| 650 | } | 745 | } |
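The deliberate asymmetry at the end of rs690_init() is worth noting: when rs690_startup() fails, only the acceleration plumbing (CP, write-back, IB, GART, IRQ) is torn down and accel_working is cleared, but the function still returns 0 so kernel modesetting keeps working without acceleration. Callers are then expected to gate command submission on that flag; a minimal, hedged sketch of such a caller-side guard (the exact call site and error code are illustrative, not part of this patch):

	/* illustrative caller-side guard, assuming the accel_working semantics above */
	if (!rdev->accel_working) {
		DRM_ERROR("GPU acceleration disabled, rejecting command stream\n");
		return -EBUSY;
	}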
diff --git a/drivers/gpu/drm/radeon/rs690d.h b/drivers/gpu/drm/radeon/rs690d.h new file mode 100644 index 000000000000..62d31e7a897f --- /dev/null +++ b/drivers/gpu/drm/radeon/rs690d.h | |||
| @@ -0,0 +1,307 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #ifndef __RS690D_H__ | ||
| 29 | #define __RS690D_H__ | ||
| 30 | |||
| 31 | /* Registers */ | ||
| 32 | #define R_000078_MC_INDEX 0x000078 | ||
| 33 | #define S_000078_MC_IND_ADDR(x) (((x) & 0x1FF) << 0) | ||
| 34 | #define G_000078_MC_IND_ADDR(x) (((x) >> 0) & 0x1FF) | ||
| 35 | #define C_000078_MC_IND_ADDR 0xFFFFFE00 | ||
| 36 | #define S_000078_MC_IND_WR_EN(x) (((x) & 0x1) << 9) | ||
| 37 | #define G_000078_MC_IND_WR_EN(x) (((x) >> 9) & 0x1) | ||
| 38 | #define C_000078_MC_IND_WR_EN 0xFFFFFDFF | ||
| 39 | #define R_00007C_MC_DATA 0x00007C | ||
| 40 | #define S_00007C_MC_DATA(x) (((x) & 0xFFFFFFFF) << 0) | ||
| 41 | #define G_00007C_MC_DATA(x) (((x) >> 0) & 0xFFFFFFFF) | ||
| 42 | #define C_00007C_MC_DATA 0x00000000 | ||
| 43 | #define R_0000F8_CONFIG_MEMSIZE 0x0000F8 | ||
| 44 | #define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0) | ||
| 45 | #define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF) | ||
| 46 | #define C_0000F8_CONFIG_MEMSIZE 0x00000000 | ||
| 47 | #define R_000134_HDP_FB_LOCATION 0x000134 | ||
| 48 | #define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0) | ||
| 49 | #define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
| 50 | #define C_000134_HDP_FB_START 0xFFFF0000 | ||
| 51 | #define R_0007C0_CP_STAT 0x0007C0 | ||
| 52 | #define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) | ||
| 53 | #define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) | ||
| 54 | #define C_0007C0_MRU_BUSY 0xFFFFFFFE | ||
| 55 | #define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) | ||
| 56 | #define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) | ||
| 57 | #define C_0007C0_MWU_BUSY 0xFFFFFFFD | ||
| 58 | #define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) | ||
| 59 | #define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) | ||
| 60 | #define C_0007C0_RSIU_BUSY 0xFFFFFFFB | ||
| 61 | #define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) | ||
| 62 | #define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) | ||
| 63 | #define C_0007C0_RCIU_BUSY 0xFFFFFFF7 | ||
| 64 | #define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) | ||
| 65 | #define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) | ||
| 66 | #define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF | ||
| 67 | #define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) | ||
| 68 | #define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) | ||
| 69 | #define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF | ||
| 70 | #define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) | ||
| 71 | #define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) | ||
| 72 | #define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF | ||
| 73 | #define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) | ||
| 74 | #define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) | ||
| 75 | #define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF | ||
| 76 | #define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) | ||
| 77 | #define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) | ||
| 78 | #define C_0007C0_CSI_BUSY 0xFFFFDFFF | ||
| 79 | #define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14) | ||
| 80 | #define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1) | ||
| 81 | #define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF | ||
| 82 | #define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15) | ||
| 83 | #define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1) | ||
| 84 | #define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF | ||
| 85 | #define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) | ||
| 86 | #define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) | ||
| 87 | #define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF | ||
| 88 | #define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) | ||
| 89 | #define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) | ||
| 90 | #define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF | ||
| 91 | #define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) | ||
| 92 | #define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) | ||
| 93 | #define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF | ||
| 94 | #define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) | ||
| 95 | #define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) | ||
| 96 | #define C_0007C0_CP_BUSY 0x7FFFFFFF | ||
| 97 | #define R_000E40_RBBM_STATUS 0x000E40 | ||
| 98 | #define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) | ||
| 99 | #define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) | ||
| 100 | #define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 | ||
| 101 | #define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) | ||
| 102 | #define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) | ||
| 103 | #define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF | ||
| 104 | #define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) | ||
| 105 | #define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) | ||
| 106 | #define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF | ||
| 107 | #define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) | ||
| 108 | #define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) | ||
| 109 | #define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF | ||
| 110 | #define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) | ||
| 111 | #define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) | ||
| 112 | #define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF | ||
| 113 | #define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) | ||
| 114 | #define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) | ||
| 115 | #define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF | ||
| 116 | #define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) | ||
| 117 | #define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) | ||
| 118 | #define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF | ||
| 119 | #define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) | ||
| 120 | #define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) | ||
| 121 | #define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF | ||
| 122 | #define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) | ||
| 123 | #define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) | ||
| 124 | #define C_000E40_ENG_EV_BUSY 0xFFFF7FFF | ||
| 125 | #define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) | ||
| 126 | #define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) | ||
| 127 | #define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF | ||
| 128 | #define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) | ||
| 129 | #define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) | ||
| 130 | #define C_000E40_E2_BUSY 0xFFFDFFFF | ||
| 131 | #define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) | ||
| 132 | #define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) | ||
| 133 | #define C_000E40_RB2D_BUSY 0xFFFBFFFF | ||
| 134 | #define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) | ||
| 135 | #define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) | ||
| 136 | #define C_000E40_RB3D_BUSY 0xFFF7FFFF | ||
| 137 | #define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20) | ||
| 138 | #define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1) | ||
| 139 | #define C_000E40_VAP_BUSY 0xFFEFFFFF | ||
| 140 | #define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) | ||
| 141 | #define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) | ||
| 142 | #define C_000E40_RE_BUSY 0xFFDFFFFF | ||
| 143 | #define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) | ||
| 144 | #define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) | ||
| 145 | #define C_000E40_TAM_BUSY 0xFFBFFFFF | ||
| 146 | #define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) | ||
| 147 | #define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) | ||
| 148 | #define C_000E40_TDM_BUSY 0xFF7FFFFF | ||
| 149 | #define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) | ||
| 150 | #define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) | ||
| 151 | #define C_000E40_PB_BUSY 0xFEFFFFFF | ||
| 152 | #define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25) | ||
| 153 | #define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1) | ||
| 154 | #define C_000E40_TIM_BUSY 0xFDFFFFFF | ||
| 155 | #define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26) | ||
| 156 | #define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1) | ||
| 157 | #define C_000E40_GA_BUSY 0xFBFFFFFF | ||
| 158 | #define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27) | ||
| 159 | #define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1) | ||
| 160 | #define C_000E40_CBA2D_BUSY 0xF7FFFFFF | ||
| 161 | #define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) | ||
| 162 | #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) | ||
| 163 | #define C_000E40_GUI_ACTIVE 0x7FFFFFFF | ||
| 164 | #define R_006520_DC_LB_MEMORY_SPLIT 0x006520 | ||
| 165 | #define S_006520_DC_LB_MEMORY_SPLIT(x) (((x) & 0x3) << 0) | ||
| 166 | #define G_006520_DC_LB_MEMORY_SPLIT(x) (((x) >> 0) & 0x3) | ||
| 167 | #define C_006520_DC_LB_MEMORY_SPLIT 0xFFFFFFFC | ||
| 168 | #define S_006520_DC_LB_MEMORY_SPLIT_MODE(x) (((x) & 0x1) << 2) | ||
| 169 | #define G_006520_DC_LB_MEMORY_SPLIT_MODE(x) (((x) >> 2) & 0x1) | ||
| 170 | #define C_006520_DC_LB_MEMORY_SPLIT_MODE 0xFFFFFFFB | ||
| 171 | #define V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0 | ||
| 172 | #define V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1 | ||
| 173 | #define V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY 2 | ||
| 174 | #define V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3 | ||
| 175 | #define S_006520_DC_LB_DISP1_END_ADR(x) (((x) & 0x7FF) << 4) | ||
| 176 | #define G_006520_DC_LB_DISP1_END_ADR(x) (((x) >> 4) & 0x7FF) | ||
| 177 | #define C_006520_DC_LB_DISP1_END_ADR 0xFFFF800F | ||
| 178 | #define R_006548_D1MODE_PRIORITY_A_CNT 0x006548 | ||
| 179 | #define S_006548_D1MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0) | ||
| 180 | #define G_006548_D1MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF) | ||
| 181 | #define C_006548_D1MODE_PRIORITY_MARK_A 0xFFFF8000 | ||
| 182 | #define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16) | ||
| 183 | #define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1) | ||
| 184 | #define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF | ||
| 185 | #define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24) | ||
| 186 | #define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1) | ||
| 187 | #define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF | ||
| 188 | #define R_00654C_D1MODE_PRIORITY_B_CNT 0x00654C | ||
| 189 | #define S_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0) | ||
| 190 | #define G_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF) | ||
| 191 | #define C_00654C_D1MODE_PRIORITY_MARK_B 0xFFFF8000 | ||
| 192 | #define S_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16) | ||
| 193 | #define G_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1) | ||
| 194 | #define C_00654C_D1MODE_PRIORITY_B_OFF 0xFFFEFFFF | ||
| 195 | #define S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20) | ||
| 196 | #define G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1) | ||
| 197 | #define C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF | ||
| 198 | #define S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24) | ||
| 199 | #define G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1) | ||
| 200 | #define C_00654C_D1MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF | ||
| 201 | #define R_006C9C_DCP_CONTROL 0x006C9C | ||
| 202 | #define R_006D48_D2MODE_PRIORITY_A_CNT 0x006D48 | ||
| 203 | #define S_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0) | ||
| 204 | #define G_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF) | ||
| 205 | #define C_006D48_D2MODE_PRIORITY_MARK_A 0xFFFF8000 | ||
| 206 | #define S_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16) | ||
| 207 | #define G_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1) | ||
| 208 | #define C_006D48_D2MODE_PRIORITY_A_OFF 0xFFFEFFFF | ||
| 209 | #define S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20) | ||
| 210 | #define G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1) | ||
| 211 | #define C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF | ||
| 212 | #define S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24) | ||
| 213 | #define G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1) | ||
| 214 | #define C_006D48_D2MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF | ||
| 215 | #define R_006D4C_D2MODE_PRIORITY_B_CNT 0x006D4C | ||
| 216 | #define S_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0) | ||
| 217 | #define G_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF) | ||
| 218 | #define C_006D4C_D2MODE_PRIORITY_MARK_B 0xFFFF8000 | ||
| 219 | #define S_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16) | ||
| 220 | #define G_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1) | ||
| 221 | #define C_006D4C_D2MODE_PRIORITY_B_OFF 0xFFFEFFFF | ||
| 222 | #define S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20) | ||
| 223 | #define G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1) | ||
| 224 | #define C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF | ||
| 225 | #define S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24) | ||
| 226 | #define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1) | ||
| 227 | #define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF | ||
| 228 | #define R_006D58_LB_MAX_REQ_OUTSTANDING 0x006D58 | ||
| 229 | #define S_006D58_LB_D1_MAX_REQ_OUTSTANDING(x) (((x) & 0xF) << 0) | ||
| 230 | #define G_006D58_LB_D1_MAX_REQ_OUTSTANDING(x) (((x) >> 0) & 0xF) | ||
| 231 | #define C_006D58_LB_D1_MAX_REQ_OUTSTANDING 0xFFFFFFF0 | ||
| 232 | #define S_006D58_LB_D2_MAX_REQ_OUTSTANDING(x) (((x) & 0xF) << 16) | ||
| 233 | #define G_006D58_LB_D2_MAX_REQ_OUTSTANDING(x) (((x) >> 16) & 0xF) | ||
| 234 | #define C_006D58_LB_D2_MAX_REQ_OUTSTANDING 0xFFF0FFFF | ||
| 235 | |||
| 236 | |||
| 237 | #define R_000090_MC_SYSTEM_STATUS 0x000090 | ||
| 238 | #define S_000090_MC_SYSTEM_IDLE(x) (((x) & 0x1) << 0) | ||
| 239 | #define G_000090_MC_SYSTEM_IDLE(x) (((x) >> 0) & 0x1) | ||
| 240 | #define C_000090_MC_SYSTEM_IDLE 0xFFFFFFFE | ||
| 241 | #define S_000090_MC_SEQUENCER_IDLE(x) (((x) & 0x1) << 1) | ||
| 242 | #define G_000090_MC_SEQUENCER_IDLE(x) (((x) >> 1) & 0x1) | ||
| 243 | #define C_000090_MC_SEQUENCER_IDLE 0xFFFFFFFD | ||
| 244 | #define S_000090_MC_ARBITER_IDLE(x) (((x) & 0x1) << 2) | ||
| 245 | #define G_000090_MC_ARBITER_IDLE(x) (((x) >> 2) & 0x1) | ||
| 246 | #define C_000090_MC_ARBITER_IDLE 0xFFFFFFFB | ||
| 247 | #define S_000090_MC_SELECT_PM(x) (((x) & 0x1) << 3) | ||
| 248 | #define G_000090_MC_SELECT_PM(x) (((x) >> 3) & 0x1) | ||
| 249 | #define C_000090_MC_SELECT_PM 0xFFFFFFF7 | ||
| 250 | #define S_000090_RESERVED4(x) (((x) & 0xF) << 4) | ||
| 251 | #define G_000090_RESERVED4(x) (((x) >> 4) & 0xF) | ||
| 252 | #define C_000090_RESERVED4 0xFFFFFF0F | ||
| 253 | #define S_000090_RESERVED8(x) (((x) & 0xF) << 8) | ||
| 254 | #define G_000090_RESERVED8(x) (((x) >> 8) & 0xF) | ||
| 255 | #define C_000090_RESERVED8 0xFFFFF0FF | ||
| 256 | #define S_000090_RESERVED12(x) (((x) & 0xF) << 12) | ||
| 257 | #define G_000090_RESERVED12(x) (((x) >> 12) & 0xF) | ||
| 258 | #define C_000090_RESERVED12 0xFFFF0FFF | ||
| 259 | #define S_000090_MCA_INIT_EXECUTED(x) (((x) & 0x1) << 16) | ||
| 260 | #define G_000090_MCA_INIT_EXECUTED(x) (((x) >> 16) & 0x1) | ||
| 261 | #define C_000090_MCA_INIT_EXECUTED 0xFFFEFFFF | ||
| 262 | #define S_000090_MCA_IDLE(x) (((x) & 0x1) << 17) | ||
| 263 | #define G_000090_MCA_IDLE(x) (((x) >> 17) & 0x1) | ||
| 264 | #define C_000090_MCA_IDLE 0xFFFDFFFF | ||
| 265 | #define S_000090_MCA_SEQ_IDLE(x) (((x) & 0x1) << 18) | ||
| 266 | #define G_000090_MCA_SEQ_IDLE(x) (((x) >> 18) & 0x1) | ||
| 267 | #define C_000090_MCA_SEQ_IDLE 0xFFFBFFFF | ||
| 268 | #define S_000090_MCA_ARB_IDLE(x) (((x) & 0x1) << 19) | ||
| 269 | #define G_000090_MCA_ARB_IDLE(x) (((x) >> 19) & 0x1) | ||
| 270 | #define C_000090_MCA_ARB_IDLE 0xFFF7FFFF | ||
| 271 | #define S_000090_RESERVED20(x) (((x) & 0xFFF) << 20) | ||
| 272 | #define G_000090_RESERVED20(x) (((x) >> 20) & 0xFFF) | ||
| 273 | #define C_000090_RESERVED20 0x000FFFFF | ||
| 274 | #define R_000100_MCCFG_FB_LOCATION 0x000100 | ||
| 275 | #define S_000100_MC_FB_START(x) (((x) & 0xFFFF) << 0) | ||
| 276 | #define G_000100_MC_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
| 277 | #define C_000100_MC_FB_START 0xFFFF0000 | ||
| 278 | #define S_000100_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) | ||
| 279 | #define G_000100_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) | ||
| 280 | #define C_000100_MC_FB_TOP 0x0000FFFF | ||
| 281 | #define R_000104_MC_INIT_MISC_LAT_TIMER 0x000104 | ||
| 282 | #define S_000104_MC_CPR_INIT_LAT(x) (((x) & 0xF) << 0) | ||
| 283 | #define G_000104_MC_CPR_INIT_LAT(x) (((x) >> 0) & 0xF) | ||
| 284 | #define C_000104_MC_CPR_INIT_LAT 0xFFFFFFF0 | ||
| 285 | #define S_000104_MC_VF_INIT_LAT(x) (((x) & 0xF) << 4) | ||
| 286 | #define G_000104_MC_VF_INIT_LAT(x) (((x) >> 4) & 0xF) | ||
| 287 | #define C_000104_MC_VF_INIT_LAT 0xFFFFFF0F | ||
| 288 | #define S_000104_MC_DISP0R_INIT_LAT(x) (((x) & 0xF) << 8) | ||
| 289 | #define G_000104_MC_DISP0R_INIT_LAT(x) (((x) >> 8) & 0xF) | ||
| 290 | #define C_000104_MC_DISP0R_INIT_LAT 0xFFFFF0FF | ||
| 291 | #define S_000104_MC_DISP1R_INIT_LAT(x) (((x) & 0xF) << 12) | ||
| 292 | #define G_000104_MC_DISP1R_INIT_LAT(x) (((x) >> 12) & 0xF) | ||
| 293 | #define C_000104_MC_DISP1R_INIT_LAT 0xFFFF0FFF | ||
| 294 | #define S_000104_MC_FIXED_INIT_LAT(x) (((x) & 0xF) << 16) | ||
| 295 | #define G_000104_MC_FIXED_INIT_LAT(x) (((x) >> 16) & 0xF) | ||
| 296 | #define C_000104_MC_FIXED_INIT_LAT 0xFFF0FFFF | ||
| 297 | #define S_000104_MC_E2R_INIT_LAT(x) (((x) & 0xF) << 20) | ||
| 298 | #define G_000104_MC_E2R_INIT_LAT(x) (((x) >> 20) & 0xF) | ||
| 299 | #define C_000104_MC_E2R_INIT_LAT 0xFF0FFFFF | ||
| 300 | #define S_000104_SAME_PAGE_PRIO(x) (((x) & 0xF) << 24) | ||
| 301 | #define G_000104_SAME_PAGE_PRIO(x) (((x) >> 24) & 0xF) | ||
| 302 | #define C_000104_SAME_PAGE_PRIO 0xF0FFFFFF | ||
| 303 | #define S_000104_MC_GLOBW_INIT_LAT(x) (((x) & 0xF) << 28) | ||
| 304 | #define G_000104_MC_GLOBW_INIT_LAT(x) (((x) >> 28) & 0xF) | ||
| 305 | #define C_000104_MC_GLOBW_INIT_LAT 0x0FFFFFFF | ||
| 306 | |||
| 307 | #endif | ||
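The generated accessors in this header all follow one pattern: S_* shifts a field value into position, G_* extracts it from a register word, and C_* is the AND-mask that clears the field for a read-modify-write. A hedged sketch of the two usual idioms built on them — bounded polling of a status bit and updating a single field — using register names from this header (RREG32/WREG32 are the driver's MMIO helpers; the usec_timeout bound and the >> 16 granularity of HDP_FB_START are assumptions for illustration):

	u32 tmp;
	unsigned i;

	/* poll a status bit: wait (bounded) for the GUI engine to go idle */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (!G_000E40_GUI_ACTIVE(RREG32(R_000E40_RBBM_STATUS)))
			break;
		udelay(1);
	}

	/* read-modify-write one field: clear it with C_*, then OR in S_*(new value) */
	tmp = RREG32(R_000134_HDP_FB_LOCATION);
	tmp &= C_000134_HDP_FB_START;
	tmp |= S_000134_HDP_FB_START(rdev->mc.vram_start >> 16);
	WREG32(R_000134_HDP_FB_LOCATION, tmp);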
diff --git a/drivers/gpu/drm/radeon/rs690r.h b/drivers/gpu/drm/radeon/rs690r.h deleted file mode 100644 index c0d9faa2175b..000000000000 --- a/drivers/gpu/drm/radeon/rs690r.h +++ /dev/null | |||
| @@ -1,99 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #ifndef RS690R_H | ||
| 29 | #define RS690R_H | ||
| 30 | |||
| 31 | /* RS690/RS740 registers */ | ||
| 32 | #define MC_INDEX 0x0078 | ||
| 33 | # define MC_INDEX_MASK 0x1FF | ||
| 34 | # define MC_INDEX_WR_EN (1 << 9) | ||
| 35 | # define MC_INDEX_WR_ACK 0x7F | ||
| 36 | #define MC_DATA 0x007C | ||
| 37 | #define HDP_FB_LOCATION 0x0134 | ||
| 38 | #define DC_LB_MEMORY_SPLIT 0x6520 | ||
| 39 | #define DC_LB_MEMORY_SPLIT_MASK 0x00000003 | ||
| 40 | #define DC_LB_MEMORY_SPLIT_SHIFT 0 | ||
| 41 | #define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0 | ||
| 42 | #define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1 | ||
| 43 | #define DC_LB_MEMORY_SPLIT_D1_ONLY 2 | ||
| 44 | #define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3 | ||
| 45 | #define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2) | ||
| 46 | #define DC_LB_DISP1_END_ADR_SHIFT 4 | ||
| 47 | #define DC_LB_DISP1_END_ADR_MASK 0x00007FF0 | ||
| 48 | #define D1MODE_PRIORITY_A_CNT 0x6548 | ||
| 49 | #define MODE_PRIORITY_MARK_MASK 0x00007FFF | ||
| 50 | #define MODE_PRIORITY_OFF (1 << 16) | ||
| 51 | #define MODE_PRIORITY_ALWAYS_ON (1 << 20) | ||
| 52 | #define MODE_PRIORITY_FORCE_MASK (1 << 24) | ||
| 53 | #define D1MODE_PRIORITY_B_CNT 0x654C | ||
| 54 | #define LB_MAX_REQ_OUTSTANDING 0x6D58 | ||
| 55 | #define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F | ||
| 56 | #define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0 | ||
| 57 | #define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000 | ||
| 58 | #define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16 | ||
| 59 | #define DCP_CONTROL 0x6C9C | ||
| 60 | #define D2MODE_PRIORITY_A_CNT 0x6D48 | ||
| 61 | #define D2MODE_PRIORITY_B_CNT 0x6D4C | ||
| 62 | |||
| 63 | /* MC indirect registers */ | ||
| 64 | #define MC_STATUS_IDLE (1 << 0) | ||
| 65 | #define MC_MISC_CNTL 0x18 | ||
| 66 | #define DISABLE_GTW (1 << 1) | ||
| 67 | #define GART_INDEX_REG_EN (1 << 12) | ||
| 68 | #define BLOCK_GFX_D3_EN (1 << 14) | ||
| 69 | #define GART_FEATURE_ID 0x2B | ||
| 70 | #define HANG_EN (1 << 11) | ||
| 71 | #define TLB_ENABLE (1 << 18) | ||
| 72 | #define P2P_ENABLE (1 << 19) | ||
| 73 | #define GTW_LAC_EN (1 << 25) | ||
| 74 | #define LEVEL2_GART (0 << 30) | ||
| 75 | #define LEVEL1_GART (1 << 30) | ||
| 76 | #define PDC_EN (1 << 31) | ||
| 77 | #define GART_BASE 0x2C | ||
| 78 | #define GART_CACHE_CNTRL 0x2E | ||
| 79 | # define GART_CACHE_INVALIDATE (1 << 0) | ||
| 80 | #define MC_STATUS 0x90 | ||
| 81 | #define MCCFG_FB_LOCATION 0x100 | ||
| 82 | #define MC_FB_START_MASK 0x0000FFFF | ||
| 83 | #define MC_FB_START_SHIFT 0 | ||
| 84 | #define MC_FB_TOP_MASK 0xFFFF0000 | ||
| 85 | #define MC_FB_TOP_SHIFT 16 | ||
| 86 | #define MCCFG_AGP_LOCATION 0x101 | ||
| 87 | #define MC_AGP_START_MASK 0x0000FFFF | ||
| 88 | #define MC_AGP_START_SHIFT 0 | ||
| 89 | #define MC_AGP_TOP_MASK 0xFFFF0000 | ||
| 90 | #define MC_AGP_TOP_SHIFT 16 | ||
| 91 | #define MCCFG_AGP_BASE 0x102 | ||
| 92 | #define MCCFG_AGP_BASE_2 0x103 | ||
| 93 | #define MC_INIT_MISC_LAT_TIMER 0x104 | ||
| 94 | #define MC_DISP0R_INIT_LAT_SHIFT 8 | ||
| 95 | #define MC_DISP0R_INIT_LAT_MASK 0x00000F00 | ||
| 96 | #define MC_DISP1R_INIT_LAT_SHIFT 12 | ||
| 97 | #define MC_DISP1R_INIT_LAT_MASK 0x0000F000 | ||
| 98 | |||
| 99 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/rv200d.h b/drivers/gpu/drm/radeon/rv200d.h new file mode 100644 index 000000000000..c5b398330c26 --- /dev/null +++ b/drivers/gpu/drm/radeon/rv200d.h | |||
| @@ -0,0 +1,36 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #ifndef __RV200D_H__ | ||
| 29 | #define __RV200D_H__ | ||
| 30 | |||
| 31 | #define R_00015C_AGP_BASE_2 0x00015C | ||
| 32 | #define S_00015C_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0) | ||
| 33 | #define G_00015C_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF) | ||
| 34 | #define C_00015C_AGP_BASE_ADDR_2 0xFFFFFFF0 | ||
| 35 | |||
| 36 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/rv250d.h b/drivers/gpu/drm/radeon/rv250d.h new file mode 100644 index 000000000000..e5a70b06fe1f --- /dev/null +++ b/drivers/gpu/drm/radeon/rv250d.h | |||
| @@ -0,0 +1,123 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #ifndef __RV250D_H__ | ||
| 29 | #define __RV250D_H__ | ||
| 30 | |||
| 31 | #define R_00000D_SCLK_CNTL_M6 0x00000D | ||
| 32 | #define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0) | ||
| 33 | #define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7) | ||
| 34 | #define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8 | ||
| 35 | #define S_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 3) | ||
| 36 | #define G_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) >> 3) & 0x1) | ||
| 37 | #define C_00000D_CP_MAX_DYN_STOP_LAT 0xFFFFFFF7 | ||
| 38 | #define S_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 4) | ||
| 39 | #define G_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) >> 4) & 0x1) | ||
| 40 | #define C_00000D_HDP_MAX_DYN_STOP_LAT 0xFFFFFFEF | ||
| 41 | #define S_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 5) | ||
| 42 | #define G_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) >> 5) & 0x1) | ||
| 43 | #define C_00000D_TV_MAX_DYN_STOP_LAT 0xFFFFFFDF | ||
| 44 | #define S_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 6) | ||
| 45 | #define G_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) >> 6) & 0x1) | ||
| 46 | #define C_00000D_E2_MAX_DYN_STOP_LAT 0xFFFFFFBF | ||
| 47 | #define S_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 7) | ||
| 48 | #define G_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) >> 7) & 0x1) | ||
| 49 | #define C_00000D_SE_MAX_DYN_STOP_LAT 0xFFFFFF7F | ||
| 50 | #define S_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 8) | ||
| 51 | #define G_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 8) & 0x1) | ||
| 52 | #define C_00000D_IDCT_MAX_DYN_STOP_LAT 0xFFFFFEFF | ||
| 53 | #define S_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 9) | ||
| 54 | #define G_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) >> 9) & 0x1) | ||
| 55 | #define C_00000D_VIP_MAX_DYN_STOP_LAT 0xFFFFFDFF | ||
| 56 | #define S_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 10) | ||
| 57 | #define G_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) >> 10) & 0x1) | ||
| 58 | #define C_00000D_RE_MAX_DYN_STOP_LAT 0xFFFFFBFF | ||
| 59 | #define S_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 11) | ||
| 60 | #define G_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) >> 11) & 0x1) | ||
| 61 | #define C_00000D_PB_MAX_DYN_STOP_LAT 0xFFFFF7FF | ||
| 62 | #define S_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 12) | ||
| 63 | #define G_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) >> 12) & 0x1) | ||
| 64 | #define C_00000D_TAM_MAX_DYN_STOP_LAT 0xFFFFEFFF | ||
| 65 | #define S_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 13) | ||
| 66 | #define G_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) >> 13) & 0x1) | ||
| 67 | #define C_00000D_TDM_MAX_DYN_STOP_LAT 0xFFFFDFFF | ||
| 68 | #define S_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 14) | ||
| 69 | #define G_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) >> 14) & 0x1) | ||
| 70 | #define C_00000D_RB_MAX_DYN_STOP_LAT 0xFFFFBFFF | ||
| 71 | #define S_00000D_FORCE_DISP2(x) (((x) & 0x1) << 15) | ||
| 72 | #define G_00000D_FORCE_DISP2(x) (((x) >> 15) & 0x1) | ||
| 73 | #define C_00000D_FORCE_DISP2 0xFFFF7FFF | ||
| 74 | #define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16) | ||
| 75 | #define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1) | ||
| 76 | #define C_00000D_FORCE_CP 0xFFFEFFFF | ||
| 77 | #define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17) | ||
| 78 | #define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1) | ||
| 79 | #define C_00000D_FORCE_HDP 0xFFFDFFFF | ||
| 80 | #define S_00000D_FORCE_DISP1(x) (((x) & 0x1) << 18) | ||
| 81 | #define G_00000D_FORCE_DISP1(x) (((x) >> 18) & 0x1) | ||
| 82 | #define C_00000D_FORCE_DISP1 0xFFFBFFFF | ||
| 83 | #define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19) | ||
| 84 | #define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1) | ||
| 85 | #define C_00000D_FORCE_TOP 0xFFF7FFFF | ||
| 86 | #define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) | ||
| 87 | #define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) | ||
| 88 | #define C_00000D_FORCE_E2 0xFFEFFFFF | ||
| 89 | #define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21) | ||
| 90 | #define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1) | ||
| 91 | #define C_00000D_FORCE_SE 0xFFDFFFFF | ||
| 92 | #define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) | ||
| 93 | #define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) | ||
| 94 | #define C_00000D_FORCE_IDCT 0xFFBFFFFF | ||
| 95 | #define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23) | ||
| 96 | #define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1) | ||
| 97 | #define C_00000D_FORCE_VIP 0xFF7FFFFF | ||
| 98 | #define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) | ||
| 99 | #define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) | ||
| 100 | #define C_00000D_FORCE_RE 0xFEFFFFFF | ||
| 101 | #define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25) | ||
| 102 | #define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1) | ||
| 103 | #define C_00000D_FORCE_PB 0xFDFFFFFF | ||
| 104 | #define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26) | ||
| 105 | #define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1) | ||
| 106 | #define C_00000D_FORCE_TAM 0xFBFFFFFF | ||
| 107 | #define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27) | ||
| 108 | #define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1) | ||
| 109 | #define C_00000D_FORCE_TDM 0xF7FFFFFF | ||
| 110 | #define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28) | ||
| 111 | #define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) | ||
| 112 | #define C_00000D_FORCE_RB 0xEFFFFFFF | ||
| 113 | #define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29) | ||
| 114 | #define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1) | ||
| 115 | #define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF | ||
| 116 | #define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30) | ||
| 117 | #define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1) | ||
| 118 | #define C_00000D_FORCE_SUBPIC 0xBFFFFFFF | ||
| 119 | #define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31) | ||
| 120 | #define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1) | ||
| 121 | #define C_00000D_FORCE_OV0 0x7FFFFFFF | ||
| 122 | |||
| 123 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/rv350d.h b/drivers/gpu/drm/radeon/rv350d.h new file mode 100644 index 000000000000..c75c5ed9e654 --- /dev/null +++ b/drivers/gpu/drm/radeon/rv350d.h | |||
| @@ -0,0 +1,52 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #ifndef __RV350D_H__ | ||
| 29 | #define __RV350D_H__ | ||
| 30 | |||
| 31 | /* RV350, RV380 registers */ | ||
| 32 | /* #define R_00000D_SCLK_CNTL 0x00000D */ | ||
| 33 | #define S_00000D_FORCE_VAP(x) (((x) & 0x1) << 21) | ||
| 34 | #define G_00000D_FORCE_VAP(x) (((x) >> 21) & 0x1) | ||
| 35 | #define C_00000D_FORCE_VAP 0xFFDFFFFF | ||
| 36 | #define S_00000D_FORCE_SR(x) (((x) & 0x1) << 25) | ||
| 37 | #define G_00000D_FORCE_SR(x) (((x) >> 25) & 0x1) | ||
| 38 | #define C_00000D_FORCE_SR 0xFDFFFFFF | ||
| 39 | #define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26) | ||
| 40 | #define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1) | ||
| 41 | #define C_00000D_FORCE_PX 0xFBFFFFFF | ||
| 42 | #define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27) | ||
| 43 | #define G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1) | ||
| 44 | #define C_00000D_FORCE_TX 0xF7FFFFFF | ||
| 45 | #define S_00000D_FORCE_US(x) (((x) & 0x1) << 28) | ||
| 46 | #define G_00000D_FORCE_US(x) (((x) >> 28) & 0x1) | ||
| 47 | #define C_00000D_FORCE_US 0xEFFFFFFF | ||
| 48 | #define S_00000D_FORCE_SU(x) (((x) & 0x1) << 30) | ||
| 49 | #define G_00000D_FORCE_SU(x) (((x) >> 30) & 0x1) | ||
| 50 | #define C_00000D_FORCE_SU 0xBFFFFFFF | ||
| 51 | |||
| 52 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index e53b5ca7a253..41a34c23e6d8 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
| @@ -478,7 +478,7 @@ static int rv515_startup(struct radeon_device *rdev) | |||
| 478 | } | 478 | } |
| 479 | /* Enable IRQ */ | 479 | /* Enable IRQ */ |
| 480 | rdev->irq.sw_int = true; | 480 | rdev->irq.sw_int = true; |
| 481 | r100_irq_set(rdev); | 481 | rs600_irq_set(rdev); |
| 482 | /* 1M ring buffer */ | 482 | /* 1M ring buffer */ |
| 483 | r = r100_cp_init(rdev, 1024 * 1024); | 483 | r = r100_cp_init(rdev, 1024 * 1024); |
| 484 | if (r) { | 484 | if (r) { |
| @@ -520,7 +520,7 @@ int rv515_suspend(struct radeon_device *rdev) | |||
| 520 | { | 520 | { |
| 521 | r100_cp_disable(rdev); | 521 | r100_cp_disable(rdev); |
| 522 | r100_wb_disable(rdev); | 522 | r100_wb_disable(rdev); |
| 523 | r100_irq_disable(rdev); | 523 | rs600_irq_disable(rdev); |
| 524 | if (rdev->flags & RADEON_IS_PCIE) | 524 | if (rdev->flags & RADEON_IS_PCIE) |
| 525 | rv370_pcie_gart_disable(rdev); | 525 | rv370_pcie_gart_disable(rdev); |
| 526 | return 0; | 526 | return 0; |
| @@ -553,7 +553,6 @@ int rv515_init(struct radeon_device *rdev) | |||
| 553 | { | 553 | { |
| 554 | int r; | 554 | int r; |
| 555 | 555 | ||
| 556 | rdev->new_init_path = true; | ||
| 557 | /* Initialize scratch registers */ | 556 | /* Initialize scratch registers */ |
| 558 | radeon_scratch_init(rdev); | 557 | radeon_scratch_init(rdev); |
| 559 | /* Initialize surface registers */ | 558 | /* Initialize surface registers */ |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index e0b97d161397..595ac638039d 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
| @@ -75,7 +75,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev) | |||
| 75 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); | 75 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); |
| 76 | WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); | 76 | WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); |
| 77 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); | 77 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); |
| 78 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12); | 78 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); |
| 79 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); | 79 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); |
| 80 | WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | | 80 | WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | |
| 81 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); | 81 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); |
| @@ -126,17 +126,36 @@ void rv770_pcie_gart_fini(struct radeon_device *rdev) | |||
| 126 | } | 126 | } |
| 127 | 127 | ||
| 128 | 128 | ||
| 129 | /* | 129 | void rv770_agp_enable(struct radeon_device *rdev) |
| 130 | * MC | ||
| 131 | */ | ||
| 132 | static void rv770_mc_resume(struct radeon_device *rdev) | ||
| 133 | { | 130 | { |
| 134 | u32 d1vga_control, d2vga_control; | 131 | u32 tmp; |
| 135 | u32 vga_render_control, vga_hdp_control; | 132 | int i; |
| 136 | u32 d1crtc_control, d2crtc_control; | 133 | |
| 137 | u32 new_d1grph_primary, new_d1grph_secondary; | 134 | /* Setup L2 cache */ |
| 138 | u32 new_d2grph_primary, new_d2grph_secondary; | 135 | WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | |
| 139 | u64 old_vram_start; | 136 | ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | |
| 137 | EFFECTIVE_L2_QUEUE_SIZE(7)); | ||
| 138 | WREG32(VM_L2_CNTL2, 0); | ||
| 139 | WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2)); | ||
| 140 | /* Setup TLB control */ | ||
| 141 | tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING | | ||
| 142 | SYSTEM_ACCESS_MODE_NOT_IN_SYS | | ||
| 143 | SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | | ||
| 144 | EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5); | ||
| 145 | WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); | ||
| 146 | WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); | ||
| 147 | WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); | ||
| 148 | WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); | ||
| 149 | WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); | ||
| 150 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); | ||
| 151 | WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); | ||
| 152 | for (i = 0; i < 7; i++) | ||
| 153 | WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); | ||
| 154 | } | ||
| 155 | |||
| 156 | static void rv770_mc_program(struct radeon_device *rdev) | ||
| 157 | { | ||
| 158 | struct rv515_mc_save save; | ||
| 140 | u32 tmp; | 159 | u32 tmp; |
| 141 | int i, j; | 160 | int i, j; |
| 142 | 161 | ||
| @@ -150,53 +169,42 @@ static void rv770_mc_resume(struct radeon_device *rdev) | |||
| 150 | } | 169 | } |
| 151 | WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); | 170 | WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); |
| 152 | 171 | ||
| 153 | d1vga_control = RREG32(D1VGA_CONTROL); | 172 | rv515_mc_stop(rdev, &save); |
| 154 | d2vga_control = RREG32(D2VGA_CONTROL); | ||
| 155 | vga_render_control = RREG32(VGA_RENDER_CONTROL); | ||
| 156 | vga_hdp_control = RREG32(VGA_HDP_CONTROL); | ||
| 157 | d1crtc_control = RREG32(D1CRTC_CONTROL); | ||
| 158 | d2crtc_control = RREG32(D2CRTC_CONTROL); | ||
| 159 | old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; | ||
| 160 | new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS); | ||
| 161 | new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS); | ||
| 162 | new_d1grph_primary += rdev->mc.vram_start - old_vram_start; | ||
| 163 | new_d1grph_secondary += rdev->mc.vram_start - old_vram_start; | ||
| 164 | new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS); | ||
| 165 | new_d2grph_secondary = RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS); | ||
| 166 | new_d2grph_primary += rdev->mc.vram_start - old_vram_start; | ||
| 167 | new_d2grph_secondary += rdev->mc.vram_start - old_vram_start; | ||
| 168 | |||
| 169 | /* Stop all video */ | ||
| 170 | WREG32(D1VGA_CONTROL, 0); | ||
| 171 | WREG32(D2VGA_CONTROL, 0); | ||
| 172 | WREG32(VGA_RENDER_CONTROL, 0); | ||
| 173 | WREG32(D1CRTC_UPDATE_LOCK, 1); | ||
| 174 | WREG32(D2CRTC_UPDATE_LOCK, 1); | ||
| 175 | WREG32(D1CRTC_CONTROL, 0); | ||
| 176 | WREG32(D2CRTC_CONTROL, 0); | ||
| 177 | WREG32(D1CRTC_UPDATE_LOCK, 0); | ||
| 178 | WREG32(D2CRTC_UPDATE_LOCK, 0); | ||
| 179 | |||
| 180 | mdelay(1); | ||
| 181 | if (r600_mc_wait_for_idle(rdev)) { | 173 | if (r600_mc_wait_for_idle(rdev)) { |
| 182 | printk(KERN_WARNING "[drm] MC not idle !\n"); | 174 | dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); |
| 183 | } | 175 | } |
| 184 | |||
| 185 | /* Lockout access through VGA aperture*/ | 176 | /* Lockout access through VGA aperture*/ |
| 186 | WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); | 177 | WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); |
| 187 | |||
| 188 | /* Update configuration */ | 178 | /* Update configuration */ |
| 189 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); | 179 | if (rdev->flags & RADEON_IS_AGP) { |
| 190 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12); | 180 | if (rdev->mc.vram_start < rdev->mc.gtt_start) { |
| 181 | /* VRAM before AGP */ | ||
| 182 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, | ||
| 183 | rdev->mc.vram_start >> 12); | ||
| 184 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, | ||
| 185 | rdev->mc.gtt_end >> 12); | ||
| 186 | } else { | ||
| 187 | /* VRAM after AGP */ | ||
| 188 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, | ||
| 189 | rdev->mc.gtt_start >> 12); | ||
| 190 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, | ||
| 191 | rdev->mc.vram_end >> 12); | ||
| 192 | } | ||
| 193 | } else { | ||
| 194 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, | ||
| 195 | rdev->mc.vram_start >> 12); | ||
| 196 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, | ||
| 197 | rdev->mc.vram_end >> 12); | ||
| 198 | } | ||
| 191 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); | 199 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); |
| 192 | tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16; | 200 | tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; |
| 193 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); | 201 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); |
| 194 | WREG32(MC_VM_FB_LOCATION, tmp); | 202 | WREG32(MC_VM_FB_LOCATION, tmp); |
| 195 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); | 203 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); |
| 196 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); | 204 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); |
| 197 | WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); | 205 | WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); |
| 198 | if (rdev->flags & RADEON_IS_AGP) { | 206 | if (rdev->flags & RADEON_IS_AGP) { |
| 199 | WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16); | 207 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); |
| 200 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); | 208 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); |
| 201 | WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); | 209 | WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); |
| 202 | } else { | 210 | } else { |
| @@ -204,31 +212,10 @@ static void rv770_mc_resume(struct radeon_device *rdev) | |||
| 204 | WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); | 212 | WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); |
| 205 | WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); | 213 | WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); |
| 206 | } | 214 | } |
| 207 | WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary); | ||
| 208 | WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary); | ||
| 209 | WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary); | ||
| 210 | WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary); | ||
| 211 | WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start); | ||
| 212 | |||
| 213 | /* Unlock host access */ | ||
| 214 | WREG32(VGA_HDP_CONTROL, vga_hdp_control); | ||
| 215 | |||
| 216 | mdelay(1); | ||
| 217 | if (r600_mc_wait_for_idle(rdev)) { | 215 | if (r600_mc_wait_for_idle(rdev)) { |
| 218 | printk(KERN_WARNING "[drm] MC not idle !\n"); | 216 | dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); |
| 219 | } | 217 | } |
| 220 | 218 | rv515_mc_resume(rdev, &save); | |
| 221 | /* Restore video state */ | ||
| 222 | WREG32(D1CRTC_UPDATE_LOCK, 1); | ||
| 223 | WREG32(D2CRTC_UPDATE_LOCK, 1); | ||
| 224 | WREG32(D1CRTC_CONTROL, d1crtc_control); | ||
| 225 | WREG32(D2CRTC_CONTROL, d2crtc_control); | ||
| 226 | WREG32(D1CRTC_UPDATE_LOCK, 0); | ||
| 227 | WREG32(D2CRTC_UPDATE_LOCK, 0); | ||
| 228 | WREG32(D1VGA_CONTROL, d1vga_control); | ||
| 229 | WREG32(D2VGA_CONTROL, d2vga_control); | ||
| 230 | WREG32(VGA_RENDER_CONTROL, vga_render_control); | ||
| 231 | |||
| 232 | /* we need to own VRAM, so turn off the VGA renderer here | 219 | /* we need to own VRAM, so turn off the VGA renderer here |
| 233 | * to stop it overwriting our objects */ | 220 | * to stop it overwriting our objects */ |
| 234 | rv515_vga_render_disable(rdev); | 221 | rv515_vga_render_disable(rdev); |
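The AGP branch added to rv770_mc_program() makes the system aperture cover the union of the VRAM and GTT ranges, whichever sits lower in the address space. The same intent can be written more compactly; a sketch only, assuming the kernel's min()/max() helpers, inclusive *_end addresses, and non-overlapping VRAM/GTT ranges (the driver keeps the explicit if/else above):

	if (rdev->flags & RADEON_IS_AGP) {
		/* aperture must span both VRAM and the AGP/GTT window */
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
		       min(rdev->mc.vram_start, rdev->mc.gtt_start) >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
		       max(rdev->mc.vram_end, rdev->mc.gtt_end) >> 12);
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}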
| @@ -840,9 +827,9 @@ int rv770_mc_init(struct radeon_device *rdev) | |||
| 840 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | 827 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; |
| 841 | } | 828 | } |
| 842 | rdev->mc.vram_start = rdev->mc.vram_location; | 829 | rdev->mc.vram_start = rdev->mc.vram_location; |
| 843 | rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size; | 830 | rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
| 844 | rdev->mc.gtt_start = rdev->mc.gtt_location; | 831 | rdev->mc.gtt_start = rdev->mc.gtt_location; |
| 845 | rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size; | 832 | rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; |
| 846 | /* FIXME: we should enforce default clock in case GPU is not in | 833 | /* FIXME: we should enforce default clock in case GPU is not in |
| 847 | * default setup | 834 | * default setup |
| 848 | */ | 835 | */ |
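Several hunks in this file are consequences of this one change: vram_end and gtt_end now hold the last valid byte address (inclusive) instead of one past the end, so register writes that previously subtracted 1 now use the field directly. A short sketch of the convention (the 128MB size is an assumed example value):

	/* inclusive-end convention: end = start + size - 1 */
	rdev->mc.mc_vram_size = 128 * 1024 * 1024;	/* assumed example size */
	rdev->mc.vram_start = rdev->mc.vram_location;
	rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;

	/* registers taking a "top"/"end" address no longer need an extra -1 */
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);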
| @@ -861,11 +848,14 @@ static int rv770_startup(struct radeon_device *rdev) | |||
| 861 | { | 848 | { |
| 862 | int r; | 849 | int r; |
| 863 | 850 | ||
| 864 | radeon_gpu_reset(rdev); | 851 | rv770_mc_program(rdev); |
| 865 | rv770_mc_resume(rdev); | 852 | if (rdev->flags & RADEON_IS_AGP) { |
| 866 | r = rv770_pcie_gart_enable(rdev); | 853 | rv770_agp_enable(rdev); |
| 867 | if (r) | 854 | } else { |
| 868 | return r; | 855 | r = rv770_pcie_gart_enable(rdev); |
| 856 | if (r) | ||
| 857 | return r; | ||
| 858 | } | ||
| 869 | rv770_gpu_init(rdev); | 859 | rv770_gpu_init(rdev); |
| 870 | 860 | ||
| 871 | r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | 861 | r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, |
| @@ -884,9 +874,8 @@ static int rv770_startup(struct radeon_device *rdev) | |||
| 884 | r = r600_cp_resume(rdev); | 874 | r = r600_cp_resume(rdev); |
| 885 | if (r) | 875 | if (r) |
| 886 | return r; | 876 | return r; |
| 887 | r = r600_wb_init(rdev); | 877 | /* write back buffers are not vital so don't worry about failure */ |
| 888 | if (r) | 878 | r600_wb_enable(rdev); |
| 889 | return r; | ||
| 890 | return 0; | 879 | return 0; |
| 891 | } | 880 | } |
| 892 | 881 | ||
| @@ -894,15 +883,12 @@ int rv770_resume(struct radeon_device *rdev) | |||
| 894 | { | 883 | { |
| 895 | int r; | 884 | int r; |
| 896 | 885 | ||
| 897 | if (radeon_gpu_reset(rdev)) { | 886 | /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw, |
| 898 | /* FIXME: what do we want to do here ? */ | 887 | * posting will perform necessary task to bring back GPU into good |
| 899 | } | 888 | * shape. |
| 889 | */ | ||
| 900 | /* post card */ | 890 | /* post card */ |
| 901 | if (rdev->is_atom_bios) { | 891 | atom_asic_init(rdev->mode_info.atom_context); |
| 902 | atom_asic_init(rdev->mode_info.atom_context); | ||
| 903 | } else { | ||
| 904 | radeon_combios_asic_init(rdev->ddev); | ||
| 905 | } | ||
| 906 | /* Initialize clocks */ | 892 | /* Initialize clocks */ |
| 907 | r = radeon_clocks_init(rdev); | 893 | r = radeon_clocks_init(rdev); |
| 908 | if (r) { | 894 | if (r) { |
| @@ -915,7 +901,7 @@ int rv770_resume(struct radeon_device *rdev) | |||
| 915 | return r; | 901 | return r; |
| 916 | } | 902 | } |
| 917 | 903 | ||
| 918 | r = radeon_ib_test(rdev); | 904 | r = r600_ib_test(rdev); |
| 919 | if (r) { | 905 | if (r) { |
| 920 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); | 906 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); |
| 921 | return r; | 907 | return r; |
| @@ -929,8 +915,8 @@ int rv770_suspend(struct radeon_device *rdev) | |||
| 929 | /* FIXME: we should wait for ring to be empty */ | 915 | /* FIXME: we should wait for ring to be empty */ |
| 930 | r700_cp_stop(rdev); | 916 | r700_cp_stop(rdev); |
| 931 | rdev->cp.ready = false; | 917 | rdev->cp.ready = false; |
| 918 | r600_wb_disable(rdev); | ||
| 932 | rv770_pcie_gart_disable(rdev); | 919 | rv770_pcie_gart_disable(rdev); |
| 933 | |||
| 934 | /* unpin shaders bo */ | 920 | /* unpin shaders bo */ |
| 935 | radeon_object_unpin(rdev->r600_blit.shader_obj); | 921 | radeon_object_unpin(rdev->r600_blit.shader_obj); |
| 936 | return 0; | 922 | return 0; |
| @@ -946,7 +932,6 @@ int rv770_init(struct radeon_device *rdev) | |||
| 946 | { | 932 | { |
| 947 | int r; | 933 | int r; |
| 948 | 934 | ||
| 949 | rdev->new_init_path = true; | ||
| 950 | r = radeon_dummy_page_init(rdev); | 935 | r = radeon_dummy_page_init(rdev); |
| 951 | if (r) | 936 | if (r) |
| 952 | return r; | 937 | return r; |
| @@ -960,8 +945,10 @@ int rv770_init(struct radeon_device *rdev) | |||
| 960 | return -EINVAL; | 945 | return -EINVAL; |
| 961 | } | 946 | } |
| 962 | /* Must be an ATOMBIOS */ | 947 | /* Must be an ATOMBIOS */ |
| 963 | if (!rdev->is_atom_bios) | 948 | if (!rdev->is_atom_bios) { |
| 949 | dev_err(rdev->dev, "Expecting atombios for R600 GPU\n"); | ||
| 964 | return -EINVAL; | 950 | return -EINVAL; |
| 951 | } | ||
| 965 | r = radeon_atombios_init(rdev); | 952 | r = radeon_atombios_init(rdev); |
| 966 | if (r) | 953 | if (r) |
| 967 | return r; | 954 | return r; |
| @@ -983,15 +970,8 @@ int rv770_init(struct radeon_device *rdev) | |||
| 983 | if (r) | 970 | if (r) |
| 984 | return r; | 971 | return r; |
| 985 | r = rv770_mc_init(rdev); | 972 | r = rv770_mc_init(rdev); |
| 986 | if (r) { | 973 | if (r) |
| 987 | if (rdev->flags & RADEON_IS_AGP) { | ||
| 988 | /* Retry with disabling AGP */ | ||
| 989 | rv770_fini(rdev); | ||
| 990 | rdev->flags &= ~RADEON_IS_AGP; | ||
| 991 | return rv770_init(rdev); | ||
| 992 | } | ||
| 993 | return r; | 974 | return r; |
| 994 | } | ||
| 995 | /* Memory manager */ | 975 | /* Memory manager */ |
| 996 | r = radeon_object_init(rdev); | 976 | r = radeon_object_init(rdev); |
| 997 | if (r) | 977 | if (r) |
| @@ -1020,12 +1000,10 @@ int rv770_init(struct radeon_device *rdev) | |||
| 1020 | 1000 | ||
| 1021 | r = rv770_startup(rdev); | 1001 | r = rv770_startup(rdev); |
| 1022 | if (r) { | 1002 | if (r) { |
| 1023 | if (rdev->flags & RADEON_IS_AGP) { | 1003 | rv770_suspend(rdev); |
| 1024 | /* Retry with disabling AGP */ | 1004 | r600_wb_fini(rdev); |
| 1025 | rv770_fini(rdev); | 1005 | radeon_ring_fini(rdev); |
| 1026 | rdev->flags &= ~RADEON_IS_AGP; | 1006 | rv770_pcie_gart_fini(rdev); |
| 1027 | return rv770_init(rdev); | ||
| 1028 | } | ||
| 1029 | rdev->accel_working = false; | 1007 | rdev->accel_working = false; |
| 1030 | } | 1008 | } |
| 1031 | if (rdev->accel_working) { | 1009 | if (rdev->accel_working) { |
| @@ -1034,7 +1012,7 @@ int rv770_init(struct radeon_device *rdev) | |||
| 1034 | DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); | 1012 | DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); |
| 1035 | rdev->accel_working = false; | 1013 | rdev->accel_working = false; |
| 1036 | } | 1014 | } |
| 1037 | r = radeon_ib_test(rdev); | 1015 | r = r600_ib_test(rdev); |
| 1038 | if (r) { | 1016 | if (r) { |
| 1039 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); | 1017 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); |
| 1040 | rdev->accel_working = false; | 1018 | rdev->accel_working = false; |
| @@ -1049,20 +1027,15 @@ void rv770_fini(struct radeon_device *rdev) | |||
| 1049 | 1027 | ||
| 1050 | r600_blit_fini(rdev); | 1028 | r600_blit_fini(rdev); |
| 1051 | radeon_ring_fini(rdev); | 1029 | radeon_ring_fini(rdev); |
| 1030 | r600_wb_fini(rdev); | ||
| 1052 | rv770_pcie_gart_fini(rdev); | 1031 | rv770_pcie_gart_fini(rdev); |
| 1053 | radeon_gem_fini(rdev); | 1032 | radeon_gem_fini(rdev); |
| 1054 | radeon_fence_driver_fini(rdev); | 1033 | radeon_fence_driver_fini(rdev); |
| 1055 | radeon_clocks_fini(rdev); | 1034 | radeon_clocks_fini(rdev); |
| 1056 | #if __OS_HAS_AGP | ||
| 1057 | if (rdev->flags & RADEON_IS_AGP) | 1035 | if (rdev->flags & RADEON_IS_AGP) |
| 1058 | radeon_agp_fini(rdev); | 1036 | radeon_agp_fini(rdev); |
| 1059 | #endif | ||
| 1060 | radeon_object_fini(rdev); | 1037 | radeon_object_fini(rdev); |
| 1061 | if (rdev->is_atom_bios) { | 1038 | radeon_atombios_fini(rdev); |
| 1062 | radeon_atombios_fini(rdev); | ||
| 1063 | } else { | ||
| 1064 | radeon_combios_fini(rdev); | ||
| 1065 | } | ||
| 1066 | kfree(rdev->bios); | 1039 | kfree(rdev->bios); |
| 1067 | rdev->bios = NULL; | 1040 | rdev->bios = NULL; |
| 1068 | radeon_dummy_page_fini(rdev); | 1041 | radeon_dummy_page_fini(rdev); |
diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c index 541744d00d3e..b17007178a36 100644 --- a/drivers/gpu/drm/ttm/ttm_global.c +++ b/drivers/gpu/drm/ttm/ttm_global.c | |||
| @@ -82,8 +82,8 @@ int ttm_global_item_ref(struct ttm_global_reference *ref) | |||
| 82 | if (unlikely(ret != 0)) | 82 | if (unlikely(ret != 0)) |
| 83 | goto out_err; | 83 | goto out_err; |
| 84 | 84 | ||
| 85 | ++item->refcount; | ||
| 86 | } | 85 | } |
| 86 | ++item->refcount; | ||
| 87 | ref->object = item->object; | 87 | ref->object = item->object; |
| 88 | object = item->object; | 88 | object = item->object; |
| 89 | mutex_unlock(&item->mutex); | 89 | mutex_unlock(&item->mutex); |
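The ttm_global hunk above is a reference-counting fix: the increment used to live inside the branch that creates the shared object on first use, so later callers of ttm_global_item_ref() got the object back without taking a reference, and the first ttm_global_item_unref() could tear it down under them. Moving ++item->refcount after the branch makes every successful call take one reference. A minimal user-space sketch of the corrected get-or-create pattern (names are illustrative, not TTM's):

#include <stddef.h>
#include <pthread.h>

struct shared_item {
	pthread_mutex_t mutex;
	int refcount;
	void *object;
};

static void *shared_item_ref(struct shared_item *item, void *(*create)(void))
{
	void *object = NULL;

	pthread_mutex_lock(&item->mutex);
	if (item->refcount == 0) {
		item->object = create();	/* first caller creates it */
		if (!item->object)
			goto out;		/* creation failed: no ref taken */
	}
	++item->refcount;			/* taken for every caller */
	object = item->object;
out:
	pthread_mutex_unlock(&item->mutex);
	return object;
}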
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 0c6639ea03dd..ba05275e5104 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | #include <linux/major.h> | 30 | #include <linux/major.h> |
| 31 | #include <linux/hid.h> | 31 | #include <linux/hid.h> |
| 32 | #include <linux/mutex.h> | 32 | #include <linux/mutex.h> |
| 33 | #include <linux/sched.h> | ||
| 33 | #include <linux/smp_lock.h> | 34 | #include <linux/smp_lock.h> |
| 34 | 35 | ||
| 35 | #include <linux/hidraw.h> | 36 | #include <linux/hidraw.h> |
diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/hwmon/lis3lv02d_spi.c index ecd739534f6a..82b16808a274 100644 --- a/drivers/hwmon/lis3lv02d_spi.c +++ b/drivers/hwmon/lis3lv02d_spi.c | |||
| @@ -83,7 +83,8 @@ static int __devexit lis302dl_spi_remove(struct spi_device *spi) | |||
| 83 | struct lis3lv02d *lis3 = spi_get_drvdata(spi); | 83 | struct lis3lv02d *lis3 = spi_get_drvdata(spi); |
| 84 | lis3lv02d_joystick_disable(); | 84 | lis3lv02d_joystick_disable(); |
| 85 | lis3lv02d_poweroff(lis3); | 85 | lis3lv02d_poweroff(lis3); |
| 86 | return 0; | 86 | |
| 87 | return lis3lv02d_remove_fs(&lis3_dev); | ||
| 87 | } | 88 | } |
| 88 | 89 | ||
| 89 | #ifdef CONFIG_PM | 90 | #ifdef CONFIG_PM |
diff --git a/drivers/hwmon/ltc4215.c b/drivers/hwmon/ltc4215.c index 6c9a04136e0a..00d975eb5b83 100644 --- a/drivers/hwmon/ltc4215.c +++ b/drivers/hwmon/ltc4215.c | |||
| @@ -20,11 +20,6 @@ | |||
| 20 | #include <linux/hwmon.h> | 20 | #include <linux/hwmon.h> |
| 21 | #include <linux/hwmon-sysfs.h> | 21 | #include <linux/hwmon-sysfs.h> |
| 22 | 22 | ||
| 23 | static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; | ||
| 24 | |||
| 25 | /* Insmod parameters */ | ||
| 26 | I2C_CLIENT_INSMOD_1(ltc4215); | ||
| 27 | |||
| 28 | /* Here are names of the chip's registers (a.k.a. commands) */ | 23 | /* Here are names of the chip's registers (a.k.a. commands) */ |
| 29 | enum ltc4215_cmd { | 24 | enum ltc4215_cmd { |
| 30 | LTC4215_CONTROL = 0x00, /* rw */ | 25 | LTC4215_CONTROL = 0x00, /* rw */ |
| @@ -246,9 +241,13 @@ static const struct attribute_group ltc4215_group = { | |||
| 246 | static int ltc4215_probe(struct i2c_client *client, | 241 | static int ltc4215_probe(struct i2c_client *client, |
| 247 | const struct i2c_device_id *id) | 242 | const struct i2c_device_id *id) |
| 248 | { | 243 | { |
| 244 | struct i2c_adapter *adapter = client->adapter; | ||
| 249 | struct ltc4215_data *data; | 245 | struct ltc4215_data *data; |
| 250 | int ret; | 246 | int ret; |
| 251 | 247 | ||
| 248 | if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) | ||
| 249 | return -ENODEV; | ||
| 250 | |||
| 252 | data = kzalloc(sizeof(*data), GFP_KERNEL); | 251 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
| 253 | if (!data) { | 252 | if (!data) { |
| 254 | ret = -ENOMEM; | 253 | ret = -ENOMEM; |
| @@ -294,56 +293,20 @@ static int ltc4215_remove(struct i2c_client *client) | |||
| 294 | return 0; | 293 | return 0; |
| 295 | } | 294 | } |
| 296 | 295 | ||
| 297 | static int ltc4215_detect(struct i2c_client *client, | ||
| 298 | int kind, | ||
| 299 | struct i2c_board_info *info) | ||
| 300 | { | ||
| 301 | struct i2c_adapter *adapter = client->adapter; | ||
| 302 | |||
| 303 | if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) | ||
| 304 | return -ENODEV; | ||
| 305 | |||
| 306 | if (kind < 0) { /* probed detection - check the chip type */ | ||
| 307 | s32 v; /* 8 bits from the chip, or -ERRNO */ | ||
| 308 | |||
| 309 | /* | ||
| 310 | * Register 0x01 bit b7 is reserved, expect 0 | ||
| 311 | * Register 0x03 bit b6 and b7 are reserved, expect 0 | ||
| 312 | */ | ||
| 313 | v = i2c_smbus_read_byte_data(client, LTC4215_ALERT); | ||
| 314 | if (v < 0 || (v & (1 << 7)) != 0) | ||
| 315 | return -ENODEV; | ||
| 316 | |||
| 317 | v = i2c_smbus_read_byte_data(client, LTC4215_FAULT); | ||
| 318 | if (v < 0 || (v & ((1 << 6) | (1 << 7))) != 0) | ||
| 319 | return -ENODEV; | ||
| 320 | } | ||
| 321 | |||
| 322 | strlcpy(info->type, "ltc4215", I2C_NAME_SIZE); | ||
| 323 | dev_info(&adapter->dev, "ltc4215 %s at address 0x%02x\n", | ||
| 324 | kind < 0 ? "probed" : "forced", | ||
| 325 | client->addr); | ||
| 326 | |||
| 327 | return 0; | ||
| 328 | } | ||
| 329 | |||
| 330 | static const struct i2c_device_id ltc4215_id[] = { | 296 | static const struct i2c_device_id ltc4215_id[] = { |
| 331 | { "ltc4215", ltc4215 }, | 297 | { "ltc4215", 0 }, |
| 332 | { } | 298 | { } |
| 333 | }; | 299 | }; |
| 334 | MODULE_DEVICE_TABLE(i2c, ltc4215_id); | 300 | MODULE_DEVICE_TABLE(i2c, ltc4215_id); |
| 335 | 301 | ||
| 336 | /* This is the driver that will be inserted */ | 302 | /* This is the driver that will be inserted */ |
| 337 | static struct i2c_driver ltc4215_driver = { | 303 | static struct i2c_driver ltc4215_driver = { |
| 338 | .class = I2C_CLASS_HWMON, | ||
| 339 | .driver = { | 304 | .driver = { |
| 340 | .name = "ltc4215", | 305 | .name = "ltc4215", |
| 341 | }, | 306 | }, |
| 342 | .probe = ltc4215_probe, | 307 | .probe = ltc4215_probe, |
| 343 | .remove = ltc4215_remove, | 308 | .remove = ltc4215_remove, |
| 344 | .id_table = ltc4215_id, | 309 | .id_table = ltc4215_id, |
| 345 | .detect = ltc4215_detect, | ||
| 346 | .address_data = &addr_data, | ||
| 347 | }; | 310 | }; |
| 348 | 311 | ||
| 349 | static int __init ltc4215_init(void) | 312 | static int __init ltc4215_init(void) |
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c index e38964333612..65c232a9d0c5 100644 --- a/drivers/hwmon/ltc4245.c +++ b/drivers/hwmon/ltc4245.c | |||
| @@ -22,15 +22,6 @@ | |||
| 22 | #include <linux/hwmon.h> | 22 | #include <linux/hwmon.h> |
| 23 | #include <linux/hwmon-sysfs.h> | 23 | #include <linux/hwmon-sysfs.h> |
| 24 | 24 | ||
| 25 | /* Valid addresses are 0x20 - 0x3f | ||
| 26 | * | ||
| 27 | * For now, we do not probe, since some of these addresses | ||
| 28 | * are known to be unfriendly to probing */ | ||
| 29 | static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; | ||
| 30 | |||
| 31 | /* Insmod parameters */ | ||
| 32 | I2C_CLIENT_INSMOD_1(ltc4245); | ||
| 33 | |||
| 34 | /* Here are names of the chip's registers (a.k.a. commands) */ | 25 | /* Here are names of the chip's registers (a.k.a. commands) */ |
| 35 | enum ltc4245_cmd { | 26 | enum ltc4245_cmd { |
| 36 | LTC4245_STATUS = 0x00, /* readonly */ | 27 | LTC4245_STATUS = 0x00, /* readonly */ |
| @@ -369,9 +360,13 @@ static const struct attribute_group ltc4245_group = { | |||
| 369 | static int ltc4245_probe(struct i2c_client *client, | 360 | static int ltc4245_probe(struct i2c_client *client, |
| 370 | const struct i2c_device_id *id) | 361 | const struct i2c_device_id *id) |
| 371 | { | 362 | { |
| 363 | struct i2c_adapter *adapter = client->adapter; | ||
| 372 | struct ltc4245_data *data; | 364 | struct ltc4245_data *data; |
| 373 | int ret; | 365 | int ret; |
| 374 | 366 | ||
| 367 | if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) | ||
| 368 | return -ENODEV; | ||
| 369 | |||
| 375 | data = kzalloc(sizeof(*data), GFP_KERNEL); | 370 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
| 376 | if (!data) { | 371 | if (!data) { |
| 377 | ret = -ENOMEM; | 372 | ret = -ENOMEM; |
| @@ -418,136 +413,20 @@ static int ltc4245_remove(struct i2c_client *client) | |||
| 418 | return 0; | 413 | return 0; |
| 419 | } | 414 | } |
| 420 | 415 | ||
| 421 | /* Check that some bits in a control register appear at all possible | ||
| 422 | * locations without changing value | ||
| 423 | * | ||
| 424 | * @client: the i2c client to use | ||
| 425 | * @reg: the register to read | ||
| 426 | * @bits: the bits to check (0xff checks all bits, | ||
| 427 | * 0x03 checks only the last two bits) | ||
| 428 | * | ||
| 429 | * return -ERRNO if the register read failed | ||
| 430 | * return -ENODEV if the register value doesn't stay constant at all | ||
| 431 | * possible addresses | ||
| 432 | * | ||
| 433 | * return 0 for success | ||
| 434 | */ | ||
| 435 | static int ltc4245_check_control_reg(struct i2c_client *client, u8 reg, u8 bits) | ||
| 436 | { | ||
| 437 | int i; | ||
| 438 | s32 v, voff1, voff2; | ||
| 439 | |||
| 440 | /* Read register and check for error */ | ||
| 441 | v = i2c_smbus_read_byte_data(client, reg); | ||
| 442 | if (v < 0) | ||
| 443 | return v; | ||
| 444 | |||
| 445 | v &= bits; | ||
| 446 | |||
| 447 | for (i = 0x00; i < 0xff; i += 0x20) { | ||
| 448 | |||
| 449 | voff1 = i2c_smbus_read_byte_data(client, reg + i); | ||
| 450 | if (voff1 < 0) | ||
| 451 | return voff1; | ||
| 452 | |||
| 453 | voff2 = i2c_smbus_read_byte_data(client, reg + i + 0x08); | ||
| 454 | if (voff2 < 0) | ||
| 455 | return voff2; | ||
| 456 | |||
| 457 | voff1 &= bits; | ||
| 458 | voff2 &= bits; | ||
| 459 | |||
| 460 | if (v != voff1 || v != voff2) | ||
| 461 | return -ENODEV; | ||
| 462 | } | ||
| 463 | |||
| 464 | return 0; | ||
| 465 | } | ||
| 466 | |||
| 467 | static int ltc4245_detect(struct i2c_client *client, | ||
| 468 | int kind, | ||
| 469 | struct i2c_board_info *info) | ||
| 470 | { | ||
| 471 | struct i2c_adapter *adapter = client->adapter; | ||
| 472 | |||
| 473 | if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) | ||
| 474 | return -ENODEV; | ||
| 475 | |||
| 476 | if (kind < 0) { /* probed detection - check the chip type */ | ||
| 477 | s32 v; /* 8 bits from the chip, or -ERRNO */ | ||
| 478 | |||
| 479 | /* Chip registers 0x00-0x07 are control registers | ||
| 480 | * Chip registers 0x10-0x1f are data registers | ||
| 481 | * | ||
| 482 | * Address bits b7-b5 are ignored. This makes the chip "repeat" | ||
| 483 | * in steps of 0x20. Any control registers should appear with | ||
| 484 | * the same values across all duplicated addresses. | ||
| 485 | * | ||
| 486 | * Register 0x02 bit b2 is reserved, expect 0 | ||
| 487 | * Register 0x07 bits b7 to b4 are reserved, expect 0 | ||
| 488 | * | ||
| 489 | * Registers 0x01, 0x02 are control registers and should not | ||
| 490 | * change on their own. | ||
| 491 | * | ||
| 492 | * Register 0x06 bits b6 and b7 are control bits, and should | ||
| 493 | * not change on their own. | ||
| 494 | * | ||
| 495 | * Register 0x07 bits b3 to b0 are control bits, and should | ||
| 496 | * not change on their own. | ||
| 497 | */ | ||
| 498 | |||
| 499 | /* read register 0x02 reserved bit, expect 0 */ | ||
| 500 | v = i2c_smbus_read_byte_data(client, LTC4245_CONTROL); | ||
| 501 | if (v < 0 || (v & 0x04) != 0) | ||
| 502 | return -ENODEV; | ||
| 503 | |||
| 504 | /* read register 0x07 reserved bits, expect 0 */ | ||
| 505 | v = i2c_smbus_read_byte_data(client, LTC4245_ADCADR); | ||
| 506 | if (v < 0 || (v & 0xf0) != 0) | ||
| 507 | return -ENODEV; | ||
| 508 | |||
| 509 | /* check that the alert register appears at all locations */ | ||
| 510 | if (ltc4245_check_control_reg(client, LTC4245_ALERT, 0xff)) | ||
| 511 | return -ENODEV; | ||
| 512 | |||
| 513 | /* check that the control register appears at all locations */ | ||
| 514 | if (ltc4245_check_control_reg(client, LTC4245_CONTROL, 0xff)) | ||
| 515 | return -ENODEV; | ||
| 516 | |||
| 517 | /* check that register 0x06 bits b6 and b7 stay constant */ | ||
| 518 | if (ltc4245_check_control_reg(client, LTC4245_GPIO, 0xc0)) | ||
| 519 | return -ENODEV; | ||
| 520 | |||
| 521 | /* check that register 0x07 bits b3-b0 stay constant */ | ||
| 522 | if (ltc4245_check_control_reg(client, LTC4245_ADCADR, 0x0f)) | ||
| 523 | return -ENODEV; | ||
| 524 | } | ||
| 525 | |||
| 526 | strlcpy(info->type, "ltc4245", I2C_NAME_SIZE); | ||
| 527 | dev_info(&adapter->dev, "ltc4245 %s at address 0x%02x\n", | ||
| 528 | kind < 0 ? "probed" : "forced", | ||
| 529 | client->addr); | ||
| 530 | |||
| 531 | return 0; | ||
| 532 | } | ||
| 533 | |||
| 534 | static const struct i2c_device_id ltc4245_id[] = { | 416 | static const struct i2c_device_id ltc4245_id[] = { |
| 535 | { "ltc4245", ltc4245 }, | 417 | { "ltc4245", 0 }, |
| 536 | { } | 418 | { } |
| 537 | }; | 419 | }; |
| 538 | MODULE_DEVICE_TABLE(i2c, ltc4245_id); | 420 | MODULE_DEVICE_TABLE(i2c, ltc4245_id); |
| 539 | 421 | ||
| 540 | /* This is the driver that will be inserted */ | 422 | /* This is the driver that will be inserted */ |
| 541 | static struct i2c_driver ltc4245_driver = { | 423 | static struct i2c_driver ltc4245_driver = { |
| 542 | .class = I2C_CLASS_HWMON, | ||
| 543 | .driver = { | 424 | .driver = { |
| 544 | .name = "ltc4245", | 425 | .name = "ltc4245", |
| 545 | }, | 426 | }, |
| 546 | .probe = ltc4245_probe, | 427 | .probe = ltc4245_probe, |
| 547 | .remove = ltc4245_remove, | 428 | .remove = ltc4245_remove, |
| 548 | .id_table = ltc4245_id, | 429 | .id_table = ltc4245_id, |
| 549 | .detect = ltc4245_detect, | ||
| 550 | .address_data = &addr_data, | ||
| 551 | }; | 430 | }; |
| 552 | 431 | ||
| 553 | static int __init ltc4245_init(void) | 432 | static int __init ltc4245_init(void) |
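Both hwmon conversions above (ltc4215 and ltc4245) follow the same recipe: drop the legacy .detect() callback, address_data and I2C_CLIENT_INSMOD machinery, bind purely through the id_table, and keep only the adapter-capability test, now performed directly in probe(). A sketch of the resulting probe shape, with placeholder example_* names and the sysfs/hwmon registration omitted:

#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct example_data {
	struct mutex update_lock;
	/* register cache, last-update timestamp, hwmon device, ... */
};

static int example_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct example_data *data;

	/* refuse adapters that cannot do SMBus byte-data transfers */
	if (!i2c_check_functionality(client->adapter,
				     I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	mutex_init(&data->update_lock);
	i2c_set_clientdata(client, data);

	/* create sysfs attributes and register the hwmon device here */
	return 0;
}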
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c index f7d6fe9c49ba..8f0b90ef8c76 100644 --- a/drivers/i2c/busses/i2c-amd756.c +++ b/drivers/i2c/busses/i2c-amd756.c | |||
| @@ -364,7 +364,7 @@ static int __devinit amd756_probe(struct pci_dev *pdev, | |||
| 364 | error = acpi_check_region(amd756_ioport, SMB_IOSIZE, | 364 | error = acpi_check_region(amd756_ioport, SMB_IOSIZE, |
| 365 | amd756_driver.name); | 365 | amd756_driver.name); |
| 366 | if (error) | 366 | if (error) |
| 367 | return error; | 367 | return -ENODEV; |
| 368 | 368 | ||
| 369 | if (!request_region(amd756_ioport, SMB_IOSIZE, amd756_driver.name)) { | 369 | if (!request_region(amd756_ioport, SMB_IOSIZE, amd756_driver.name)) { |
| 370 | dev_err(&pdev->dev, "SMB region 0x%x already in use!\n", | 370 | dev_err(&pdev->dev, "SMB region 0x%x already in use!\n", |
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c index a7c59908c457..5b4ad86ca166 100644 --- a/drivers/i2c/busses/i2c-amd8111.c +++ b/drivers/i2c/busses/i2c-amd8111.c | |||
| @@ -376,8 +376,10 @@ static int __devinit amd8111_probe(struct pci_dev *dev, | |||
| 376 | smbus->size = pci_resource_len(dev, 0); | 376 | smbus->size = pci_resource_len(dev, 0); |
| 377 | 377 | ||
| 378 | error = acpi_check_resource_conflict(&dev->resource[0]); | 378 | error = acpi_check_resource_conflict(&dev->resource[0]); |
| 379 | if (error) | 379 | if (error) { |
| 380 | error = -ENODEV; | ||
| 380 | goto out_kfree; | 381 | goto out_kfree; |
| 382 | } | ||
| 381 | 383 | ||
| 382 | if (!request_region(smbus->base, smbus->size, amd8111_driver.name)) { | 384 | if (!request_region(smbus->base, smbus->size, amd8111_driver.name)) { |
| 383 | error = -EBUSY; | 385 | error = -EBUSY; |
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 9d2c5adf5d4f..55edcfe5b851 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c | |||
| @@ -732,8 +732,10 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id | |||
| 732 | } | 732 | } |
| 733 | 733 | ||
| 734 | err = acpi_check_resource_conflict(&dev->resource[SMBBAR]); | 734 | err = acpi_check_resource_conflict(&dev->resource[SMBBAR]); |
| 735 | if (err) | 735 | if (err) { |
| 736 | err = -ENODEV; | ||
| 736 | goto exit; | 737 | goto exit; |
| 738 | } | ||
| 737 | 739 | ||
| 738 | err = pci_request_region(dev, SMBBAR, i801_driver.name); | 740 | err = pci_request_region(dev, SMBBAR, i801_driver.name); |
| 739 | if (err) { | 741 | if (err) { |
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c index 9f6b8e0f8632..dba6eb053e2f 100644 --- a/drivers/i2c/busses/i2c-isch.c +++ b/drivers/i2c/busses/i2c-isch.c | |||
| @@ -281,7 +281,7 @@ static int __devinit sch_probe(struct pci_dev *dev, | |||
| 281 | return -ENODEV; | 281 | return -ENODEV; |
| 282 | } | 282 | } |
| 283 | if (acpi_check_region(sch_smba, SMBIOSIZE, sch_driver.name)) | 283 | if (acpi_check_region(sch_smba, SMBIOSIZE, sch_driver.name)) |
| 284 | return -EBUSY; | 284 | return -ENODEV; |
| 285 | if (!request_region(sch_smba, SMBIOSIZE, sch_driver.name)) { | 285 | if (!request_region(sch_smba, SMBIOSIZE, sch_driver.name)) { |
| 286 | dev_err(&dev->dev, "SMBus region 0x%x already in use!\n", | 286 | dev_err(&dev->dev, "SMBus region 0x%x already in use!\n", |
| 287 | sch_smba); | 287 | sch_smba); |
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index a782c7a08f9e..d26a972aacaa 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c | |||
| @@ -169,7 +169,7 @@ static int __devinit piix4_setup(struct pci_dev *PIIX4_dev, | |||
| 169 | } | 169 | } |
| 170 | 170 | ||
| 171 | if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) | 171 | if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) |
| 172 | return -EBUSY; | 172 | return -ENODEV; |
| 173 | 173 | ||
| 174 | if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) { | 174 | if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) { |
| 175 | dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n", | 175 | dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n", |
| @@ -260,7 +260,7 @@ static int __devinit piix4_setup_sb800(struct pci_dev *PIIX4_dev, | |||
| 260 | 260 | ||
| 261 | piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0; | 261 | piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0; |
| 262 | if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) | 262 | if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) |
| 263 | return -EBUSY; | 263 | return -ENODEV; |
| 264 | 264 | ||
| 265 | if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) { | 265 | if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) { |
| 266 | dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n", | 266 | dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n", |
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c index 8295885b2fdb..1649963b00dc 100644 --- a/drivers/i2c/busses/i2c-sis96x.c +++ b/drivers/i2c/busses/i2c-sis96x.c | |||
| @@ -280,7 +280,7 @@ static int __devinit sis96x_probe(struct pci_dev *dev, | |||
| 280 | 280 | ||
| 281 | retval = acpi_check_resource_conflict(&dev->resource[SIS96x_BAR]); | 281 | retval = acpi_check_resource_conflict(&dev->resource[SIS96x_BAR]); |
| 282 | if (retval) | 282 | if (retval) |
| 283 | return retval; | 283 | return -ENODEV; |
| 284 | 284 | ||
| 285 | /* Everything is happy, let's grab the memory and set things up. */ | 285 | /* Everything is happy, let's grab the memory and set things up. */ |
| 286 | if (!request_region(sis96x_smbus_base, SMB_IOSIZE, | 286 | if (!request_region(sis96x_smbus_base, SMB_IOSIZE, |
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c index 54d810a4d00f..e4b1543015af 100644 --- a/drivers/i2c/busses/i2c-viapro.c +++ b/drivers/i2c/busses/i2c-viapro.c | |||
| @@ -365,7 +365,7 @@ static int __devinit vt596_probe(struct pci_dev *pdev, | |||
| 365 | found: | 365 | found: |
| 366 | error = acpi_check_region(vt596_smba, 8, vt596_driver.name); | 366 | error = acpi_check_region(vt596_smba, 8, vt596_driver.name); |
| 367 | if (error) | 367 | if (error) |
| 368 | return error; | 368 | return -ENODEV; |
| 369 | 369 | ||
| 370 | if (!request_region(vt596_smba, 8, vt596_driver.name)) { | 370 | if (!request_region(vt596_smba, 8, vt596_driver.name)) { |
| 371 | dev_err(&pdev->dev, "SMBus region 0x%x already in use!\n", | 371 | dev_err(&pdev->dev, "SMBus region 0x%x already in use!\n", |
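The i2c-amd756, i2c-amd8111, i2c-i801, i2c-isch, i2c-piix4, i2c-sis96x and i2c-viapro hunks all apply one error-mapping convention: when acpi_check_region()/acpi_check_resource_conflict() reports that the SMBus range is reserved by the firmware, the probe now fails with -ENODEV ("no usable device") instead of returning the raw ACPI result or -EBUSY, which is kept for genuine resource clashes. A hedged sketch of that probe shape, with placeholder names and sizes:

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/ioport.h>

#define EXAMPLE_IOSIZE	8	/* placeholder, not a real constant */

static int example_smbus_setup(struct pci_dev *pdev, unsigned short smba)
{
	if (acpi_check_region(smba, EXAMPLE_IOSIZE, "example-smbus"))
		return -ENODEV;	/* region owned by ACPI: report "no device" */

	if (!request_region(smba, EXAMPLE_IOSIZE, "example-smbus")) {
		dev_err(&pdev->dev, "SMBus region 0x%x already in use!\n",
			smba);
		return -EBUSY;	/* a real resource clash stays -EBUSY */
	}
	return 0;
}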
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c index 28d09a5d8450..017c09540c2f 100644 --- a/drivers/ide/ide-proc.c +++ b/drivers/ide/ide-proc.c | |||
| @@ -273,14 +273,8 @@ static const struct ide_proc_devset ide_generic_settings[] = { | |||
| 273 | 273 | ||
| 274 | static void proc_ide_settings_warn(void) | 274 | static void proc_ide_settings_warn(void) |
| 275 | { | 275 | { |
| 276 | static int warned; | 276 | printk_once(KERN_WARNING "Warning: /proc/ide/hd?/settings interface is " |
| 277 | |||
| 278 | if (warned) | ||
| 279 | return; | ||
| 280 | |||
| 281 | printk(KERN_WARNING "Warning: /proc/ide/hd?/settings interface is " | ||
| 282 | "obsolete, and will be removed soon!\n"); | 277 | "obsolete, and will be removed soon!\n"); |
| 283 | warned = 1; | ||
| 284 | } | 278 | } |
| 285 | 279 | ||
| 286 | static int ide_settings_proc_show(struct seq_file *m, void *v) | 280 | static int ide_settings_proc_show(struct seq_file *m, void *v) |
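The ide-proc hunk is a straight conversion to printk_once(), which folds the open-coded static "warned" guard into a macro. For comparison, a simplified sketch of what such a macro expands to (this is not the exact kernel definition, just the shape):

#define printk_once_sketch(fmt...)			\
({							\
	static bool __done;				\
							\
	if (!__done) {					\
		__done = true;				\
		printk(fmt);				\
	}						\
})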
diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c index afca22beaadf..3b88eba04c9c 100644 --- a/drivers/ide/sis5513.c +++ b/drivers/ide/sis5513.c | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> | 2 | * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> |
| 3 | * Copyright (C) 2002 Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer | 3 | * Copyright (C) 2002 Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer |
| 4 | * Copyright (C) 2003 Vojtech Pavlik <vojtech@suse.cz> | 4 | * Copyright (C) 2003 Vojtech Pavlik <vojtech@suse.cz> |
| 5 | * Copyright (C) 2007 Bartlomiej Zolnierkiewicz | 5 | * Copyright (C) 2007-2009 Bartlomiej Zolnierkiewicz |
| 6 | * | 6 | * |
| 7 | * May be copied or modified under the terms of the GNU General Public License | 7 | * May be copied or modified under the terms of the GNU General Public License |
| 8 | * | 8 | * |
| @@ -281,11 +281,13 @@ static void config_drive_art_rwp(ide_drive_t *drive) | |||
| 281 | 281 | ||
| 282 | pci_read_config_byte(dev, 0x4b, ®4bh); | 282 | pci_read_config_byte(dev, 0x4b, ®4bh); |
| 283 | 283 | ||
| 284 | rw_prefetch = reg4bh & ~(0x11 << drive->dn); | ||
| 285 | |||
| 284 | if (drive->media == ide_disk) | 286 | if (drive->media == ide_disk) |
| 285 | rw_prefetch = 0x11 << drive->dn; | 287 | rw_prefetch |= 0x11 << drive->dn; |
| 286 | 288 | ||
| 287 | if ((reg4bh & (0x11 << drive->dn)) != rw_prefetch) | 289 | if (reg4bh != rw_prefetch) |
| 288 | pci_write_config_byte(dev, 0x4b, reg4bh|rw_prefetch); | 290 | pci_write_config_byte(dev, 0x4b, rw_prefetch); |
| 289 | } | 291 | } |
| 290 | 292 | ||
| 291 | static void sis_set_pio_mode(ide_drive_t *drive, const u8 pio) | 293 | static void sis_set_pio_mode(ide_drive_t *drive, const u8 pio) |
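The sis5513 fix turns config_drive_art_rwp() into a true read-modify-write of PCI config register 0x4b: the drive's prefetch/postwrite bit pair is always cleared first and re-set only for disk media, so retesting a port with a non-disk device no longer leaves stale bits enabled, and the register is rewritten only when the value actually changes. A self-contained model of the bit manipulation (illustrative helper, not driver code):

#include <stdint.h>

static uint8_t art_rwp_bits(uint8_t reg, unsigned int drive_unit, int is_disk)
{
	uint8_t mask = 0x11 << drive_unit;

	reg &= ~mask;		/* always clear this drive's bit pair       */
	if (is_disk)
		reg |= mask;	/* enable prefetch/postwrite only for disks */
	return reg;
}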
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index 51bd9669cb1f..f504c9b00c1b 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c | |||
| @@ -38,6 +38,7 @@ | |||
| 38 | #include <linux/device.h> | 38 | #include <linux/device.h> |
| 39 | #include <linux/err.h> | 39 | #include <linux/err.h> |
| 40 | #include <linux/poll.h> | 40 | #include <linux/poll.h> |
| 41 | #include <linux/sched.h> | ||
| 41 | #include <linux/file.h> | 42 | #include <linux/file.h> |
| 42 | #include <linux/mount.h> | 43 | #include <linux/mount.h> |
| 43 | #include <linux/cdev.h> | 44 | #include <linux/cdev.h> |
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 8c46f2257098..7de02969ed7d 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c | |||
| @@ -44,6 +44,7 @@ | |||
| 44 | #include <linux/mutex.h> | 44 | #include <linux/mutex.h> |
| 45 | #include <linux/kref.h> | 45 | #include <linux/kref.h> |
| 46 | #include <linux/compat.h> | 46 | #include <linux/compat.h> |
| 47 | #include <linux/sched.h> | ||
| 47 | #include <linux/semaphore.h> | 48 | #include <linux/semaphore.h> |
| 48 | 49 | ||
| 49 | #include <asm/uaccess.h> | 50 | #include <asm/uaccess.h> |
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index d3fff9e008a3..aec0fbdfe7f0 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
| @@ -40,6 +40,7 @@ | |||
| 40 | #include <linux/err.h> | 40 | #include <linux/err.h> |
| 41 | #include <linux/fs.h> | 41 | #include <linux/fs.h> |
| 42 | #include <linux/poll.h> | 42 | #include <linux/poll.h> |
| 43 | #include <linux/sched.h> | ||
| 43 | #include <linux/file.h> | 44 | #include <linux/file.h> |
| 44 | #include <linux/mount.h> | 45 | #include <linux/mount.h> |
| 45 | #include <linux/cdev.h> | 46 | #include <linux/cdev.h> |
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index 1148140d08a1..dee6706038aa 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #define EVDEV_BUFFER_SIZE 64 | 13 | #define EVDEV_BUFFER_SIZE 64 |
| 14 | 14 | ||
| 15 | #include <linux/poll.h> | 15 | #include <linux/poll.h> |
| 16 | #include <linux/sched.h> | ||
| 16 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
| 17 | #include <linux/module.h> | 18 | #include <linux/module.h> |
| 18 | #include <linux/init.h> | 19 | #include <linux/init.h> |
diff --git a/drivers/input/input.c b/drivers/input/input.c index 16ec33f27c5d..c6f88ebb40c7 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | #include <linux/random.h> | 17 | #include <linux/random.h> |
| 18 | #include <linux/major.h> | 18 | #include <linux/major.h> |
| 19 | #include <linux/proc_fs.h> | 19 | #include <linux/proc_fs.h> |
| 20 | #include <linux/sched.h> | ||
| 20 | #include <linux/seq_file.h> | 21 | #include <linux/seq_file.h> |
| 21 | #include <linux/poll.h> | 22 | #include <linux/poll.h> |
| 22 | #include <linux/device.h> | 23 | #include <linux/device.h> |
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index 901b2525993e..b1bd6dd32286 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/input.h> | 18 | #include <linux/input.h> |
| 19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
| 20 | #include <linux/major.h> | 20 | #include <linux/major.h> |
| 21 | #include <linux/sched.h> | ||
| 21 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
| 22 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
| 23 | #include <linux/miscdevice.h> | 24 | #include <linux/miscdevice.h> |
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index c5a49aba418f..d3f57245420a 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | * - first public version | 30 | * - first public version |
| 31 | */ | 31 | */ |
| 32 | #include <linux/poll.h> | 32 | #include <linux/poll.h> |
| 33 | #include <linux/sched.h> | ||
| 33 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
| 34 | #include <linux/module.h> | 35 | #include <linux/module.h> |
| 35 | #include <linux/init.h> | 36 | #include <linux/init.h> |
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c index 966b8868f792..a13d80f7da17 100644 --- a/drivers/input/mousedev.c +++ b/drivers/input/mousedev.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #define MOUSEDEV_MINORS 32 | 13 | #define MOUSEDEV_MINORS 32 |
| 14 | #define MOUSEDEV_MIX 31 | 14 | #define MOUSEDEV_MIX 31 |
| 15 | 15 | ||
| 16 | #include <linux/sched.h> | ||
| 16 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
| 17 | #include <linux/smp_lock.h> | 18 | #include <linux/smp_lock.h> |
| 18 | #include <linux/poll.h> | 19 | #include <linux/poll.h> |
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c index 2d8352419c0d..65bf91e16a42 100644 --- a/drivers/isdn/capi/capi.c +++ b/drivers/isdn/capi/capi.c | |||
| @@ -603,7 +603,7 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb) | |||
| 603 | 603 | ||
| 604 | if (CAPIMSG_CMD(skb->data) == CAPI_CONNECT_B3_CONF) { | 604 | if (CAPIMSG_CMD(skb->data) == CAPI_CONNECT_B3_CONF) { |
| 605 | u16 info = CAPIMSG_U16(skb->data, 12); // Info field | 605 | u16 info = CAPIMSG_U16(skb->data, 12); // Info field |
| 606 | if (info == 0) { | 606 | if ((info & 0xff00) == 0) { |
| 607 | mutex_lock(&cdev->ncci_list_mtx); | 607 | mutex_lock(&cdev->ncci_list_mtx); |
| 608 | capincci_alloc(cdev, CAPIMSG_NCCI(skb->data)); | 608 | capincci_alloc(cdev, CAPIMSG_NCCI(skb->data)); |
| 609 | mutex_unlock(&cdev->ncci_list_mtx); | 609 | mutex_unlock(&cdev->ncci_list_mtx); |
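The capi.c hunk widens the success test on the CONNECT_B3_CONF Info field: in CAPI 2.0 the high byte of Info carries the error class, and a zero class (0x00xx) means the request was accepted, possibly with an informational code in the low byte, so the NCCI must still be allocated in that case. A one-line helper expressing the same check (sketch, not part of the driver):

#include <linux/types.h>

/* Info values with a zero class byte count as success */
static inline int capi_info_is_success(u16 info)
{
	return (info & 0xff00) == 0;
}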
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c index 650120261abf..3e6d17f42a98 100644 --- a/drivers/isdn/capi/capidrv.c +++ b/drivers/isdn/capi/capidrv.c | |||
| @@ -40,7 +40,7 @@ static int debugmode = 0; | |||
| 40 | MODULE_DESCRIPTION("CAPI4Linux: Interface to ISDN4Linux"); | 40 | MODULE_DESCRIPTION("CAPI4Linux: Interface to ISDN4Linux"); |
| 41 | MODULE_AUTHOR("Carsten Paeth"); | 41 | MODULE_AUTHOR("Carsten Paeth"); |
| 42 | MODULE_LICENSE("GPL"); | 42 | MODULE_LICENSE("GPL"); |
| 43 | module_param(debugmode, uint, 0); | 43 | module_param(debugmode, uint, S_IRUGO|S_IWUSR); |
| 44 | 44 | ||
| 45 | /* -------- type definitions ----------------------------------------- */ | 45 | /* -------- type definitions ----------------------------------------- */ |
| 46 | 46 | ||
| @@ -671,8 +671,8 @@ static void n0(capidrv_contr * card, capidrv_ncci * ncci) | |||
| 671 | NULL, /* Useruserdata */ /* $$$$ */ | 671 | NULL, /* Useruserdata */ /* $$$$ */ |
| 672 | NULL /* Facilitydataarray */ | 672 | NULL /* Facilitydataarray */ |
| 673 | ); | 673 | ); |
| 674 | send_message(card, &cmsg); | ||
| 675 | plci_change_state(card, ncci->plcip, EV_PLCI_DISCONNECT_REQ); | 674 | plci_change_state(card, ncci->plcip, EV_PLCI_DISCONNECT_REQ); |
| 675 | send_message(card, &cmsg); | ||
| 676 | 676 | ||
| 677 | cmd.command = ISDN_STAT_BHUP; | 677 | cmd.command = ISDN_STAT_BHUP; |
| 678 | cmd.driver = card->myid; | 678 | cmd.driver = card->myid; |
| @@ -924,8 +924,8 @@ static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg) | |||
| 924 | */ | 924 | */ |
| 925 | capi_cmsg_answer(cmsg); | 925 | capi_cmsg_answer(cmsg); |
| 926 | cmsg->Reject = 1; /* ignore */ | 926 | cmsg->Reject = 1; /* ignore */ |
| 927 | send_message(card, cmsg); | ||
| 928 | plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); | 927 | plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); |
| 928 | send_message(card, cmsg); | ||
| 929 | printk(KERN_INFO "capidrv-%d: incoming call %s,%d,%d,%s ignored\n", | 929 | printk(KERN_INFO "capidrv-%d: incoming call %s,%d,%d,%s ignored\n", |
| 930 | card->contrnr, | 930 | card->contrnr, |
| 931 | cmd.parm.setup.phone, | 931 | cmd.parm.setup.phone, |
| @@ -974,8 +974,8 @@ static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg) | |||
| 974 | case 2: /* Call will be rejected. */ | 974 | case 2: /* Call will be rejected. */ |
| 975 | capi_cmsg_answer(cmsg); | 975 | capi_cmsg_answer(cmsg); |
| 976 | cmsg->Reject = 2; /* reject call, normal call clearing */ | 976 | cmsg->Reject = 2; /* reject call, normal call clearing */ |
| 977 | send_message(card, cmsg); | ||
| 978 | plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); | 977 | plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); |
| 978 | send_message(card, cmsg); | ||
| 979 | break; | 979 | break; |
| 980 | 980 | ||
| 981 | default: | 981 | default: |
| @@ -983,8 +983,8 @@ static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg) | |||
| 983 | capi_cmsg_answer(cmsg); | 983 | capi_cmsg_answer(cmsg); |
| 984 | cmsg->Reject = 8; /* reject call, | 984 | cmsg->Reject = 8; /* reject call, |
| 985 | destination out of order */ | 985 | destination out of order */ |
| 986 | send_message(card, cmsg); | ||
| 987 | plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); | 986 | plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); |
| 987 | send_message(card, cmsg); | ||
| 988 | break; | 988 | break; |
| 989 | } | 989 | } |
| 990 | return; | 990 | return; |
| @@ -1020,8 +1020,8 @@ static void handle_plci(_cmsg * cmsg) | |||
| 1020 | card->bchans[plcip->chan].disconnecting = 1; | 1020 | card->bchans[plcip->chan].disconnecting = 1; |
| 1021 | plci_change_state(card, plcip, EV_PLCI_DISCONNECT_IND); | 1021 | plci_change_state(card, plcip, EV_PLCI_DISCONNECT_IND); |
| 1022 | capi_cmsg_answer(cmsg); | 1022 | capi_cmsg_answer(cmsg); |
| 1023 | send_message(card, cmsg); | ||
| 1024 | plci_change_state(card, plcip, EV_PLCI_DISCONNECT_RESP); | 1023 | plci_change_state(card, plcip, EV_PLCI_DISCONNECT_RESP); |
| 1024 | send_message(card, cmsg); | ||
| 1025 | break; | 1025 | break; |
| 1026 | 1026 | ||
| 1027 | case CAPI_DISCONNECT_CONF: /* plci */ | 1027 | case CAPI_DISCONNECT_CONF: /* plci */ |
| @@ -1078,8 +1078,8 @@ static void handle_plci(_cmsg * cmsg) | |||
| 1078 | 1078 | ||
| 1079 | if (card->bchans[plcip->chan].incoming) { | 1079 | if (card->bchans[plcip->chan].incoming) { |
| 1080 | capi_cmsg_answer(cmsg); | 1080 | capi_cmsg_answer(cmsg); |
| 1081 | send_message(card, cmsg); | ||
| 1082 | plci_change_state(card, plcip, EV_PLCI_CONNECT_ACTIVE_IND); | 1081 | plci_change_state(card, plcip, EV_PLCI_CONNECT_ACTIVE_IND); |
| 1082 | send_message(card, cmsg); | ||
| 1083 | } else { | 1083 | } else { |
| 1084 | capidrv_ncci *nccip; | 1084 | capidrv_ncci *nccip; |
| 1085 | capi_cmsg_answer(cmsg); | 1085 | capi_cmsg_answer(cmsg); |
| @@ -1098,13 +1098,14 @@ static void handle_plci(_cmsg * cmsg) | |||
| 1098 | NULL /* NCPI */ | 1098 | NULL /* NCPI */ |
| 1099 | ); | 1099 | ); |
| 1100 | nccip->msgid = cmsg->Messagenumber; | 1100 | nccip->msgid = cmsg->Messagenumber; |
| 1101 | plci_change_state(card, plcip, | ||
| 1102 | EV_PLCI_CONNECT_ACTIVE_IND); | ||
| 1103 | ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_REQ); | ||
| 1101 | send_message(card, cmsg); | 1104 | send_message(card, cmsg); |
| 1102 | cmd.command = ISDN_STAT_DCONN; | 1105 | cmd.command = ISDN_STAT_DCONN; |
| 1103 | cmd.driver = card->myid; | 1106 | cmd.driver = card->myid; |
| 1104 | cmd.arg = plcip->chan; | 1107 | cmd.arg = plcip->chan; |
| 1105 | card->interface.statcallb(&cmd); | 1108 | card->interface.statcallb(&cmd); |
| 1106 | plci_change_state(card, plcip, EV_PLCI_CONNECT_ACTIVE_IND); | ||
| 1107 | ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_REQ); | ||
| 1108 | } | 1109 | } |
| 1109 | break; | 1110 | break; |
| 1110 | 1111 | ||
| @@ -1193,8 +1194,8 @@ static void handle_ncci(_cmsg * cmsg) | |||
| 1193 | goto notfound; | 1194 | goto notfound; |
| 1194 | 1195 | ||
| 1195 | capi_cmsg_answer(cmsg); | 1196 | capi_cmsg_answer(cmsg); |
| 1196 | send_message(card, cmsg); | ||
| 1197 | ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_ACTIVE_IND); | 1197 | ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_ACTIVE_IND); |
| 1198 | send_message(card, cmsg); | ||
| 1198 | 1199 | ||
| 1199 | cmd.command = ISDN_STAT_BCONN; | 1200 | cmd.command = ISDN_STAT_BCONN; |
| 1200 | cmd.driver = card->myid; | 1201 | cmd.driver = card->myid; |
| @@ -1222,8 +1223,8 @@ static void handle_ncci(_cmsg * cmsg) | |||
| 1222 | 0, /* Reject */ | 1223 | 0, /* Reject */ |
| 1223 | NULL /* NCPI */ | 1224 | NULL /* NCPI */ |
| 1224 | ); | 1225 | ); |
| 1225 | send_message(card, cmsg); | ||
| 1226 | ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_RESP); | 1226 | ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_RESP); |
| 1227 | send_message(card, cmsg); | ||
| 1227 | break; | 1228 | break; |
| 1228 | } | 1229 | } |
| 1229 | printk(KERN_ERR "capidrv-%d: no mem for ncci, sorry\n", card->contrnr); | 1230 | printk(KERN_ERR "capidrv-%d: no mem for ncci, sorry\n", card->contrnr); |
| @@ -1299,8 +1300,8 @@ static void handle_ncci(_cmsg * cmsg) | |||
| 1299 | card->bchans[nccip->chan].disconnecting = 1; | 1300 | card->bchans[nccip->chan].disconnecting = 1; |
| 1300 | ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_IND); | 1301 | ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_IND); |
| 1301 | capi_cmsg_answer(cmsg); | 1302 | capi_cmsg_answer(cmsg); |
| 1302 | send_message(card, cmsg); | ||
| 1303 | ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_RESP); | 1303 | ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_RESP); |
| 1304 | send_message(card, cmsg); | ||
| 1304 | break; | 1305 | break; |
| 1305 | 1306 | ||
| 1306 | case CAPI_DISCONNECT_B3_CONF: /* ncci */ | 1307 | case CAPI_DISCONNECT_B3_CONF: /* ncci */ |
| @@ -2014,8 +2015,8 @@ static void send_listen(capidrv_contr *card) | |||
| 2014 | card->cipmask, | 2015 | card->cipmask, |
| 2015 | card->cipmask2, | 2016 | card->cipmask2, |
| 2016 | NULL, NULL); | 2017 | NULL, NULL); |
| 2017 | send_message(card, &cmdcmsg); | ||
| 2018 | listen_change_state(card, EV_LISTEN_REQ); | 2018 | listen_change_state(card, EV_LISTEN_REQ); |
| 2019 | send_message(card, &cmdcmsg); | ||
| 2019 | } | 2020 | } |
| 2020 | 2021 | ||
| 2021 | static void listentimerfunc(unsigned long x) | 2022 | static void listentimerfunc(unsigned long x) |
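Every capidrv.c hunk in this range makes the same swap: the local state-machine transition (plci_change_state(), ncci_change_state(), listen_change_state()) is committed before send_message(), because the controller's answer may be delivered and processed before send_message() returns; transitioning afterwards left a window in which the confirmation hit the old state. Reduced to its shape (sketch of the fixed ordering, mirroring the hunks above):

plci_change_state(card, plcip, EV_PLCI_DISCONNECT_REQ);	/* 1: commit FSM first */
send_message(card, &cmsg);				/* 2: then transmit    */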
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c index 8b256a617c8a..3697c409bec6 100644 --- a/drivers/isdn/divert/divert_procfs.c +++ b/drivers/isdn/divert/divert_procfs.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #else | 16 | #else |
| 17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
| 18 | #endif | 18 | #endif |
| 19 | #include <linux/sched.h> | ||
| 19 | #include <linux/isdnif.h> | 20 | #include <linux/isdnif.h> |
| 20 | #include <net/net_namespace.h> | 21 | #include <net/net_namespace.h> |
| 21 | #include "isdn_divert.h" | 22 | #include "isdn_divert.h" |
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c index 234cc5d53312..44a58e6f8f65 100644 --- a/drivers/isdn/gigaset/asyncdata.c +++ b/drivers/isdn/gigaset/asyncdata.c | |||
| @@ -334,7 +334,14 @@ static inline int iraw_loop(unsigned char c, unsigned char *src, int numbytes, | |||
| 334 | return startbytes - numbytes; | 334 | return startbytes - numbytes; |
| 335 | } | 335 | } |
| 336 | 336 | ||
| 337 | /* process a block of data received from the device | 337 | /** |
| 338 | * gigaset_m10x_input() - process a block of data received from the device | ||
| 339 | * @inbuf: received data and device descriptor structure. | ||
| 340 | * | ||
| 341 | * Called by hardware module {ser,usb}_gigaset with a block of received | ||
| 342 | * bytes. Separates the bytes received over the serial data channel into | ||
| 343 | * user data and command replies (locked/unlocked) according to the | ||
| 344 | * current state of the interface. | ||
| 338 | */ | 345 | */ |
| 339 | void gigaset_m10x_input(struct inbuf_t *inbuf) | 346 | void gigaset_m10x_input(struct inbuf_t *inbuf) |
| 340 | { | 347 | { |
| @@ -543,16 +550,17 @@ static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail) | |||
| 543 | return iraw_skb; | 550 | return iraw_skb; |
| 544 | } | 551 | } |
| 545 | 552 | ||
| 546 | /* gigaset_send_skb | 553 | /** |
| 547 | * called by common.c to queue an skb for sending | 554 | * gigaset_m10x_send_skb() - queue an skb for sending |
| 548 | * and start transmission if necessary | 555 | * @bcs: B channel descriptor structure. |
| 549 | * parameters: | 556 | * @skb: data to send. |
| 550 | * B Channel control structure | 557 | * |
| 551 | * skb | 558 | * Called by i4l.c to encode and queue an skb for sending, and start |
| 559 | * transmission if necessary. | ||
| 560 | * | ||
| 552 | * Return value: | 561 | * Return value: |
| 553 | * number of bytes accepted for sending | 562 | * number of bytes accepted for sending (skb->len) if ok, |
| 554 | * (skb->len if ok, 0 if out of buffer space) | 563 | * error code < 0 (eg. -ENOMEM) on error |
| 555 | * or error code (< 0, eg. -EINVAL) | ||
| 556 | */ | 564 | */ |
| 557 | int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb) | 565 | int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb) |
| 558 | { | 566 | { |
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index 781c4041f7b0..5ed1d99eb9f3 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c | |||
| @@ -134,6 +134,7 @@ struct bas_cardstate { | |||
| 134 | #define BS_ATRDPEND 0x040 /* urb_cmd_in in use */ | 134 | #define BS_ATRDPEND 0x040 /* urb_cmd_in in use */ |
| 135 | #define BS_ATWRPEND 0x080 /* urb_cmd_out in use */ | 135 | #define BS_ATWRPEND 0x080 /* urb_cmd_out in use */ |
| 136 | #define BS_SUSPEND 0x100 /* USB port suspended */ | 136 | #define BS_SUSPEND 0x100 /* USB port suspended */ |
| 137 | #define BS_RESETTING 0x200 /* waiting for HD_RESET_INTERRUPT_PIPE_ACK */ | ||
| 137 | 138 | ||
| 138 | 139 | ||
| 139 | static struct gigaset_driver *driver = NULL; | 140 | static struct gigaset_driver *driver = NULL; |
| @@ -319,6 +320,21 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag) | |||
| 319 | return -EINVAL; | 320 | return -EINVAL; |
| 320 | } | 321 | } |
| 321 | 322 | ||
| 323 | /* set/clear bits in base connection state, return previous state | ||
| 324 | */ | ||
| 325 | static inline int update_basstate(struct bas_cardstate *ucs, | ||
| 326 | int set, int clear) | ||
| 327 | { | ||
| 328 | unsigned long flags; | ||
| 329 | int state; | ||
| 330 | |||
| 331 | spin_lock_irqsave(&ucs->lock, flags); | ||
| 332 | state = ucs->basstate; | ||
| 333 | ucs->basstate = (state & ~clear) | set; | ||
| 334 | spin_unlock_irqrestore(&ucs->lock, flags); | ||
| 335 | return state; | ||
| 336 | } | ||
| 337 | |||
| 322 | /* error_hangup | 338 | /* error_hangup |
| 323 | * hang up any existing connection because of an unrecoverable error | 339 | * hang up any existing connection because of an unrecoverable error |
| 324 | * This function may be called from any context and takes care of scheduling | 340 | * This function may be called from any context and takes care of scheduling |
| @@ -350,12 +366,9 @@ static inline void error_hangup(struct bc_state *bcs) | |||
| 350 | */ | 366 | */ |
| 351 | static inline void error_reset(struct cardstate *cs) | 367 | static inline void error_reset(struct cardstate *cs) |
| 352 | { | 368 | { |
| 353 | /* close AT command channel to recover (ignore errors) */ | 369 | /* reset interrupt pipe to recover (ignore errors) */ |
| 354 | req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT); | 370 | update_basstate(cs->hw.bas, BS_RESETTING, 0); |
| 355 | 371 | req_submit(cs->bcs, HD_RESET_INTERRUPT_PIPE, 0, BAS_TIMEOUT); | |
| 356 | //FIXME try to recover without bothering the user | ||
| 357 | dev_err(cs->dev, | ||
| 358 | "unrecoverable error - please disconnect Gigaset base to reset\n"); | ||
| 359 | } | 372 | } |
| 360 | 373 | ||
| 361 | /* check_pending | 374 | /* check_pending |
| @@ -398,8 +411,13 @@ static void check_pending(struct bas_cardstate *ucs) | |||
| 398 | case HD_DEVICE_INIT_ACK: /* no reply expected */ | 411 | case HD_DEVICE_INIT_ACK: /* no reply expected */ |
| 399 | ucs->pending = 0; | 412 | ucs->pending = 0; |
| 400 | break; | 413 | break; |
| 401 | /* HD_READ_ATMESSAGE, HD_WRITE_ATMESSAGE, HD_RESET_INTERRUPTPIPE | 414 | case HD_RESET_INTERRUPT_PIPE: |
| 402 | * are handled separately and should never end up here | 415 | if (!(ucs->basstate & BS_RESETTING)) |
| 416 | ucs->pending = 0; | ||
| 417 | break; | ||
| 418 | /* | ||
| 419 | * HD_READ_ATMESSAGE and HD_WRITE_ATMESSAGE are handled separately | ||
| 420 | * and should never end up here | ||
| 403 | */ | 421 | */ |
| 404 | default: | 422 | default: |
| 405 | dev_warn(&ucs->interface->dev, | 423 | dev_warn(&ucs->interface->dev, |
| @@ -449,21 +467,6 @@ static void cmd_in_timeout(unsigned long data) | |||
| 449 | error_reset(cs); | 467 | error_reset(cs); |
| 450 | } | 468 | } |
| 451 | 469 | ||
| 452 | /* set/clear bits in base connection state, return previous state | ||
| 453 | */ | ||
| 454 | inline static int update_basstate(struct bas_cardstate *ucs, | ||
| 455 | int set, int clear) | ||
| 456 | { | ||
| 457 | unsigned long flags; | ||
| 458 | int state; | ||
| 459 | |||
| 460 | spin_lock_irqsave(&ucs->lock, flags); | ||
| 461 | state = ucs->basstate; | ||
| 462 | ucs->basstate = (state & ~clear) | set; | ||
| 463 | spin_unlock_irqrestore(&ucs->lock, flags); | ||
| 464 | return state; | ||
| 465 | } | ||
| 466 | |||
| 467 | /* read_ctrl_callback | 470 | /* read_ctrl_callback |
| 468 | * USB completion handler for control pipe input | 471 | * USB completion handler for control pipe input |
| 469 | * called by the USB subsystem in interrupt context | 472 | * called by the USB subsystem in interrupt context |
| @@ -762,7 +765,8 @@ static void read_int_callback(struct urb *urb) | |||
| 762 | break; | 765 | break; |
| 763 | 766 | ||
| 764 | case HD_RESET_INTERRUPT_PIPE_ACK: | 767 | case HD_RESET_INTERRUPT_PIPE_ACK: |
| 765 | gig_dbg(DEBUG_USBREQ, "HD_RESET_INTERRUPT_PIPE_ACK"); | 768 | update_basstate(ucs, 0, BS_RESETTING); |
| 769 | dev_notice(cs->dev, "interrupt pipe reset\n"); | ||
| 766 | break; | 770 | break; |
| 767 | 771 | ||
| 768 | case HD_SUSPEND_END: | 772 | case HD_SUSPEND_END: |
| @@ -1331,28 +1335,24 @@ static void read_iso_tasklet(unsigned long data) | |||
| 1331 | rcvbuf = urb->transfer_buffer; | 1335 | rcvbuf = urb->transfer_buffer; |
| 1332 | totleft = urb->actual_length; | 1336 | totleft = urb->actual_length; |
| 1333 | for (frame = 0; totleft > 0 && frame < BAS_NUMFRAMES; frame++) { | 1337 | for (frame = 0; totleft > 0 && frame < BAS_NUMFRAMES; frame++) { |
| 1334 | if (unlikely(urb->iso_frame_desc[frame].status)) { | 1338 | numbytes = urb->iso_frame_desc[frame].actual_length; |
| 1339 | if (unlikely(urb->iso_frame_desc[frame].status)) | ||
| 1335 | dev_warn(cs->dev, | 1340 | dev_warn(cs->dev, |
| 1336 | "isochronous read: frame %d: %s\n", | 1341 | "isochronous read: frame %d[%d]: %s\n", |
| 1337 | frame, | 1342 | frame, numbytes, |
| 1338 | get_usb_statmsg( | 1343 | get_usb_statmsg( |
| 1339 | urb->iso_frame_desc[frame].status)); | 1344 | urb->iso_frame_desc[frame].status)); |
| 1340 | break; | 1345 | if (unlikely(numbytes > BAS_MAXFRAME)) |
| 1341 | } | ||
| 1342 | numbytes = urb->iso_frame_desc[frame].actual_length; | ||
| 1343 | if (unlikely(numbytes > BAS_MAXFRAME)) { | ||
| 1344 | dev_warn(cs->dev, | 1346 | dev_warn(cs->dev, |
| 1345 | "isochronous read: frame %d: " | 1347 | "isochronous read: frame %d: " |
| 1346 | "numbytes (%d) > BAS_MAXFRAME\n", | 1348 | "numbytes (%d) > BAS_MAXFRAME\n", |
| 1347 | frame, numbytes); | 1349 | frame, numbytes); |
| 1348 | break; | ||
| 1349 | } | ||
| 1350 | if (unlikely(numbytes > totleft)) { | 1350 | if (unlikely(numbytes > totleft)) { |
| 1351 | dev_warn(cs->dev, | 1351 | dev_warn(cs->dev, |
| 1352 | "isochronous read: frame %d: " | 1352 | "isochronous read: frame %d: " |
| 1353 | "numbytes (%d) > totleft (%d)\n", | 1353 | "numbytes (%d) > totleft (%d)\n", |
| 1354 | frame, numbytes, totleft); | 1354 | frame, numbytes, totleft); |
| 1355 | break; | 1355 | numbytes = totleft; |
| 1356 | } | 1356 | } |
| 1357 | offset = urb->iso_frame_desc[frame].offset; | 1357 | offset = urb->iso_frame_desc[frame].offset; |
| 1358 | if (unlikely(offset + numbytes > BAS_INBUFSIZE)) { | 1358 | if (unlikely(offset + numbytes > BAS_INBUFSIZE)) { |
| @@ -1361,7 +1361,7 @@ static void read_iso_tasklet(unsigned long data) | |||
| 1361 | "offset (%d) + numbytes (%d) " | 1361 | "offset (%d) + numbytes (%d) " |
| 1362 | "> BAS_INBUFSIZE\n", | 1362 | "> BAS_INBUFSIZE\n", |
| 1363 | frame, offset, numbytes); | 1363 | frame, offset, numbytes); |
| 1364 | break; | 1364 | numbytes = BAS_INBUFSIZE - offset; |
| 1365 | } | 1365 | } |
| 1366 | gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs); | 1366 | gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs); |
| 1367 | totleft -= numbytes; | 1367 | totleft -= numbytes; |
| @@ -1433,6 +1433,7 @@ static void req_timeout(unsigned long data) | |||
| 1433 | 1433 | ||
| 1434 | case HD_CLOSE_ATCHANNEL: | 1434 | case HD_CLOSE_ATCHANNEL: |
| 1435 | dev_err(bcs->cs->dev, "timeout closing AT channel\n"); | 1435 | dev_err(bcs->cs->dev, "timeout closing AT channel\n"); |
| 1436 | error_reset(bcs->cs); | ||
| 1436 | break; | 1437 | break; |
| 1437 | 1438 | ||
| 1438 | case HD_CLOSE_B2CHANNEL: | 1439 | case HD_CLOSE_B2CHANNEL: |
| @@ -1442,6 +1443,13 @@ static void req_timeout(unsigned long data) | |||
| 1442 | error_reset(bcs->cs); | 1443 | error_reset(bcs->cs); |
| 1443 | break; | 1444 | break; |
| 1444 | 1445 | ||
| 1446 | case HD_RESET_INTERRUPT_PIPE: | ||
| 1447 | /* error recovery escalation */ | ||
| 1448 | dev_err(bcs->cs->dev, | ||
| 1449 | "reset interrupt pipe timeout, attempting USB reset\n"); | ||
| 1450 | usb_queue_reset_device(bcs->cs->hw.bas->interface); | ||
| 1451 | break; | ||
| 1452 | |||
| 1445 | default: | 1453 | default: |
| 1446 | dev_warn(bcs->cs->dev, "request 0x%02x timed out, clearing\n", | 1454 | dev_warn(bcs->cs->dev, "request 0x%02x timed out, clearing\n", |
| 1447 | pending); | 1455 | pending); |
| @@ -1934,6 +1942,15 @@ static int gigaset_write_cmd(struct cardstate *cs, | |||
| 1934 | goto notqueued; | 1942 | goto notqueued; |
| 1935 | } | 1943 | } |
| 1936 | 1944 | ||
| 1945 | /* translate "+++" escape sequence sent as a single separate command | ||
| 1946 | * into "close AT channel" command for error recovery | ||
| 1947 | * The next command will reopen the AT channel automatically. | ||
| 1948 | */ | ||
| 1949 | if (len == 3 && !memcmp(buf, "+++", 3)) { | ||
| 1950 | rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT); | ||
| 1951 | goto notqueued; | ||
| 1952 | } | ||
| 1953 | |||
| 1937 | if (len > IF_WRITEBUF) | 1954 | if (len > IF_WRITEBUF) |
| 1938 | len = IF_WRITEBUF; | 1955 | len = IF_WRITEBUF; |
| 1939 | if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) { | 1956 | if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) { |
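In bas-gigaset, update_basstate() is now the single place that touches the base-state flags: it does the read-modify-write under ucs->lock and returns the previous flags, so callers get an atomic test-and-set, which the new BS_RESETTING handling and the interrupt-pipe reset recovery build on. A user-space model of that primitive and its typical use (illustrative names, not driver code):

#include <pthread.h>

struct base_state {
	pthread_mutex_t lock;
	int flags;
};

/* set/clear flag bits under the lock, return the previous value */
static int update_state(struct base_state *s, int set, int clear)
{
	int old;

	pthread_mutex_lock(&s->lock);
	old = s->flags;
	s->flags = (old & ~clear) | set;
	pthread_mutex_unlock(&s->lock);
	return old;
}

/* typical use: claim a "pending" slot only if nobody holds it yet */
static int claim_pending(struct base_state *s, int flag)
{
	if (update_state(s, flag, 0) & flag)
		return -1;	/* already pending elsewhere: back off */
	return 0;		/* ours until we clear the flag again  */
}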
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c index e4141bf8b2f3..33dcd8d72b7c 100644 --- a/drivers/isdn/gigaset/common.c +++ b/drivers/isdn/gigaset/common.c | |||
| @@ -22,6 +22,12 @@ | |||
| 22 | #define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers" | 22 | #define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers" |
| 23 | #define DRIVER_DESC "Driver for Gigaset 307x" | 23 | #define DRIVER_DESC "Driver for Gigaset 307x" |
| 24 | 24 | ||
| 25 | #ifdef CONFIG_GIGASET_DEBUG | ||
| 26 | #define DRIVER_DESC_DEBUG " (debug build)" | ||
| 27 | #else | ||
| 28 | #define DRIVER_DESC_DEBUG "" | ||
| 29 | #endif | ||
| 30 | |||
| 25 | /* Module parameters */ | 31 | /* Module parameters */ |
| 26 | int gigaset_debuglevel = DEBUG_DEFAULT; | 32 | int gigaset_debuglevel = DEBUG_DEFAULT; |
| 27 | EXPORT_SYMBOL_GPL(gigaset_debuglevel); | 33 | EXPORT_SYMBOL_GPL(gigaset_debuglevel); |
| @@ -32,6 +38,17 @@ MODULE_PARM_DESC(debug, "debug level"); | |||
| 32 | #define VALID_MINOR 0x01 | 38 | #define VALID_MINOR 0x01 |
| 33 | #define VALID_ID 0x02 | 39 | #define VALID_ID 0x02 |
| 34 | 40 | ||
| 41 | /** | ||
| 42 | * gigaset_dbg_buffer() - dump data in ASCII and hex for debugging | ||
| 43 | * @level: debugging level. | ||
| 44 | * @msg: message prefix. | ||
| 45 | * @len: number of bytes to dump. | ||
| 46 | * @buf: data to dump. | ||
| 47 | * | ||
| 48 | * If the current debugging level includes one of the bits set in @level, | ||
| 49 | * @len bytes starting at @buf are logged to dmesg at KERN_DEBUG prio, | ||
| 50 | * prefixed by the text @msg. | ||
| 51 | */ | ||
| 35 | void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg, | 52 | void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg, |
| 36 | size_t len, const unsigned char *buf) | 53 | size_t len, const unsigned char *buf) |
| 37 | { | 54 | { |
| @@ -274,6 +291,20 @@ static void clear_events(struct cardstate *cs) | |||
| 274 | spin_unlock_irqrestore(&cs->ev_lock, flags); | 291 | spin_unlock_irqrestore(&cs->ev_lock, flags); |
| 275 | } | 292 | } |
| 276 | 293 | ||
| 294 | /** | ||
| 295 | * gigaset_add_event() - add event to device event queue | ||
| 296 | * @cs: device descriptor structure. | ||
| 297 | * @at_state: connection state structure. | ||
| 298 | * @type: event type. | ||
| 299 | * @ptr: pointer parameter for event. | ||
| 300 | * @parameter: integer parameter for event. | ||
| 301 | * @arg: pointer parameter for event. | ||
| 302 | * | ||
| 303 | * Allocate an event queue entry from the device's event queue, and set it up | ||
| 304 | * with the parameters given. | ||
| 305 | * | ||
| 306 | * Return value: added event | ||
| 307 | */ | ||
| 277 | struct event_t *gigaset_add_event(struct cardstate *cs, | 308 | struct event_t *gigaset_add_event(struct cardstate *cs, |
| 278 | struct at_state_t *at_state, int type, | 309 | struct at_state_t *at_state, int type, |
| 279 | void *ptr, int parameter, void *arg) | 310 | void *ptr, int parameter, void *arg) |
| @@ -398,6 +429,15 @@ static void make_invalid(struct cardstate *cs, unsigned mask) | |||
| 398 | spin_unlock_irqrestore(&drv->lock, flags); | 429 | spin_unlock_irqrestore(&drv->lock, flags); |
| 399 | } | 430 | } |
| 400 | 431 | ||
| 432 | /** | ||
| 433 | * gigaset_freecs() - free all associated resources of a device | ||
| 434 | * @cs: device descriptor structure. | ||
| 435 | * | ||
| 436 | * Stops all tasklets and timers, unregisters the device from all | ||
| 437 | * subsystems it was registered to, deallocates the device structure | ||
| 438 | * @cs and all structures referenced from it. | ||
| 439 | * Operations on the device should be stopped before calling this. | ||
| 440 | */ | ||
| 401 | void gigaset_freecs(struct cardstate *cs) | 441 | void gigaset_freecs(struct cardstate *cs) |
| 402 | { | 442 | { |
| 403 | int i; | 443 | int i; |
| @@ -506,7 +546,12 @@ static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct bc_state *bcs, | |||
| 506 | inbuf->inputstate = inputstate; | 546 | inbuf->inputstate = inputstate; |
| 507 | } | 547 | } |
| 508 | 548 | ||
| 509 | /* append received bytes to inbuf */ | 549 | /** |
| 550 | * gigaset_fill_inbuf() - append received data to input buffer | ||
| 551 | * @inbuf: buffer structure. | ||
| 552 | * @src: received data. | ||
| 553 | * @numbytes: number of bytes received. | ||
| 554 | */ | ||
| 510 | int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src, | 555 | int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src, |
| 511 | unsigned numbytes) | 556 | unsigned numbytes) |
| 512 | { | 557 | { |
| @@ -606,20 +651,22 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs, | |||
| 606 | return NULL; | 651 | return NULL; |
| 607 | } | 652 | } |
| 608 | 653 | ||
| 609 | /* gigaset_initcs | 654 | /** |
| 655 | * gigaset_initcs() - initialize device structure | ||
| 656 | * @drv: hardware driver the device belongs to | ||
| 657 | * @channels: number of B channels supported by device | ||
| 658 | * @onechannel: !=0 if B channel data and AT commands share one | ||
| 659 | * communication channel (M10x), | ||
| 660 | * ==0 if B channels have separate communication channels (base) | ||
| 661 | * @ignoreframes: number of frames to ignore after setting up B channel | ||
| 662 | * @cidmode: !=0: start in CallID mode | ||
| 663 | * @modulename: name of driver module for LL registration | ||
| 664 | * | ||
| 610 | * Allocate and initialize cardstate structure for Gigaset driver | 665 | * Allocate and initialize cardstate structure for Gigaset driver |
| 611 | * Calls hardware dependent gigaset_initcshw() function | 666 | * Calls hardware dependent gigaset_initcshw() function |
| 612 | * Calls B channel initialization function gigaset_initbcs() for each B channel | 667 | * Calls B channel initialization function gigaset_initbcs() for each B channel |
| 613 | * parameters: | 668 | * |
| 614 | * drv hardware driver the device belongs to | 669 | * Return value: |
| 615 | * channels number of B channels supported by device | ||
| 616 | * onechannel !=0: B channel data and AT commands share one | ||
| 617 | * communication channel | ||
| 618 | * ==0: B channels have separate communication channels | ||
| 619 | * ignoreframes number of frames to ignore after setting up B channel | ||
| 620 | * cidmode !=0: start in CallID mode | ||
| 621 | * modulename name of driver module (used for I4L registration) | ||
| 622 | * return value: | ||
| 623 | * pointer to cardstate structure | 670 | * pointer to cardstate structure |
| 624 | */ | 671 | */ |
| 625 | struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, | 672 | struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, |
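As a hedged illustration of the parameters documented above (not part of the patch), a hardware module might allocate its device structure as follows; the channel count, flags and module name are made-up example values.

/* Hedged sketch: allocating a cardstate with the documented parameters.
 * All argument values and the wrapper name are illustrative only. */
static struct cardstate *example_initcs(struct gigaset_driver *driver)
{
	return gigaset_initcs(driver,
			      1,	/* one B channel */
			      1,	/* B data and AT commands share one channel */
			      0,	/* no frames to ignore */
			      1,	/* start in CallID mode */
			      "example_gigaset");
}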
| @@ -837,6 +884,17 @@ static void cleanup_cs(struct cardstate *cs) | |||
| 837 | } | 884 | } |
| 838 | 885 | ||
| 839 | 886 | ||
| 887 | /** | ||
| 888 | * gigaset_start() - start device operations | ||
| 889 | * @cs: device descriptor structure. | ||
| 890 | * | ||
| 891 | * Prepares the device for use by setting up communication parameters, | ||
| 892 | * scheduling an EV_START event to initiate device initialization, and | ||
| 893 | * waiting for completion of the initialization. | ||
| 894 | * | ||
| 895 | * Return value: | ||
| 896 | * 1 - success, 0 - error | ||
| 897 | */ | ||
| 840 | int gigaset_start(struct cardstate *cs) | 898 | int gigaset_start(struct cardstate *cs) |
| 841 | { | 899 | { |
| 842 | unsigned long flags; | 900 | unsigned long flags; |
| @@ -879,9 +937,15 @@ error: | |||
| 879 | } | 937 | } |
| 880 | EXPORT_SYMBOL_GPL(gigaset_start); | 938 | EXPORT_SYMBOL_GPL(gigaset_start); |
| 881 | 939 | ||
| 882 | /* gigaset_shutdown | 940 | /** |
| 883 | * check if a device is associated to the cardstate structure and stop it | 941 | * gigaset_shutdown() - shut down device operations |
| 884 | * return value: 0 if ok, -1 if no device was associated | 942 | * @cs: device descriptor structure. |
| 943 | * | ||
| 944 | * Deactivates the device by scheduling an EV_SHUTDOWN event and | ||
| 945 | * waiting for completion of the shutdown. | ||
| 946 | * | ||
| 947 | * Return value: | ||
| 948 | * 0 - success, -1 - error (no device associated) | ||
| 885 | */ | 949 | */ |
| 886 | int gigaset_shutdown(struct cardstate *cs) | 950 | int gigaset_shutdown(struct cardstate *cs) |
| 887 | { | 951 | { |
| @@ -912,6 +976,13 @@ exit: | |||
| 912 | } | 976 | } |
| 913 | EXPORT_SYMBOL_GPL(gigaset_shutdown); | 977 | EXPORT_SYMBOL_GPL(gigaset_shutdown); |
| 914 | 978 | ||
| 979 | /** | ||
| 980 | * gigaset_stop() - stop device operations | ||
| 981 | * @cs: device descriptor structure. | ||
| 982 | * | ||
| 983 | * Stops operations on the device by scheduling an EV_STOP event and | ||
| 984 | * waiting for completion of the shutdown. | ||
| 985 | */ | ||
| 915 | void gigaset_stop(struct cardstate *cs) | 986 | void gigaset_stop(struct cardstate *cs) |
| 916 | { | 987 | { |
| 917 | mutex_lock(&cs->mutex); | 988 | mutex_lock(&cs->mutex); |
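Taken together, the kerneldoc comments for gigaset_start(), gigaset_shutdown() and gigaset_stop() describe the runtime lifecycle exposed to hardware modules. A hedged sketch of that call order; the connect/disconnect helpers and the error handling are assumptions, not text from the patch.

/* Hedged sketch of the start/shutdown/stop call order described above. */
static int example_connect(struct cardstate *cs)
{
	if (!gigaset_start(cs))		/* 1 on success, 0 on error */
		return -EIO;
	return 0;
}

static void example_disconnect(struct cardstate *cs)
{
	gigaset_shutdown(cs);	/* returns -1 if no device was associated */
	gigaset_stop(cs);
}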
| @@ -1020,6 +1091,14 @@ struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty) | |||
| 1020 | return gigaset_get_cs_by_minor(tty->index + tty->driver->minor_start); | 1091 | return gigaset_get_cs_by_minor(tty->index + tty->driver->minor_start); |
| 1021 | } | 1092 | } |
| 1022 | 1093 | ||
| 1094 | /** | ||
| 1095 | * gigaset_freedriver() - free all associated resources of a driver | ||
| 1096 | * @drv: driver descriptor structure. | ||
| 1097 | * | ||
| 1098 | * Unregisters the driver from the system and deallocates the driver | ||
| 1099 | * structure @drv and all structures referenced from it. | ||
| 1100 | * All devices should be shut down before calling this. | ||
| 1101 | */ | ||
| 1023 | void gigaset_freedriver(struct gigaset_driver *drv) | 1102 | void gigaset_freedriver(struct gigaset_driver *drv) |
| 1024 | { | 1103 | { |
| 1025 | unsigned long flags; | 1104 | unsigned long flags; |
| @@ -1035,14 +1114,16 @@ void gigaset_freedriver(struct gigaset_driver *drv) | |||
| 1035 | } | 1114 | } |
| 1036 | EXPORT_SYMBOL_GPL(gigaset_freedriver); | 1115 | EXPORT_SYMBOL_GPL(gigaset_freedriver); |
| 1037 | 1116 | ||
| 1038 | /* gigaset_initdriver | 1117 | /** |
| 1118 | * gigaset_initdriver() - initialize driver structure | ||
| 1119 | * @minor: First minor number | ||
| 1120 | * @minors: Number of minors this driver can handle | ||
| 1121 | * @procname: Name of the driver | ||
| 1122 | * @devname: Name of the device files (prefix without minor number) | ||
| 1123 | * | ||
| 1039 | * Allocate and initialize gigaset_driver structure. Initialize interface. | 1124 | * Allocate and initialize gigaset_driver structure. Initialize interface. |
| 1040 | * parameters: | 1125 | * |
| 1041 | * minor First minor number | 1126 | * Return value: |
| 1042 | * minors Number of minors this driver can handle | ||
| 1043 | * procname Name of the driver | ||
| 1044 | * devname Name of the device files (prefix without minor number) | ||
| 1045 | * return value: | ||
| 1046 | * Pointer to the gigaset_driver structure on success, NULL on failure. | 1127 | * Pointer to the gigaset_driver structure on success, NULL on failure. |
| 1047 | */ | 1128 | */ |
| 1048 | struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors, | 1129 | struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors, |
| @@ -1095,6 +1176,13 @@ error: | |||
| 1095 | } | 1176 | } |
| 1096 | EXPORT_SYMBOL_GPL(gigaset_initdriver); | 1177 | EXPORT_SYMBOL_GPL(gigaset_initdriver); |
| 1097 | 1178 | ||
| 1179 | /** | ||
| 1180 | * gigaset_blockdriver() - block driver | ||
| 1181 | * @drv: driver descriptor structure. | ||
| 1182 | * | ||
| 1183 | * Prevents the driver from attaching new devices, in preparation for | ||
| 1184 | * deregistration. | ||
| 1185 | */ | ||
| 1098 | void gigaset_blockdriver(struct gigaset_driver *drv) | 1186 | void gigaset_blockdriver(struct gigaset_driver *drv) |
| 1099 | { | 1187 | { |
| 1100 | drv->blocked = 1; | 1188 | drv->blocked = 1; |
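The comments for gigaset_blockdriver() and gigaset_freedriver() imply a specific teardown order: stop new attachments first, then release the driver. A hedged sketch with a hypothetical unload helper:

/* Hedged sketch: block new device attachments before freeing the driver. */
static void example_unload(struct gigaset_driver *drv)
{
	gigaset_blockdriver(drv);	/* no new devices may attach */
	gigaset_freedriver(drv);	/* unregister and free the driver */
}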
| @@ -1110,7 +1198,7 @@ static int __init gigaset_init_module(void) | |||
| 1110 | if (gigaset_debuglevel == 1) | 1198 | if (gigaset_debuglevel == 1) |
| 1111 | gigaset_debuglevel = DEBUG_DEFAULT; | 1199 | gigaset_debuglevel = DEBUG_DEFAULT; |
| 1112 | 1200 | ||
| 1113 | pr_info(DRIVER_DESC "\n"); | 1201 | pr_info(DRIVER_DESC DRIVER_DESC_DEBUG "\n"); |
| 1114 | return 0; | 1202 | return 0; |
| 1115 | } | 1203 | } |
| 1116 | 1204 | ||
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c index 2d91049571a4..cc768caa38f5 100644 --- a/drivers/isdn/gigaset/ev-layer.c +++ b/drivers/isdn/gigaset/ev-layer.c | |||
| @@ -207,7 +207,6 @@ struct reply_t gigaset_tab_nocid[] = | |||
| 207 | /* leave dle mode */ | 207 | /* leave dle mode */ |
| 208 | {RSP_INIT, 0, 0,SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"}, | 208 | {RSP_INIT, 0, 0,SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"}, |
| 209 | {RSP_OK, 201,201, -1, 202,-1}, | 209 | {RSP_OK, 201,201, -1, 202,-1}, |
| 210 | //{RSP_ZDLE, 202,202, 0, 202, 0, {ACT_ERROR}},//DELETE | ||
| 211 | {RSP_ZDLE, 202,202, 0, 0, 0, {ACT_DLE0}}, | 210 | {RSP_ZDLE, 202,202, 0, 0, 0, {ACT_DLE0}}, |
| 212 | {RSP_NODEV, 200,249, -1, 0, 0, {ACT_FAKEDLE0}}, | 211 | {RSP_NODEV, 200,249, -1, 0, 0, {ACT_FAKEDLE0}}, |
| 213 | {RSP_ERROR, 200,249, -1, 0, 0, {ACT_FAILDLE0}}, | 212 | {RSP_ERROR, 200,249, -1, 0, 0, {ACT_FAILDLE0}}, |
| @@ -265,6 +264,7 @@ struct reply_t gigaset_tab_nocid[] = | |||
| 265 | {EV_SHUTDOWN, -1, -1, -1, -1,-1, {ACT_SHUTDOWN}}, //FIXME | 264 | {EV_SHUTDOWN, -1, -1, -1, -1,-1, {ACT_SHUTDOWN}}, //FIXME |
| 266 | 265 | ||
| 267 | /* misc. */ | 266 | /* misc. */ |
| 267 | {RSP_ERROR, -1, -1, -1, -1, -1, {ACT_ERROR} }, | ||
| 268 | {RSP_EMPTY, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME | 268 | {RSP_EMPTY, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME |
| 269 | {RSP_ZCFGT, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME | 269 | {RSP_ZCFGT, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME |
| 270 | {RSP_ZCFG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME | 270 | {RSP_ZCFG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME |
| @@ -328,10 +328,9 @@ struct reply_t gigaset_tab_cid[] = | |||
| 328 | {RSP_INIT, -1, -1,SEQ_HUP, 401, 5, {0}, "+VLS=0\r"}, /* hang up */ //-1,-1? | 328 | {RSP_INIT, -1, -1,SEQ_HUP, 401, 5, {0}, "+VLS=0\r"}, /* hang up */ //-1,-1? |
| 329 | {RSP_OK, 401,401, -1, 402, 5}, | 329 | {RSP_OK, 401,401, -1, 402, 5}, |
| 330 | {RSP_ZVLS, 402,402, 0, 403, 5}, | 330 | {RSP_ZVLS, 402,402, 0, 403, 5}, |
| 331 | {RSP_ZSAU, 403,403,ZSAU_DISCONNECT_REQ, -1,-1, {ACT_DEBUG}}, /* if not remote hup */ | 331 | {RSP_ZSAU, 403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} }, |
| 332 | //{RSP_ZSAU, 403,403,ZSAU_NULL, 401, 0, {ACT_ERROR}}, //DELETE//FIXME -> DLE0 // should we do this _before_ hanging up for base driver? | 332 | {RSP_ZSAU, 403, 403, ZSAU_NULL, 0, 0, {ACT_DISCONNECT} }, |
| 333 | {RSP_ZSAU, 403,403,ZSAU_NULL, 0, 0, {ACT_DISCONNECT}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver? | 333 | {RSP_NODEV, 401, 403, -1, 0, 0, {ACT_FAKEHUP} }, |
| 334 | {RSP_NODEV, 401,403, -1, 0, 0, {ACT_FAKEHUP}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver? | ||
| 335 | {RSP_ERROR, 401,401, -1, 0, 0, {ACT_ABORTHUP}}, | 334 | {RSP_ERROR, 401,401, -1, 0, 0, {ACT_ABORTHUP}}, |
| 336 | {EV_TIMEOUT, 401,403, -1, 0, 0, {ACT_ABORTHUP}}, | 335 | {EV_TIMEOUT, 401,403, -1, 0, 0, {ACT_ABORTHUP}}, |
| 337 | 336 | ||
| @@ -474,8 +473,13 @@ static int cid_of_response(char *s) | |||
| 474 | //FIXME is ;<digit>+ at end of non-CID response really impossible? | 473 | //FIXME is ;<digit>+ at end of non-CID response really impossible? |
| 475 | } | 474 | } |
| 476 | 475 | ||
| 477 | /* This function will be called via task queue from the callback handler. | 476 | /** |
| 478 | * We received a modem response and have to handle it.. | 477 | * gigaset_handle_modem_response() - process received modem response |
| 478 | * @cs: device descriptor structure. | ||
| 479 | * | ||
| 480 | * Called by asyncdata/isocdata if a block of data received from the | ||
| 481 | * device must be processed as a modem command response. The data is | ||
| 482 | * already in the cs structure. | ||
| 479 | */ | 483 | */ |
| 480 | void gigaset_handle_modem_response(struct cardstate *cs) | 484 | void gigaset_handle_modem_response(struct cardstate *cs) |
| 481 | { | 485 | { |
| @@ -707,6 +711,11 @@ static void disconnect(struct at_state_t **at_state_p) | |||
| 707 | if (bcs) { | 711 | if (bcs) { |
| 708 | /* B channel assigned: invoke hardware specific handler */ | 712 | /* B channel assigned: invoke hardware specific handler */ |
| 709 | cs->ops->close_bchannel(bcs); | 713 | cs->ops->close_bchannel(bcs); |
| 714 | /* notify LL */ | ||
| 715 | if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) { | ||
| 716 | bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL); | ||
| 717 | gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DHUP); | ||
| 718 | } | ||
| 710 | } else { | 719 | } else { |
| 711 | /* no B channel assigned: just deallocate */ | 720 | /* no B channel assigned: just deallocate */ |
| 712 | spin_lock_irqsave(&cs->lock, flags); | 721 | spin_lock_irqsave(&cs->lock, flags); |
| @@ -1429,11 +1438,12 @@ static void do_action(int action, struct cardstate *cs, | |||
| 1429 | cs->gotfwver = -1; | 1438 | cs->gotfwver = -1; |
| 1430 | dev_err(cs->dev, "could not read firmware version.\n"); | 1439 | dev_err(cs->dev, "could not read firmware version.\n"); |
| 1431 | break; | 1440 | break; |
| 1432 | #ifdef CONFIG_GIGASET_DEBUG | ||
| 1433 | case ACT_ERROR: | 1441 | case ACT_ERROR: |
| 1434 | *p_genresp = 1; | 1442 | gig_dbg(DEBUG_ANY, "%s: ERROR response in ConState %d", |
| 1435 | *p_resp_code = RSP_ERROR; | 1443 | __func__, at_state->ConState); |
| 1444 | cs->cur_at_seq = SEQ_NONE; | ||
| 1436 | break; | 1445 | break; |
| 1446 | #ifdef CONFIG_GIGASET_DEBUG | ||
| 1437 | case ACT_TEST: | 1447 | case ACT_TEST: |
| 1438 | { | 1448 | { |
| 1439 | static int count = 3; //2; //1; | 1449 | static int count = 3; //2; //1; |
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c index 9b22f9cf2f33..654489d836cd 100644 --- a/drivers/isdn/gigaset/i4l.c +++ b/drivers/isdn/gigaset/i4l.c | |||
| @@ -51,6 +51,12 @@ static int writebuf_from_LL(int driverID, int channel, int ack, | |||
| 51 | return -ENODEV; | 51 | return -ENODEV; |
| 52 | } | 52 | } |
| 53 | bcs = &cs->bcs[channel]; | 53 | bcs = &cs->bcs[channel]; |
| 54 | |||
| 55 | /* can only handle linear sk_buffs */ | ||
| 56 | if (skb_linearize(skb) < 0) { | ||
| 57 | dev_err(cs->dev, "%s: skb_linearize failed\n", __func__); | ||
| 58 | return -ENOMEM; | ||
| 59 | } | ||
| 54 | len = skb->len; | 60 | len = skb->len; |
| 55 | 61 | ||
| 56 | gig_dbg(DEBUG_LLDATA, | 62 | gig_dbg(DEBUG_LLDATA, |
| @@ -79,6 +85,14 @@ static int writebuf_from_LL(int driverID, int channel, int ack, | |||
| 79 | return cs->ops->send_skb(bcs, skb); | 85 | return cs->ops->send_skb(bcs, skb); |
| 80 | } | 86 | } |
| 81 | 87 | ||
| 88 | /** | ||
| 89 | * gigaset_skb_sent() - acknowledge sending an skb | ||
| 90 | * @bcs: B channel descriptor structure. | ||
| 91 | * @skb: sent data. | ||
| 92 | * | ||
| 93 | * Called by hardware module {bas,ser,usb}_gigaset when the data in an | ||
| 94 | * skb has been successfully sent, for signalling completion to the LL. | ||
| 95 | */ | ||
| 82 | void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb) | 96 | void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb) |
| 83 | { | 97 | { |
| 84 | unsigned len; | 98 | unsigned len; |
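As a hedged sketch of the calling convention described above (the completion helper and the subsequent free reflect a typical hardware-module pattern and are assumptions, not text from the patch):

/* Hedged sketch: a hardware module's transmit-completion path. */
static void example_tx_complete(struct bc_state *bcs, struct sk_buff *skb)
{
	gigaset_skb_sent(bcs, skb);	/* signal completion to the LL */
	dev_kfree_skb_any(skb);		/* the skb is no longer needed */
}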
| @@ -455,6 +469,15 @@ int gigaset_isdn_setup_accept(struct at_state_t *at_state) | |||
| 455 | return 0; | 469 | return 0; |
| 456 | } | 470 | } |
| 457 | 471 | ||
| 472 | /** | ||
| 473 | * gigaset_isdn_icall() - signal incoming call | ||
| 474 | * @at_state: connection state structure. | ||
| 475 | * | ||
| 476 | * Called by main module to notify the LL that an incoming call has been | ||
| 477 | * received. @at_state contains the parameters of the call. | ||
| 478 | * | ||
| 479 | * Return value: call disposition (ICALL_*) | ||
| 480 | */ | ||
| 458 | int gigaset_isdn_icall(struct at_state_t *at_state) | 481 | int gigaset_isdn_icall(struct at_state_t *at_state) |
| 459 | { | 482 | { |
| 460 | struct cardstate *cs = at_state->cs; | 483 | struct cardstate *cs = at_state->cs; |
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c index f33ac27de643..6a8e1384e7bd 100644 --- a/drivers/isdn/gigaset/interface.c +++ b/drivers/isdn/gigaset/interface.c | |||
| @@ -616,6 +616,15 @@ void gigaset_if_free(struct cardstate *cs) | |||
| 616 | tty_unregister_device(drv->tty, cs->minor_index); | 616 | tty_unregister_device(drv->tty, cs->minor_index); |
| 617 | } | 617 | } |
| 618 | 618 | ||
| 619 | /** | ||
| 620 | * gigaset_if_receive() - pass a received block of data to the tty device | ||
| 621 | * @cs: device descriptor structure. | ||
| 622 | * @buffer: received data. | ||
| 623 | * @len: number of bytes received. | ||
| 624 | * | ||
| 625 | * Called by asyncdata/isocdata if a block of data received from the | ||
| 626 | * device must be sent to userspace through the ttyG* device. | ||
| 627 | */ | ||
| 619 | void gigaset_if_receive(struct cardstate *cs, | 628 | void gigaset_if_receive(struct cardstate *cs, |
| 620 | unsigned char *buffer, size_t len) | 629 | unsigned char *buffer, size_t len) |
| 621 | { | 630 | { |
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c index bed38fcc432b..9f3ef7b4248c 100644 --- a/drivers/isdn/gigaset/isocdata.c +++ b/drivers/isdn/gigaset/isocdata.c | |||
| @@ -429,7 +429,7 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb, | |||
| 429 | return -EAGAIN; | 429 | return -EAGAIN; |
| 430 | } | 430 | } |
| 431 | 431 | ||
| 432 | dump_bytes(DEBUG_STREAM, "snd data", in, count); | 432 | dump_bytes(DEBUG_STREAM_DUMP, "snd data", in, count); |
| 433 | 433 | ||
| 434 | /* bitstuff and checksum input data */ | 434 | /* bitstuff and checksum input data */ |
| 435 | fcs = PPP_INITFCS; | 435 | fcs = PPP_INITFCS; |
| @@ -448,7 +448,6 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb, | |||
| 448 | /* put closing flag and repeat byte for flag idle */ | 448 | /* put closing flag and repeat byte for flag idle */ |
| 449 | isowbuf_putflag(iwb); | 449 | isowbuf_putflag(iwb); |
| 450 | end = isowbuf_donewrite(iwb); | 450 | end = isowbuf_donewrite(iwb); |
| 451 | dump_bytes(DEBUG_STREAM_DUMP, "isowbuf", iwb->data, end + 1); | ||
| 452 | return end; | 451 | return end; |
| 453 | } | 452 | } |
| 454 | 453 | ||
| @@ -482,6 +481,8 @@ static inline int trans_buildframe(struct isowbuf_t *iwb, | |||
| 482 | } | 481 | } |
| 483 | 482 | ||
| 484 | gig_dbg(DEBUG_STREAM, "put %d bytes", count); | 483 | gig_dbg(DEBUG_STREAM, "put %d bytes", count); |
| 484 | dump_bytes(DEBUG_STREAM_DUMP, "snd data", in, count); | ||
| 485 | |||
| 485 | write = iwb->write; | 486 | write = iwb->write; |
| 486 | do { | 487 | do { |
| 487 | c = bitrev8(*in++); | 488 | c = bitrev8(*in++); |
| @@ -583,7 +584,7 @@ static inline void hdlc_done(struct bc_state *bcs) | |||
| 583 | procskb->tail -= 2; | 584 | procskb->tail -= 2; |
| 584 | gig_dbg(DEBUG_ISO, "%s: good frame (%d octets)", | 585 | gig_dbg(DEBUG_ISO, "%s: good frame (%d octets)", |
| 585 | __func__, procskb->len); | 586 | __func__, procskb->len); |
| 586 | dump_bytes(DEBUG_STREAM, | 587 | dump_bytes(DEBUG_STREAM_DUMP, |
| 587 | "rcv data", procskb->data, procskb->len); | 588 | "rcv data", procskb->data, procskb->len); |
| 588 | bcs->hw.bas->goodbytes += procskb->len; | 589 | bcs->hw.bas->goodbytes += procskb->len; |
| 589 | gigaset_rcv_skb(procskb, bcs->cs, bcs); | 590 | gigaset_rcv_skb(procskb, bcs->cs, bcs); |
| @@ -878,6 +879,8 @@ static inline void trans_receive(unsigned char *src, unsigned count, | |||
| 878 | dobytes--; | 879 | dobytes--; |
| 879 | } | 880 | } |
| 880 | if (dobytes == 0) { | 881 | if (dobytes == 0) { |
| 882 | dump_bytes(DEBUG_STREAM_DUMP, | ||
| 883 | "rcv data", skb->data, skb->len); | ||
| 881 | gigaset_rcv_skb(skb, bcs->cs, bcs); | 884 | gigaset_rcv_skb(skb, bcs->cs, bcs); |
| 882 | bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN); | 885 | bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN); |
| 883 | if (!skb) { | 886 | if (!skb) { |
| @@ -973,16 +976,17 @@ void gigaset_isoc_input(struct inbuf_t *inbuf) | |||
| 973 | 976 | ||
| 974 | /* == data output ========================================================== */ | 977 | /* == data output ========================================================== */ |
| 975 | 978 | ||
| 976 | /* gigaset_send_skb | 979 | /** |
| 977 | * called by common.c to queue an skb for sending | 980 | * gigaset_isoc_send_skb() - queue an skb for sending |
| 978 | * and start transmission if necessary | 981 | * @bcs: B channel descriptor structure. |
| 979 | * parameters: | 982 | * @skb: data to send. |
| 980 | * B Channel control structure | 983 | * |
| 981 | * skb | 984 | * Called by i4l.c to queue an skb for sending, and start transmission if |
| 982 | * return value: | 985 | * necessary. |
| 983 | * number of bytes accepted for sending | 986 | * |
| 984 | * (skb->len if ok, 0 if out of buffer space) | 987 | * Return value: |
| 985 | * or error code (< 0, eg. -EINVAL) | 988 | * number of bytes accepted for sending (skb->len) if ok, |
| 989 | * error code < 0 (eg. -ENODEV) on error | ||
| 986 | */ | 990 | */ |
| 987 | int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb) | 991 | int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb) |
| 988 | { | 992 | { |
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c index 708a8017c21d..adc561eb59d2 100644 --- a/drivers/leds/leds-pca9532.c +++ b/drivers/leds/leds-pca9532.c | |||
| @@ -19,9 +19,6 @@ | |||
| 19 | #include <linux/workqueue.h> | 19 | #include <linux/workqueue.h> |
| 20 | #include <linux/leds-pca9532.h> | 20 | #include <linux/leds-pca9532.h> |
| 21 | 21 | ||
| 22 | static const unsigned short normal_i2c[] = { /*0x60,*/ I2C_CLIENT_END}; | ||
| 23 | I2C_CLIENT_INSMOD_1(pca9532); | ||
| 24 | |||
| 25 | #define PCA9532_REG_PSC(i) (0x2+(i)*2) | 22 | #define PCA9532_REG_PSC(i) (0x2+(i)*2) |
| 26 | #define PCA9532_REG_PWM(i) (0x3+(i)*2) | 23 | #define PCA9532_REG_PWM(i) (0x3+(i)*2) |
| 27 | #define PCA9532_REG_LS0 0x6 | 24 | #define PCA9532_REG_LS0 0x6 |
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c index fde377c60cca..556f0feaa4df 100644 --- a/drivers/macintosh/therm_adt746x.c +++ b/drivers/macintosh/therm_adt746x.c | |||
| @@ -124,6 +124,8 @@ read_reg(struct thermostat* th, int reg) | |||
| 124 | return data; | 124 | return data; |
| 125 | } | 125 | } |
| 126 | 126 | ||
| 127 | static struct i2c_driver thermostat_driver; | ||
| 128 | |||
| 127 | static int | 129 | static int |
| 128 | attach_thermostat(struct i2c_adapter *adapter) | 130 | attach_thermostat(struct i2c_adapter *adapter) |
| 129 | { | 131 | { |
| @@ -148,7 +150,7 @@ attach_thermostat(struct i2c_adapter *adapter) | |||
| 148 | * Let i2c-core delete that device on driver removal. | 150 | * Let i2c-core delete that device on driver removal. |
| 149 | * This is safe because i2c-core holds the core_lock mutex for us. | 151 | * This is safe because i2c-core holds the core_lock mutex for us. |
| 150 | */ | 152 | */ |
| 151 | list_add_tail(&client->detected, &client->driver->clients); | 153 | list_add_tail(&client->detected, &thermostat_driver.clients); |
| 152 | return 0; | 154 | return 0; |
| 153 | } | 155 | } |
| 154 | 156 | ||
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c index a028598af2d3..ea32c7e5a9af 100644 --- a/drivers/macintosh/therm_pm72.c +++ b/drivers/macintosh/therm_pm72.c | |||
| @@ -286,6 +286,8 @@ struct fcu_fan_table fcu_fans[] = { | |||
| 286 | }, | 286 | }, |
| 287 | }; | 287 | }; |
| 288 | 288 | ||
| 289 | static struct i2c_driver therm_pm72_driver; | ||
| 290 | |||
| 289 | /* | 291 | /* |
| 290 | * Utility function to create an i2c_client structure and | 292 | * Utility function to create an i2c_client structure and |
| 291 | * attach it to one of u3 adapters | 293 | * attach it to one of u3 adapters |
| @@ -318,7 +320,7 @@ static struct i2c_client *attach_i2c_chip(int id, const char *name) | |||
| 318 | * Let i2c-core delete that device on driver removal. | 320 | * Let i2c-core delete that device on driver removal. |
| 319 | * This is safe because i2c-core holds the core_lock mutex for us. | 321 | * This is safe because i2c-core holds the core_lock mutex for us. |
| 320 | */ | 322 | */ |
| 321 | list_add_tail(&clt->detected, &clt->driver->clients); | 323 | list_add_tail(&clt->detected, &therm_pm72_driver.clients); |
| 322 | return clt; | 324 | return clt; |
| 323 | } | 325 | } |
| 324 | 326 | ||
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c index 529886c7a826..ed6426a10773 100644 --- a/drivers/macintosh/windfarm_lm75_sensor.c +++ b/drivers/macintosh/windfarm_lm75_sensor.c | |||
| @@ -115,6 +115,8 @@ static int wf_lm75_probe(struct i2c_client *client, | |||
| 115 | return rc; | 115 | return rc; |
| 116 | } | 116 | } |
| 117 | 117 | ||
| 118 | static struct i2c_driver wf_lm75_driver; | ||
| 119 | |||
| 118 | static struct i2c_client *wf_lm75_create(struct i2c_adapter *adapter, | 120 | static struct i2c_client *wf_lm75_create(struct i2c_adapter *adapter, |
| 119 | u8 addr, int ds1775, | 121 | u8 addr, int ds1775, |
| 120 | const char *loc) | 122 | const char *loc) |
| @@ -157,7 +159,7 @@ static struct i2c_client *wf_lm75_create(struct i2c_adapter *adapter, | |||
| 157 | * Let i2c-core delete that device on driver removal. | 159 | * Let i2c-core delete that device on driver removal. |
| 158 | * This is safe because i2c-core holds the core_lock mutex for us. | 160 | * This is safe because i2c-core holds the core_lock mutex for us. |
| 159 | */ | 161 | */ |
| 160 | list_add_tail(&client->detected, &client->driver->clients); | 162 | list_add_tail(&client->detected, &wf_lm75_driver.clients); |
| 161 | return client; | 163 | return client; |
| 162 | fail: | 164 | fail: |
| 163 | return NULL; | 165 | return NULL; |
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c index e2a55ecda2b2..a67b349319e9 100644 --- a/drivers/macintosh/windfarm_max6690_sensor.c +++ b/drivers/macintosh/windfarm_max6690_sensor.c | |||
| @@ -88,6 +88,8 @@ static int wf_max6690_probe(struct i2c_client *client, | |||
| 88 | return rc; | 88 | return rc; |
| 89 | } | 89 | } |
| 90 | 90 | ||
| 91 | static struct i2c_driver wf_max6690_driver; | ||
| 92 | |||
| 91 | static struct i2c_client *wf_max6690_create(struct i2c_adapter *adapter, | 93 | static struct i2c_client *wf_max6690_create(struct i2c_adapter *adapter, |
| 92 | u8 addr, const char *loc) | 94 | u8 addr, const char *loc) |
| 93 | { | 95 | { |
| @@ -119,7 +121,7 @@ static struct i2c_client *wf_max6690_create(struct i2c_adapter *adapter, | |||
| 119 | * Let i2c-core delete that device on driver removal. | 121 | * Let i2c-core delete that device on driver removal. |
| 120 | * This is safe because i2c-core holds the core_lock mutex for us. | 122 | * This is safe because i2c-core holds the core_lock mutex for us. |
| 121 | */ | 123 | */ |
| 122 | list_add_tail(&client->detected, &client->driver->clients); | 124 | list_add_tail(&client->detected, &wf_max6690_driver.clients); |
| 123 | return client; | 125 | return client; |
| 124 | 126 | ||
| 125 | fail: | 127 | fail: |
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c index 5da729e58f99..e20330a28959 100644 --- a/drivers/macintosh/windfarm_smu_sat.c +++ b/drivers/macintosh/windfarm_smu_sat.c | |||
| @@ -194,6 +194,8 @@ static struct wf_sensor_ops wf_sat_ops = { | |||
| 194 | .owner = THIS_MODULE, | 194 | .owner = THIS_MODULE, |
| 195 | }; | 195 | }; |
| 196 | 196 | ||
| 197 | static struct i2c_driver wf_sat_driver; | ||
| 198 | |||
| 197 | static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev) | 199 | static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev) |
| 198 | { | 200 | { |
| 199 | struct i2c_board_info info; | 201 | struct i2c_board_info info; |
| @@ -222,7 +224,7 @@ static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev) | |||
| 222 | * Let i2c-core delete that device on driver removal. | 224 | * Let i2c-core delete that device on driver removal. |
| 223 | * This is safe because i2c-core holds the core_lock mutex for us. | 225 | * This is safe because i2c-core holds the core_lock mutex for us. |
| 224 | */ | 226 | */ |
| 225 | list_add_tail(&client->detected, &client->driver->clients); | 227 | list_add_tail(&client->detected, &wf_sat_driver.clients); |
| 226 | } | 228 | } |
| 227 | 229 | ||
| 228 | static int wf_sat_probe(struct i2c_client *client, | 230 | static int wf_sat_probe(struct i2c_client *client, |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 376f1ab48a24..23e76fe0d359 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
| @@ -130,7 +130,7 @@ struct mapped_device { | |||
| 130 | /* | 130 | /* |
| 131 | * A list of ios that arrived while we were suspended. | 131 | * A list of ios that arrived while we were suspended. |
| 132 | */ | 132 | */ |
| 133 | atomic_t pending[2]; | 133 | atomic_t pending; |
| 134 | wait_queue_head_t wait; | 134 | wait_queue_head_t wait; |
| 135 | struct work_struct work; | 135 | struct work_struct work; |
| 136 | struct bio_list deferred; | 136 | struct bio_list deferred; |
| @@ -453,14 +453,13 @@ static void start_io_acct(struct dm_io *io) | |||
| 453 | { | 453 | { |
| 454 | struct mapped_device *md = io->md; | 454 | struct mapped_device *md = io->md; |
| 455 | int cpu; | 455 | int cpu; |
| 456 | int rw = bio_data_dir(io->bio); | ||
| 457 | 456 | ||
| 458 | io->start_time = jiffies; | 457 | io->start_time = jiffies; |
| 459 | 458 | ||
| 460 | cpu = part_stat_lock(); | 459 | cpu = part_stat_lock(); |
| 461 | part_round_stats(cpu, &dm_disk(md)->part0); | 460 | part_round_stats(cpu, &dm_disk(md)->part0); |
| 462 | part_stat_unlock(); | 461 | part_stat_unlock(); |
| 463 | dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]); | 462 | dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending); |
| 464 | } | 463 | } |
| 465 | 464 | ||
| 466 | static void end_io_acct(struct dm_io *io) | 465 | static void end_io_acct(struct dm_io *io) |
| @@ -480,9 +479,8 @@ static void end_io_acct(struct dm_io *io) | |||
| 480 | * After this is decremented the bio must not be touched if it is | 479 | * After this is decremented the bio must not be touched if it is |
| 481 | * a barrier. | 480 | * a barrier. |
| 482 | */ | 481 | */ |
| 483 | dm_disk(md)->part0.in_flight[rw] = pending = | 482 | dm_disk(md)->part0.in_flight = pending = |
| 484 | atomic_dec_return(&md->pending[rw]); | 483 | atomic_dec_return(&md->pending); |
| 485 | pending += atomic_read(&md->pending[rw^0x1]); | ||
| 486 | 484 | ||
| 487 | /* nudge anyone waiting on suspend queue */ | 485 | /* nudge anyone waiting on suspend queue */ |
| 488 | if (!pending) | 486 | if (!pending) |
| @@ -1787,8 +1785,7 @@ static struct mapped_device *alloc_dev(int minor) | |||
| 1787 | if (!md->disk) | 1785 | if (!md->disk) |
| 1788 | goto bad_disk; | 1786 | goto bad_disk; |
| 1789 | 1787 | ||
| 1790 | atomic_set(&md->pending[0], 0); | 1788 | atomic_set(&md->pending, 0); |
| 1791 | atomic_set(&md->pending[1], 0); | ||
| 1792 | init_waitqueue_head(&md->wait); | 1789 | init_waitqueue_head(&md->wait); |
| 1793 | INIT_WORK(&md->work, dm_wq_work); | 1790 | INIT_WORK(&md->work, dm_wq_work); |
| 1794 | init_waitqueue_head(&md->eventq); | 1791 | init_waitqueue_head(&md->eventq); |
| @@ -2091,8 +2088,7 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible) | |||
| 2091 | break; | 2088 | break; |
| 2092 | } | 2089 | } |
| 2093 | spin_unlock_irqrestore(q->queue_lock, flags); | 2090 | spin_unlock_irqrestore(q->queue_lock, flags); |
| 2094 | } else if (!atomic_read(&md->pending[0]) && | 2091 | } else if (!atomic_read(&md->pending)) |
| 2095 | !atomic_read(&md->pending[1])) | ||
| 2096 | break; | 2092 | break; |
| 2097 | 2093 | ||
| 2098 | if (interruptible == TASK_INTERRUPTIBLE && | 2094 | if (interruptible == TASK_INTERRUPTIBLE && |
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c index 516414983593..c37790ad92d0 100644 --- a/drivers/media/dvb/dvb-core/dmxdev.c +++ b/drivers/media/dvb/dvb-core/dmxdev.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | * | 20 | * |
| 21 | */ | 21 | */ |
| 22 | 22 | ||
| 23 | #include <linux/sched.h> | ||
| 23 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
| 24 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
| 25 | #include <linux/vmalloc.h> | 26 | #include <linux/vmalloc.h> |
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c index eef6d3616626..91c537bca8ad 100644 --- a/drivers/media/dvb/dvb-core/dvb_demux.c +++ b/drivers/media/dvb/dvb-core/dvb_demux.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | * | 21 | * |
| 22 | */ | 22 | */ |
| 23 | 23 | ||
| 24 | #include <linux/sched.h> | ||
| 24 | #include <linux/spinlock.h> | 25 | #include <linux/spinlock.h> |
| 25 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
| 26 | #include <linux/vmalloc.h> | 27 | #include <linux/vmalloc.h> |
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c index 8b1440136c45..482d0f3be5ff 100644 --- a/drivers/media/radio/radio-cadet.c +++ b/drivers/media/radio/radio-cadet.c | |||
| @@ -38,6 +38,7 @@ | |||
| 38 | #include <linux/videodev2.h> /* V4L2 API defs */ | 38 | #include <linux/videodev2.h> /* V4L2 API defs */ |
| 39 | #include <linux/param.h> | 39 | #include <linux/param.h> |
| 40 | #include <linux/pnp.h> | 40 | #include <linux/pnp.h> |
| 41 | #include <linux/sched.h> | ||
| 41 | #include <linux/io.h> /* outb, outb_p */ | 42 | #include <linux/io.h> /* outb, outb_p */ |
| 42 | #include <media/v4l2-device.h> | 43 | #include <media/v4l2-device.h> |
| 43 | #include <media/v4l2-ioctl.h> | 44 | #include <media/v4l2-ioctl.h> |
diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c index 43ab0adf3b61..2377313c041a 100644 --- a/drivers/media/video/cpia.c +++ b/drivers/media/video/cpia.c | |||
| @@ -31,6 +31,7 @@ | |||
| 31 | #include <linux/init.h> | 31 | #include <linux/init.h> |
| 32 | #include <linux/fs.h> | 32 | #include <linux/fs.h> |
| 33 | #include <linux/vmalloc.h> | 33 | #include <linux/vmalloc.h> |
| 34 | #include <linux/sched.h> | ||
| 34 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
| 35 | #include <linux/proc_fs.h> | 36 | #include <linux/proc_fs.h> |
| 36 | #include <linux/ctype.h> | 37 | #include <linux/ctype.h> |
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c index 5447da16a170..613481028272 100644 --- a/drivers/mfd/ab3100-core.c +++ b/drivers/mfd/ab3100-core.c | |||
| @@ -57,8 +57,6 @@ | |||
| 57 | * The AB3100 is usually assigned address 0x48 (7-bit) | 57 | * The AB3100 is usually assigned address 0x48 (7-bit) |
| 58 | * The chip is defined in the platform i2c_board_data section. | 58 | * The chip is defined in the platform i2c_board_data section. |
| 59 | */ | 59 | */ |
| 60 | static unsigned short normal_i2c[] = { 0x48, I2C_CLIENT_END }; | ||
| 61 | I2C_CLIENT_INSMOD_1(ab3100); | ||
| 62 | 60 | ||
| 63 | u8 ab3100_get_chip_type(struct ab3100 *ab3100) | 61 | u8 ab3100_get_chip_type(struct ab3100 *ab3100) |
| 64 | { | 62 | { |
| @@ -966,7 +964,7 @@ static int __exit ab3100_remove(struct i2c_client *client) | |||
| 966 | } | 964 | } |
| 967 | 965 | ||
| 968 | static const struct i2c_device_id ab3100_id[] = { | 966 | static const struct i2c_device_id ab3100_id[] = { |
| 969 | { "ab3100", ab3100 }, | 967 | { "ab3100", 0 }, |
| 970 | { } | 968 | { } |
| 971 | }; | 969 | }; |
| 972 | MODULE_DEVICE_TABLE(i2c, ab3100_id); | 970 | MODULE_DEVICE_TABLE(i2c, ab3100_id); |
diff --git a/drivers/mfd/ucb1400_core.c b/drivers/mfd/ucb1400_core.c index 2afc08006e6d..fa294b6d600a 100644 --- a/drivers/mfd/ucb1400_core.c +++ b/drivers/mfd/ucb1400_core.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | */ | 21 | */ |
| 22 | 22 | ||
| 23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
| 24 | #include <linux/sched.h> | ||
| 24 | #include <linux/ucb1400.h> | 25 | #include <linux/ucb1400.h> |
| 25 | 26 | ||
| 26 | unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel, | 27 | unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel, |
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c index 3c0c58eed347..5a6b2bce8ad5 100644 --- a/drivers/misc/eeprom/max6875.c +++ b/drivers/misc/eeprom/max6875.c | |||
| @@ -33,12 +33,6 @@ | |||
| 33 | #include <linux/i2c.h> | 33 | #include <linux/i2c.h> |
| 34 | #include <linux/mutex.h> | 34 | #include <linux/mutex.h> |
| 35 | 35 | ||
| 36 | /* Do not scan - the MAX6875 access method will write to some EEPROM chips */ | ||
| 37 | static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; | ||
| 38 | |||
| 39 | /* Insmod parameters */ | ||
| 40 | I2C_CLIENT_INSMOD_1(max6875); | ||
| 41 | |||
| 42 | /* The MAX6875 can only read/write 16 bytes at a time */ | 36 | /* The MAX6875 can only read/write 16 bytes at a time */ |
| 43 | #define SLICE_SIZE 16 | 37 | #define SLICE_SIZE 16 |
| 44 | #define SLICE_BITS 4 | 38 | #define SLICE_BITS 4 |
| @@ -146,31 +140,21 @@ static struct bin_attribute user_eeprom_attr = { | |||
| 146 | .read = max6875_read, | 140 | .read = max6875_read, |
| 147 | }; | 141 | }; |
| 148 | 142 | ||
| 149 | /* Return 0 if detection is successful, -ENODEV otherwise */ | 143 | static int max6875_probe(struct i2c_client *client, |
| 150 | static int max6875_detect(struct i2c_client *client, int kind, | 144 | const struct i2c_device_id *id) |
| 151 | struct i2c_board_info *info) | ||
| 152 | { | 145 | { |
| 153 | struct i2c_adapter *adapter = client->adapter; | 146 | struct i2c_adapter *adapter = client->adapter; |
| 147 | struct max6875_data *data; | ||
| 148 | int err; | ||
| 154 | 149 | ||
| 155 | if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE_DATA | 150 | if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
| 156 | | I2C_FUNC_SMBUS_READ_BYTE)) | 151 | | I2C_FUNC_SMBUS_READ_BYTE)) |
| 157 | return -ENODEV; | 152 | return -ENODEV; |
| 158 | 153 | ||
| 159 | /* Only check even addresses */ | 154 | /* Only bind to even addresses */ |
| 160 | if (client->addr & 1) | 155 | if (client->addr & 1) |
| 161 | return -ENODEV; | 156 | return -ENODEV; |
| 162 | 157 | ||
| 163 | strlcpy(info->type, "max6875", I2C_NAME_SIZE); | ||
| 164 | |||
| 165 | return 0; | ||
| 166 | } | ||
| 167 | |||
| 168 | static int max6875_probe(struct i2c_client *client, | ||
| 169 | const struct i2c_device_id *id) | ||
| 170 | { | ||
| 171 | struct max6875_data *data; | ||
| 172 | int err; | ||
| 173 | |||
| 174 | if (!(data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL))) | 158 | if (!(data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL))) |
| 175 | return -ENOMEM; | 159 | return -ENOMEM; |
| 176 | 160 | ||
| @@ -222,9 +206,6 @@ static struct i2c_driver max6875_driver = { | |||
| 222 | .probe = max6875_probe, | 206 | .probe = max6875_probe, |
| 223 | .remove = max6875_remove, | 207 | .remove = max6875_remove, |
| 224 | .id_table = max6875_id, | 208 | .id_table = max6875_id, |
| 225 | |||
| 226 | .detect = max6875_detect, | ||
| 227 | .address_data = &addr_data, | ||
| 228 | }; | 209 | }; |
| 229 | 210 | ||
| 230 | static int __init max6875_init(void) | 211 | static int __init max6875_init(void) |
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c index e1035c895808..f85dcd536508 100644 --- a/drivers/mmc/core/sdio_cis.c +++ b/drivers/mmc/core/sdio_cis.c | |||
| @@ -29,6 +29,8 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func, | |||
| 29 | unsigned i, nr_strings; | 29 | unsigned i, nr_strings; |
| 30 | char **buffer, *string; | 30 | char **buffer, *string; |
| 31 | 31 | ||
| 32 | /* Find all null-terminated (including zero length) strings in | ||
| 33 | the TPLLV1_INFO field. Trailing garbage is ignored. */ | ||
| 32 | buf += 2; | 34 | buf += 2; |
| 33 | size -= 2; | 35 | size -= 2; |
| 34 | 36 | ||
| @@ -39,11 +41,8 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func, | |||
| 39 | if (buf[i] == 0) | 41 | if (buf[i] == 0) |
| 40 | nr_strings++; | 42 | nr_strings++; |
| 41 | } | 43 | } |
| 42 | 44 | if (nr_strings == 0) | |
| 43 | if (nr_strings < 4) { | ||
| 44 | printk(KERN_WARNING "SDIO: ignoring broken CISTPL_VERS_1\n"); | ||
| 45 | return 0; | 45 | return 0; |
| 46 | } | ||
| 47 | 46 | ||
| 48 | size = i; | 47 | size = i; |
| 49 | 48 | ||
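The new comment and the relaxed nr_strings check describe how the TPLLV1_INFO payload is scanned: every NUL-terminated string counts (even empty ones) and anything after the 0xff end marker is ignored. A hedged, stand-alone restatement of that counting loop (not the driver's own helper):

/* Hedged illustration: count NUL-terminated strings in a CISTPL_VERS_1
 * payload, stopping at the 0xff end marker so trailing garbage is ignored. */
static unsigned count_vers1_strings(const unsigned char *buf, unsigned size)
{
	unsigned i, nr_strings = 0;

	for (i = 0; i < size; i++) {
		if (buf[i] == 0xff)	/* end of tuple data */
			break;
		if (buf[i] == 0)	/* one string ends here */
			nr_strings++;
	}
	return nr_strings;
}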
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 0acbf4f5be50..8ca17a3e96ea 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c | |||
| @@ -32,14 +32,6 @@ struct mtd_blkcore_priv { | |||
| 32 | spinlock_t queue_lock; | 32 | spinlock_t queue_lock; |
| 33 | }; | 33 | }; |
| 34 | 34 | ||
| 35 | static int blktrans_discard_request(struct request_queue *q, | ||
| 36 | struct request *req) | ||
| 37 | { | ||
| 38 | req->cmd_type = REQ_TYPE_LINUX_BLOCK; | ||
| 39 | req->cmd[0] = REQ_LB_OP_DISCARD; | ||
| 40 | return 0; | ||
| 41 | } | ||
| 42 | |||
| 43 | static int do_blktrans_request(struct mtd_blktrans_ops *tr, | 35 | static int do_blktrans_request(struct mtd_blktrans_ops *tr, |
| 44 | struct mtd_blktrans_dev *dev, | 36 | struct mtd_blktrans_dev *dev, |
| 45 | struct request *req) | 37 | struct request *req) |
| @@ -52,10 +44,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, | |||
| 52 | 44 | ||
| 53 | buf = req->buffer; | 45 | buf = req->buffer; |
| 54 | 46 | ||
| 55 | if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && | ||
| 56 | req->cmd[0] == REQ_LB_OP_DISCARD) | ||
| 57 | return tr->discard(dev, block, nsect); | ||
| 58 | |||
| 59 | if (!blk_fs_request(req)) | 47 | if (!blk_fs_request(req)) |
| 60 | return -EIO; | 48 | return -EIO; |
| 61 | 49 | ||
| @@ -63,6 +51,9 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, | |||
| 63 | get_capacity(req->rq_disk)) | 51 | get_capacity(req->rq_disk)) |
| 64 | return -EIO; | 52 | return -EIO; |
| 65 | 53 | ||
| 54 | if (blk_discard_rq(req)) | ||
| 55 | return tr->discard(dev, block, nsect); | ||
| 56 | |||
| 66 | switch(rq_data_dir(req)) { | 57 | switch(rq_data_dir(req)) { |
| 67 | case READ: | 58 | case READ: |
| 68 | for (; nsect > 0; nsect--, block++, buf += tr->blksize) | 59 | for (; nsect > 0; nsect--, block++, buf += tr->blksize) |
| @@ -380,8 +371,8 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr) | |||
| 380 | tr->blkcore_priv->rq->queuedata = tr; | 371 | tr->blkcore_priv->rq->queuedata = tr; |
| 381 | blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize); | 372 | blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize); |
| 382 | if (tr->discard) | 373 | if (tr->discard) |
| 383 | blk_queue_set_discard(tr->blkcore_priv->rq, | 374 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, |
| 384 | blktrans_discard_request); | 375 | tr->blkcore_priv->rq); |
| 385 | 376 | ||
| 386 | tr->blkshift = ffs(tr->blksize) - 1; | 377 | tr->blkshift = ffs(tr->blksize) - 1; |
| 387 | 378 | ||
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index fdf5937233fc..04f63c77071d 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c | |||
| @@ -721,7 +721,7 @@ static inline void update_rx_stats(struct net_device *dev, u32 status) | |||
| 721 | ps->rx_errors++; | 721 | ps->rx_errors++; |
| 722 | if (status & RX_MISSED_FRAME) | 722 | if (status & RX_MISSED_FRAME) |
| 723 | ps->rx_missed_errors++; | 723 | ps->rx_missed_errors++; |
| 724 | if (status & (RX_OVERLEN | RX_OVERLEN | RX_LEN_ERROR)) | 724 | if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR)) |
| 725 | ps->rx_length_errors++; | 725 | ps->rx_length_errors++; |
| 726 | if (status & RX_CRC_ERROR) | 726 | if (status & RX_CRC_ERROR) |
| 727 | ps->rx_crc_errors++; | 727 | ps->rx_crc_errors++; |
| @@ -794,8 +794,6 @@ static int au1000_rx(struct net_device *dev) | |||
| 794 | printk("rx len error\n"); | 794 | printk("rx len error\n"); |
| 795 | if (status & RX_U_CNTRL_FRAME) | 795 | if (status & RX_U_CNTRL_FRAME) |
| 796 | printk("rx u control frame\n"); | 796 | printk("rx u control frame\n"); |
| 797 | if (status & RX_MISSED_FRAME) | ||
| 798 | printk("rx miss\n"); | ||
| 799 | } | 797 | } |
| 800 | } | 798 | } |
| 801 | prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); | 799 | prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); |
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 79d35d122c08..89876ade5e33 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c | |||
| @@ -1129,7 +1129,6 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, | |||
| 1129 | spin_lock_bh(&adapter->mcc_lock); | 1129 | spin_lock_bh(&adapter->mcc_lock); |
| 1130 | 1130 | ||
| 1131 | wrb = wrb_from_mccq(adapter); | 1131 | wrb = wrb_from_mccq(adapter); |
| 1132 | req = embedded_payload(wrb); | ||
| 1133 | sge = nonembedded_sgl(wrb); | 1132 | sge = nonembedded_sgl(wrb); |
| 1134 | 1133 | ||
| 1135 | be_wrb_hdr_prepare(wrb, cmd->size, false, 1); | 1134 | be_wrb_hdr_prepare(wrb, cmd->size, false, 1); |
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h index 8b4c2cb9ad62..a86f917f85f4 100644 --- a/drivers/net/benet/be_cmds.h +++ b/drivers/net/benet/be_cmds.h | |||
| @@ -62,7 +62,7 @@ enum { | |||
| 62 | MCC_STATUS_QUEUE_FLUSHING = 0x4, | 62 | MCC_STATUS_QUEUE_FLUSHING = 0x4, |
| 63 | /* The command is completing with a DMA error */ | 63 | /* The command is completing with a DMA error */ |
| 64 | MCC_STATUS_DMA_FAILED = 0x5, | 64 | MCC_STATUS_DMA_FAILED = 0x5, |
| 65 | MCC_STATUS_NOT_SUPPORTED = 0x66 | 65 | MCC_STATUS_NOT_SUPPORTED = 66 |
| 66 | }; | 66 | }; |
| 67 | 67 | ||
| 68 | #define CQE_STATUS_COMPL_MASK 0xFFFF | 68 | #define CQE_STATUS_COMPL_MASK 0xFFFF |
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c index 11445df3dbc0..cda5bf2fc50a 100644 --- a/drivers/net/benet/be_ethtool.c +++ b/drivers/net/benet/be_ethtool.c | |||
| @@ -358,7 +358,7 @@ const struct ethtool_ops be_ethtool_ops = { | |||
| 358 | .get_rx_csum = be_get_rx_csum, | 358 | .get_rx_csum = be_get_rx_csum, |
| 359 | .set_rx_csum = be_set_rx_csum, | 359 | .set_rx_csum = be_set_rx_csum, |
| 360 | .get_tx_csum = ethtool_op_get_tx_csum, | 360 | .get_tx_csum = ethtool_op_get_tx_csum, |
| 361 | .set_tx_csum = ethtool_op_set_tx_csum, | 361 | .set_tx_csum = ethtool_op_set_tx_hw_csum, |
| 362 | .get_sg = ethtool_op_get_sg, | 362 | .get_sg = ethtool_op_get_sg, |
| 363 | .set_sg = ethtool_op_set_sg, | 363 | .set_sg = ethtool_op_set_sg, |
| 364 | .get_tso = ethtool_op_get_tso, | 364 | .get_tso = ethtool_op_get_tso, |
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 2f9b50156e0c..6d5e81f7046f 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c | |||
| @@ -197,7 +197,7 @@ void netdev_stats_update(struct be_adapter *adapter) | |||
| 197 | /* no space available in linux */ | 197 | /* no space available in linux */ |
| 198 | dev_stats->tx_dropped = 0; | 198 | dev_stats->tx_dropped = 0; |
| 199 | 199 | ||
| 200 | dev_stats->multicast = port_stats->tx_multicastframes; | 200 | dev_stats->multicast = port_stats->rx_multicast_frames; |
| 201 | dev_stats->collisions = 0; | 201 | dev_stats->collisions = 0; |
| 202 | 202 | ||
| 203 | /* detailed tx_errors */ | 203 | /* detailed tx_errors */ |
| @@ -1899,8 +1899,8 @@ static void be_netdev_init(struct net_device *netdev) | |||
| 1899 | struct be_adapter *adapter = netdev_priv(netdev); | 1899 | struct be_adapter *adapter = netdev_priv(netdev); |
| 1900 | 1900 | ||
| 1901 | netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | | 1901 | netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | |
| 1902 | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM | | 1902 | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM | |
| 1903 | NETIF_F_IPV6_CSUM | NETIF_F_GRO; | 1903 | NETIF_F_GRO; |
| 1904 | 1904 | ||
| 1905 | netdev->flags |= IFF_MULTICAST; | 1905 | netdev->flags |= IFF_MULTICAST; |
| 1906 | 1906 | ||
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index b53b40ba88a8..d1e0563a67df 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c | |||
| @@ -1803,7 +1803,7 @@ struct e1000_info e1000_82574_info = { | |||
| 1803 | | FLAG_HAS_AMT | 1803 | | FLAG_HAS_AMT |
| 1804 | | FLAG_HAS_CTRLEXT_ON_LOAD, | 1804 | | FLAG_HAS_CTRLEXT_ON_LOAD, |
| 1805 | .pba = 20, | 1805 | .pba = 20, |
| 1806 | .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, | 1806 | .max_hw_frame_size = DEFAULT_JUMBO, |
| 1807 | .get_variants = e1000_get_variants_82571, | 1807 | .get_variants = e1000_get_variants_82571, |
| 1808 | .mac_ops = &e82571_mac_ops, | 1808 | .mac_ops = &e82571_mac_ops, |
| 1809 | .phy_ops = &e82_phy_ops_bm, | 1809 | .phy_ops = &e82_phy_ops_bm, |
| @@ -1820,7 +1820,7 @@ struct e1000_info e1000_82583_info = { | |||
| 1820 | | FLAG_HAS_AMT | 1820 | | FLAG_HAS_AMT |
| 1821 | | FLAG_HAS_CTRLEXT_ON_LOAD, | 1821 | | FLAG_HAS_CTRLEXT_ON_LOAD, |
| 1822 | .pba = 20, | 1822 | .pba = 20, |
| 1823 | .max_hw_frame_size = DEFAULT_JUMBO, | 1823 | .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, |
| 1824 | .get_variants = e1000_get_variants_82571, | 1824 | .get_variants = e1000_get_variants_82571, |
| 1825 | .mac_ops = &e82571_mac_ops, | 1825 | .mac_ops = &e82571_mac_ops, |
| 1826 | .phy_ops = &e82_phy_ops_bm, | 1826 | .phy_ops = &e82_phy_ops_bm, |
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c index b7311bc00258..34d0c69e67f7 100644 --- a/drivers/net/ethoc.c +++ b/drivers/net/ethoc.c | |||
| @@ -19,6 +19,10 @@ | |||
| 19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
| 20 | #include <net/ethoc.h> | 20 | #include <net/ethoc.h> |
| 21 | 21 | ||
| 22 | static int buffer_size = 0x8000; /* 32 KBytes */ | ||
| 23 | module_param(buffer_size, int, 0); | ||
| 24 | MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size"); | ||
| 25 | |||
| 22 | /* register offsets */ | 26 | /* register offsets */ |
| 23 | #define MODER 0x00 | 27 | #define MODER 0x00 |
| 24 | #define INT_SOURCE 0x04 | 28 | #define INT_SOURCE 0x04 |
| @@ -167,6 +171,7 @@ | |||
| 167 | * struct ethoc - driver-private device structure | 171 | * struct ethoc - driver-private device structure |
| 168 | * @iobase: pointer to I/O memory region | 172 | * @iobase: pointer to I/O memory region |
| 169 | * @membase: pointer to buffer memory region | 173 | * @membase: pointer to buffer memory region |
| 174 | * @dma_alloc: dma allocated buffer size | ||
| 170 | * @num_tx: number of send buffers | 175 | * @num_tx: number of send buffers |
| 171 | * @cur_tx: last send buffer written | 176 | * @cur_tx: last send buffer written |
| 172 | * @dty_tx: last buffer actually sent | 177 | * @dty_tx: last buffer actually sent |
| @@ -185,6 +190,7 @@ | |||
| 185 | struct ethoc { | 190 | struct ethoc { |
| 186 | void __iomem *iobase; | 191 | void __iomem *iobase; |
| 187 | void __iomem *membase; | 192 | void __iomem *membase; |
| 193 | int dma_alloc; | ||
| 188 | 194 | ||
| 189 | unsigned int num_tx; | 195 | unsigned int num_tx; |
| 190 | unsigned int cur_tx; | 196 | unsigned int cur_tx; |
| @@ -284,7 +290,7 @@ static int ethoc_init_ring(struct ethoc *dev) | |||
| 284 | dev->cur_rx = 0; | 290 | dev->cur_rx = 0; |
| 285 | 291 | ||
| 286 | /* setup transmission buffers */ | 292 | /* setup transmission buffers */ |
| 287 | bd.addr = 0; | 293 | bd.addr = virt_to_phys(dev->membase); |
| 288 | bd.stat = TX_BD_IRQ | TX_BD_CRC; | 294 | bd.stat = TX_BD_IRQ | TX_BD_CRC; |
| 289 | 295 | ||
| 290 | for (i = 0; i < dev->num_tx; i++) { | 296 | for (i = 0; i < dev->num_tx; i++) { |
| @@ -295,7 +301,6 @@ static int ethoc_init_ring(struct ethoc *dev) | |||
| 295 | bd.addr += ETHOC_BUFSIZ; | 301 | bd.addr += ETHOC_BUFSIZ; |
| 296 | } | 302 | } |
| 297 | 303 | ||
| 298 | bd.addr = dev->num_tx * ETHOC_BUFSIZ; | ||
| 299 | bd.stat = RX_BD_EMPTY | RX_BD_IRQ; | 304 | bd.stat = RX_BD_EMPTY | RX_BD_IRQ; |
| 300 | 305 | ||
| 301 | for (i = 0; i < dev->num_rx; i++) { | 306 | for (i = 0; i < dev->num_rx; i++) { |
| @@ -400,8 +405,12 @@ static int ethoc_rx(struct net_device *dev, int limit) | |||
| 400 | if (ethoc_update_rx_stats(priv, &bd) == 0) { | 405 | if (ethoc_update_rx_stats(priv, &bd) == 0) { |
| 401 | int size = bd.stat >> 16; | 406 | int size = bd.stat >> 16; |
| 402 | struct sk_buff *skb = netdev_alloc_skb(dev, size); | 407 | struct sk_buff *skb = netdev_alloc_skb(dev, size); |
| 408 | |||
| 409 | size -= 4; /* strip the CRC */ | ||
| 410 | skb_reserve(skb, 2); /* align TCP/IP header */ | ||
| 411 | |||
| 403 | if (likely(skb)) { | 412 | if (likely(skb)) { |
| 404 | void *src = priv->membase + bd.addr; | 413 | void *src = phys_to_virt(bd.addr); |
| 405 | memcpy_fromio(skb_put(skb, size), src, size); | 414 | memcpy_fromio(skb_put(skb, size), src, size); |
| 406 | skb->protocol = eth_type_trans(skb, dev); | 415 | skb->protocol = eth_type_trans(skb, dev); |
| 407 | priv->stats.rx_packets++; | 416 | priv->stats.rx_packets++; |
| @@ -653,9 +662,9 @@ static int ethoc_open(struct net_device *dev) | |||
| 653 | if (ret) | 662 | if (ret) |
| 654 | return ret; | 663 | return ret; |
| 655 | 664 | ||
| 656 | /* calculate the number of TX/RX buffers */ | 665 | /* calculate the number of TX/RX buffers, maximum 128 supported */ |
| 657 | num_bd = (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ; | 666 | num_bd = min(128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ); |
| 658 | priv->num_tx = min(min_tx, num_bd / 4); | 667 | priv->num_tx = max(min_tx, num_bd / 4); |
| 659 | priv->num_rx = num_bd - priv->num_tx; | 668 | priv->num_rx = num_bd - priv->num_tx; |
| 660 | ethoc_write(priv, TX_BD_NUM, priv->num_tx); | 669 | ethoc_write(priv, TX_BD_NUM, priv->num_tx); |
| 661 | 670 | ||
| @@ -823,7 +832,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 823 | else | 832 | else |
| 824 | bd.stat &= ~TX_BD_PAD; | 833 | bd.stat &= ~TX_BD_PAD; |
| 825 | 834 | ||
| 826 | dest = priv->membase + bd.addr; | 835 | dest = phys_to_virt(bd.addr); |
| 827 | memcpy_toio(dest, skb->data, skb->len); | 836 | memcpy_toio(dest, skb->data, skb->len); |
| 828 | 837 | ||
| 829 | bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK); | 838 | bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK); |
| @@ -903,22 +912,19 @@ static int ethoc_probe(struct platform_device *pdev) | |||
| 903 | 912 | ||
| 904 | /* obtain buffer memory space */ | 913 | /* obtain buffer memory space */ |
| 905 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 914 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
| 906 | if (!res) { | 915 | if (res) { |
| 907 | dev_err(&pdev->dev, "cannot obtain memory space\n"); | 916 | mem = devm_request_mem_region(&pdev->dev, res->start, |
| 908 | ret = -ENXIO; | ||
| 909 | goto free; | ||
| 910 | } | ||
| 911 | |||
| 912 | mem = devm_request_mem_region(&pdev->dev, res->start, | ||
| 913 | res->end - res->start + 1, res->name); | 917 | res->end - res->start + 1, res->name); |
| 914 | if (!mem) { | 918 | if (!mem) { |
| 915 | dev_err(&pdev->dev, "cannot request memory space\n"); | 919 | dev_err(&pdev->dev, "cannot request memory space\n"); |
| 916 | ret = -ENXIO; | 920 | ret = -ENXIO; |
| 917 | goto free; | 921 | goto free; |
| 922 | } | ||
| 923 | |||
| 924 | netdev->mem_start = mem->start; | ||
| 925 | netdev->mem_end = mem->end; | ||
| 918 | } | 926 | } |
| 919 | 927 | ||
| 920 | netdev->mem_start = mem->start; | ||
| 921 | netdev->mem_end = mem->end; | ||
| 922 | 928 | ||
| 923 | /* obtain device IRQ number */ | 929 | /* obtain device IRQ number */ |
| 924 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 930 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
| @@ -933,6 +939,7 @@ static int ethoc_probe(struct platform_device *pdev) | |||
| 933 | /* setup driver-private data */ | 939 | /* setup driver-private data */ |
| 934 | priv = netdev_priv(netdev); | 940 | priv = netdev_priv(netdev); |
| 935 | priv->netdev = netdev; | 941 | priv->netdev = netdev; |
| 942 | priv->dma_alloc = 0; | ||
| 936 | 943 | ||
| 937 | priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, | 944 | priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, |
| 938 | mmio->end - mmio->start + 1); | 945 | mmio->end - mmio->start + 1); |
| @@ -942,12 +949,27 @@ static int ethoc_probe(struct platform_device *pdev) | |||
| 942 | goto error; | 949 | goto error; |
| 943 | } | 950 | } |
| 944 | 951 | ||
| 945 | priv->membase = devm_ioremap_nocache(&pdev->dev, netdev->mem_start, | 952 | if (netdev->mem_end) { |
| 946 | mem->end - mem->start + 1); | 953 | priv->membase = devm_ioremap_nocache(&pdev->dev, |
| 947 | if (!priv->membase) { | 954 | netdev->mem_start, mem->end - mem->start + 1); |
| 948 | dev_err(&pdev->dev, "cannot remap memory space\n"); | 955 | if (!priv->membase) { |
| 949 | ret = -ENXIO; | 956 | dev_err(&pdev->dev, "cannot remap memory space\n"); |
| 950 | goto error; | 957 | ret = -ENXIO; |
| 958 | goto error; | ||
| 959 | } | ||
| 960 | } else { | ||
| 961 | /* Allocate buffer memory */ | ||
| 962 | priv->membase = dma_alloc_coherent(NULL, | ||
| 963 | buffer_size, (void *)&netdev->mem_start, | ||
| 964 | GFP_KERNEL); | ||
| 965 | if (!priv->membase) { | ||
| 966 | dev_err(&pdev->dev, "cannot allocate %dB buffer\n", | ||
| 967 | buffer_size); | ||
| 968 | ret = -ENOMEM; | ||
| 969 | goto error; | ||
| 970 | } | ||
| 971 | netdev->mem_end = netdev->mem_start + buffer_size; | ||
| 972 | priv->dma_alloc = buffer_size; | ||
| 951 | } | 973 | } |
| 952 | 974 | ||
| 953 | /* Allow the platform setup code to pass in a MAC address. */ | 975 | /* Allow the platform setup code to pass in a MAC address. */ |
| @@ -1034,6 +1056,9 @@ free_mdio: | |||
| 1034 | kfree(priv->mdio->irq); | 1056 | kfree(priv->mdio->irq); |
| 1035 | mdiobus_free(priv->mdio); | 1057 | mdiobus_free(priv->mdio); |
| 1036 | free: | 1058 | free: |
| 1059 | if (priv->dma_alloc) | ||
| 1060 | dma_free_coherent(NULL, priv->dma_alloc, priv->membase, | ||
| 1061 | netdev->mem_start); | ||
| 1037 | free_netdev(netdev); | 1062 | free_netdev(netdev); |
| 1038 | out: | 1063 | out: |
| 1039 | return ret; | 1064 | return ret; |
| @@ -1059,7 +1084,9 @@ static int ethoc_remove(struct platform_device *pdev) | |||
| 1059 | kfree(priv->mdio->irq); | 1084 | kfree(priv->mdio->irq); |
| 1060 | mdiobus_free(priv->mdio); | 1085 | mdiobus_free(priv->mdio); |
| 1061 | } | 1086 | } |
| 1062 | 1087 | if (priv->dma_alloc) | |
| 1088 | dma_free_coherent(NULL, priv->dma_alloc, priv->membase, | ||
| 1089 | netdev->mem_start); | ||
| 1063 | unregister_netdev(netdev); | 1090 | unregister_netdev(netdev); |
| 1064 | free_netdev(netdev); | 1091 | free_netdev(netdev); |
| 1065 | } | 1092 | } |
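The ethoc change above makes the second MEM resource optional: when the platform supplies dedicated packet-buffer RAM the driver still requests and remaps it, otherwise it falls back to a DMA-coherent allocation whose size is remembered in priv->dma_alloc so the probe error path and ethoc_remove() can free it. A minimal sketch of that fallback pattern; buffer_size and the buf_dma field are illustrative stand-ins (the patch itself stores the bus address in netdev->mem_start and passes a NULL device to dma_alloc_coherent()):

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		/* dedicated buffer RAM provided by the platform */
		priv->membase = devm_ioremap_nocache(&pdev->dev, res->start,
						     resource_size(res));
		if (!priv->membase)
			return -ENXIO;
	} else {
		/* no buffer resource: carve the buffers out of coherent DMA
		 * memory and remember the size for later cleanup */
		priv->membase = dma_alloc_coherent(&pdev->dev, buffer_size,
						   &priv->buf_dma, GFP_KERNEL);
		if (!priv->membase)
			return -ENOMEM;
		priv->dma_alloc = buffer_size;
	}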
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index 2ec58dcdb82b..34b04924c8a1 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c | |||
| @@ -330,6 +330,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) | |||
| 330 | 330 | ||
| 331 | switch (hw->device_id) { | 331 | switch (hw->device_id) { |
| 332 | case IXGBE_DEV_ID_82599_KX4: | 332 | case IXGBE_DEV_ID_82599_KX4: |
| 333 | case IXGBE_DEV_ID_82599_KX4_MEZZ: | ||
| 334 | case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: | ||
| 333 | case IXGBE_DEV_ID_82599_XAUI_LOM: | 335 | case IXGBE_DEV_ID_82599_XAUI_LOM: |
| 334 | /* Default device ID is mezzanine card KX/KX4 */ | 336 | /* Default device ID is mezzanine card KX/KX4 */ |
| 335 | media_type = ixgbe_media_type_backplane; | 337 | media_type = ixgbe_media_type_backplane; |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 28fbb9d281f9..cbb143ca1eb8 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
| @@ -97,8 +97,12 @@ static struct pci_device_id ixgbe_pci_tbl[] = { | |||
| 97 | board_82599 }, | 97 | board_82599 }, |
| 98 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), | 98 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), |
| 99 | board_82599 }, | 99 | board_82599 }, |
| 100 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), | ||
| 101 | board_82599 }, | ||
| 100 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), | 102 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), |
| 101 | board_82599 }, | 103 | board_82599 }, |
| 104 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), | ||
| 105 | board_82599 }, | ||
| 102 | 106 | ||
| 103 | /* required last entry */ | 107 | /* required last entry */ |
| 104 | {0, } | 108 | {0, } |
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 7c93e923bf2e..ef4bdd58e016 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h | |||
| @@ -49,9 +49,11 @@ | |||
| 49 | #define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 | 49 | #define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 |
| 50 | #define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 | 50 | #define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 |
| 51 | #define IXGBE_DEV_ID_82599_KX4 0x10F7 | 51 | #define IXGBE_DEV_ID_82599_KX4 0x10F7 |
| 52 | #define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 | ||
| 52 | #define IXGBE_DEV_ID_82599_CX4 0x10F9 | 53 | #define IXGBE_DEV_ID_82599_CX4 0x10F9 |
| 53 | #define IXGBE_DEV_ID_82599_SFP 0x10FB | 54 | #define IXGBE_DEV_ID_82599_SFP 0x10FB |
| 54 | #define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC | 55 | #define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC |
| 56 | #define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 | ||
| 55 | 57 | ||
| 56 | /* General Registers */ | 58 | /* General Registers */ |
| 57 | #define IXGBE_CTRL 0x00000 | 59 | #define IXGBE_CTRL 0x00000 |
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index b5aa974827e5..9b9eab107704 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
| @@ -1714,7 +1714,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
| 1714 | /* 4 fragments per cmd des */ | 1714 | /* 4 fragments per cmd des */ |
| 1715 | no_of_desc = (frag_count + 3) >> 2; | 1715 | no_of_desc = (frag_count + 3) >> 2; |
| 1716 | 1716 | ||
| 1717 | if (unlikely(no_of_desc + 2) > netxen_tx_avail(tx_ring)) { | 1717 | if (unlikely(no_of_desc + 2 > netxen_tx_avail(tx_ring))) { |
| 1718 | netif_stop_queue(netdev); | 1718 | netif_stop_queue(netdev); |
| 1719 | return NETDEV_TX_BUSY; | 1719 | return NETDEV_TX_BUSY; |
| 1720 | } | 1720 | } |
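The netxen one-liner fixes an operator-precedence slip: unlikely() expands to __builtin_expect(!!(x), 0), so with the parenthesis in the old position the comparison saw only 0 or 1 on its left-hand side and the branch hint never covered the real queue-full test. A side-by-side illustration of just the precedence point:

	/* before: the hint wraps only the arithmetic, and the comparison
	 * is made against !!(no_of_desc + 2), i.e. 0 or 1 */
	if (unlikely(no_of_desc + 2) > netxen_tx_avail(tx_ring))
		netif_stop_queue(netdev);

	/* after: the whole queue-full condition sits inside unlikely() */
	if (unlikely(no_of_desc + 2 > netxen_tx_avail(tx_ring)))
		netif_stop_queue(netdev);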
diff --git a/drivers/net/pasemi_mac_ethtool.c b/drivers/net/pasemi_mac_ethtool.c index 064a4fe1dd90..28a86224879d 100644 --- a/drivers/net/pasemi_mac_ethtool.c +++ b/drivers/net/pasemi_mac_ethtool.c | |||
| @@ -71,6 +71,9 @@ pasemi_mac_ethtool_get_settings(struct net_device *netdev, | |||
| 71 | struct pasemi_mac *mac = netdev_priv(netdev); | 71 | struct pasemi_mac *mac = netdev_priv(netdev); |
| 72 | struct phy_device *phydev = mac->phydev; | 72 | struct phy_device *phydev = mac->phydev; |
| 73 | 73 | ||
| 74 | if (!phydev) | ||
| 75 | return -EOPNOTSUPP; | ||
| 76 | |||
| 74 | return phy_ethtool_gset(phydev, cmd); | 77 | return phy_ethtool_gset(phydev, cmd); |
| 75 | } | 78 | } |
| 76 | 79 | ||
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 474876c879cb..bd3447f04902 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c | |||
| @@ -1754,14 +1754,14 @@ static struct pcmcia_device_id pcnet_ids[] = { | |||
| 1754 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), | 1754 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), |
| 1755 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), | 1755 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), |
| 1756 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), | 1756 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), |
| 1757 | PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"), | 1757 | PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"), |
| 1758 | PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"), | 1758 | PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), |
| 1759 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "DP83903.cis"), | 1759 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"), |
| 1760 | PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"), | 1760 | PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"), |
| 1761 | PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"), | 1761 | PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"), |
| 1762 | PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "NE2K.cis"), | 1762 | PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"), |
| 1763 | PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"), | 1763 | PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"), |
| 1764 | PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "tamarack.cis"), | 1764 | PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"), |
| 1765 | PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b), | 1765 | PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b), |
| 1766 | PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0", | 1766 | PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0", |
| 1767 | 0xb4be14e3, 0x43ac239b, 0x0877b627), | 1767 | 0xb4be14e3, 0x43ac239b, 0x0877b627), |
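The pcnet_cs ID-table edits only change the firmware names handed to the PCMCIA core so they match the cis/ subdirectory the override files are shipped under; the core's CIS-replacement path loads them through the firmware loader, so the string in the table has to be the installed path. Illustration only (the actual call lives in the PCMCIA core, not in this driver), assuming request_firmware() resolves the name against the normal firmware search path:

	const struct firmware *fw;

	if (request_firmware(&fw, "cis/DP83903.cis", &link->dev) == 0) {
		/* ... apply the replacement CIS ... */
		release_firmware(fw);
	}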
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h index 30d5585beeee..3ec6e85587a2 100644 --- a/drivers/net/qlge/qlge.h +++ b/drivers/net/qlge/qlge.h | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | 9 | ||
| 10 | #include <linux/pci.h> | 10 | #include <linux/pci.h> |
| 11 | #include <linux/netdevice.h> | 11 | #include <linux/netdevice.h> |
| 12 | #include <linux/rtnetlink.h> | ||
| 12 | 13 | ||
| 13 | /* | 14 | /* |
| 14 | * General definitions... | 15 | * General definitions... |
| @@ -135,9 +136,9 @@ enum { | |||
| 135 | RST_FO_TFO = (1 << 0), | 136 | RST_FO_TFO = (1 << 0), |
| 136 | RST_FO_RR_MASK = 0x00060000, | 137 | RST_FO_RR_MASK = 0x00060000, |
| 137 | RST_FO_RR_CQ_CAM = 0x00000000, | 138 | RST_FO_RR_CQ_CAM = 0x00000000, |
| 138 | RST_FO_RR_DROP = 0x00000001, | 139 | RST_FO_RR_DROP = 0x00000002, |
| 139 | RST_FO_RR_DQ = 0x00000002, | 140 | RST_FO_RR_DQ = 0x00000004, |
| 140 | RST_FO_RR_RCV_FUNC_CQ = 0x00000003, | 141 | RST_FO_RR_RCV_FUNC_CQ = 0x00000006, |
| 141 | RST_FO_FRB = (1 << 12), | 142 | RST_FO_FRB = (1 << 12), |
| 142 | RST_FO_MOP = (1 << 13), | 143 | RST_FO_MOP = (1 << 13), |
| 143 | RST_FO_REG = (1 << 14), | 144 | RST_FO_REG = (1 << 14), |
| @@ -1477,7 +1478,6 @@ struct ql_adapter { | |||
| 1477 | u32 mailbox_in; | 1478 | u32 mailbox_in; |
| 1478 | u32 mailbox_out; | 1479 | u32 mailbox_out; |
| 1479 | struct mbox_params idc_mbc; | 1480 | struct mbox_params idc_mbc; |
| 1480 | struct mutex mpi_mutex; | ||
| 1481 | 1481 | ||
| 1482 | int tx_ring_size; | 1482 | int tx_ring_size; |
| 1483 | int rx_ring_size; | 1483 | int rx_ring_size; |
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c index 68f9bd280f86..52073946bce3 100644 --- a/drivers/net/qlge/qlge_ethtool.c +++ b/drivers/net/qlge/qlge_ethtool.c | |||
| @@ -45,7 +45,6 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev) | |||
| 45 | if (!netif_running(qdev->ndev)) | 45 | if (!netif_running(qdev->ndev)) |
| 46 | return status; | 46 | return status; |
| 47 | 47 | ||
| 48 | spin_lock(&qdev->hw_lock); | ||
| 49 | /* Skip the default queue, and update the outbound handler | 48 | /* Skip the default queue, and update the outbound handler |
| 50 | * queues if they changed. | 49 | * queues if they changed. |
| 51 | */ | 50 | */ |
| @@ -92,7 +91,6 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev) | |||
| 92 | } | 91 | } |
| 93 | } | 92 | } |
| 94 | exit: | 93 | exit: |
| 95 | spin_unlock(&qdev->hw_lock); | ||
| 96 | return status; | 94 | return status; |
| 97 | } | 95 | } |
| 98 | 96 | ||
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index 3d0efea32111..61680715cde0 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c | |||
| @@ -34,7 +34,6 @@ | |||
| 34 | #include <linux/etherdevice.h> | 34 | #include <linux/etherdevice.h> |
| 35 | #include <linux/ethtool.h> | 35 | #include <linux/ethtool.h> |
| 36 | #include <linux/skbuff.h> | 36 | #include <linux/skbuff.h> |
| 37 | #include <linux/rtnetlink.h> | ||
| 38 | #include <linux/if_vlan.h> | 37 | #include <linux/if_vlan.h> |
| 39 | #include <linux/delay.h> | 38 | #include <linux/delay.h> |
| 40 | #include <linux/mm.h> | 39 | #include <linux/mm.h> |
| @@ -1926,12 +1925,10 @@ static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid) | |||
| 1926 | status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); | 1925 | status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); |
| 1927 | if (status) | 1926 | if (status) |
| 1928 | return; | 1927 | return; |
| 1929 | spin_lock(&qdev->hw_lock); | ||
| 1930 | if (ql_set_mac_addr_reg | 1928 | if (ql_set_mac_addr_reg |
| 1931 | (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { | 1929 | (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { |
| 1932 | QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n"); | 1930 | QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n"); |
| 1933 | } | 1931 | } |
| 1934 | spin_unlock(&qdev->hw_lock); | ||
| 1935 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); | 1932 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); |
| 1936 | } | 1933 | } |
| 1937 | 1934 | ||
| @@ -1945,12 +1942,10 @@ static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) | |||
| 1945 | if (status) | 1942 | if (status) |
| 1946 | return; | 1943 | return; |
| 1947 | 1944 | ||
| 1948 | spin_lock(&qdev->hw_lock); | ||
| 1949 | if (ql_set_mac_addr_reg | 1945 | if (ql_set_mac_addr_reg |
| 1950 | (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { | 1946 | (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { |
| 1951 | QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n"); | 1947 | QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n"); |
| 1952 | } | 1948 | } |
| 1953 | spin_unlock(&qdev->hw_lock); | ||
| 1954 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); | 1949 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); |
| 1955 | 1950 | ||
| 1956 | } | 1951 | } |
| @@ -2001,15 +1996,17 @@ static irqreturn_t qlge_isr(int irq, void *dev_id) | |||
| 2001 | /* | 1996 | /* |
| 2002 | * Check MPI processor activity. | 1997 | * Check MPI processor activity. |
| 2003 | */ | 1998 | */ |
| 2004 | if (var & STS_PI) { | 1999 | if ((var & STS_PI) && |
| 2000 | (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) { | ||
| 2005 | /* | 2001 | /* |
| 2006 | * We've got an async event or mailbox completion. | 2002 | * We've got an async event or mailbox completion. |
| 2007 | * Handle it and clear the source of the interrupt. | 2003 | * Handle it and clear the source of the interrupt. |
| 2008 | */ | 2004 | */ |
| 2009 | QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n"); | 2005 | QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n"); |
| 2010 | ql_disable_completion_interrupt(qdev, intr_context->intr); | 2006 | ql_disable_completion_interrupt(qdev, intr_context->intr); |
| 2011 | queue_delayed_work_on(smp_processor_id(), qdev->workqueue, | 2007 | ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); |
| 2012 | &qdev->mpi_work, 0); | 2008 | queue_delayed_work_on(smp_processor_id(), |
| 2009 | qdev->workqueue, &qdev->mpi_work, 0); | ||
| 2013 | work_done++; | 2010 | work_done++; |
| 2014 | } | 2011 | } |
| 2015 | 2012 | ||
| @@ -3585,7 +3582,6 @@ static void qlge_set_multicast_list(struct net_device *ndev) | |||
| 3585 | status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); | 3582 | status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); |
| 3586 | if (status) | 3583 | if (status) |
| 3587 | return; | 3584 | return; |
| 3588 | spin_lock(&qdev->hw_lock); | ||
| 3589 | /* | 3585 | /* |
| 3590 | * Set or clear promiscuous mode if a | 3586 | * Set or clear promiscuous mode if a |
| 3591 | * transition is taking place. | 3587 | * transition is taking place. |
| @@ -3662,7 +3658,6 @@ static void qlge_set_multicast_list(struct net_device *ndev) | |||
| 3662 | } | 3658 | } |
| 3663 | } | 3659 | } |
| 3664 | exit: | 3660 | exit: |
| 3665 | spin_unlock(&qdev->hw_lock); | ||
| 3666 | ql_sem_unlock(qdev, SEM_RT_IDX_MASK); | 3661 | ql_sem_unlock(qdev, SEM_RT_IDX_MASK); |
| 3667 | } | 3662 | } |
| 3668 | 3663 | ||
| @@ -3682,10 +3677,8 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p) | |||
| 3682 | status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); | 3677 | status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); |
| 3683 | if (status) | 3678 | if (status) |
| 3684 | return status; | 3679 | return status; |
| 3685 | spin_lock(&qdev->hw_lock); | ||
| 3686 | status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, | 3680 | status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, |
| 3687 | MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); | 3681 | MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); |
| 3688 | spin_unlock(&qdev->hw_lock); | ||
| 3689 | if (status) | 3682 | if (status) |
| 3690 | QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n"); | 3683 | QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n"); |
| 3691 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); | 3684 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); |
| @@ -3928,7 +3921,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev, | |||
| 3928 | INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); | 3921 | INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); |
| 3929 | INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); | 3922 | INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); |
| 3930 | INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); | 3923 | INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); |
| 3931 | mutex_init(&qdev->mpi_mutex); | ||
| 3932 | init_completion(&qdev->ide_completion); | 3924 | init_completion(&qdev->ide_completion); |
| 3933 | 3925 | ||
| 3934 | if (!cards_found) { | 3926 | if (!cards_found) { |
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c index 6685bd97da91..c2e43073047e 100644 --- a/drivers/net/qlge/qlge_mpi.c +++ b/drivers/net/qlge/qlge_mpi.c | |||
| @@ -472,7 +472,6 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp) | |||
| 472 | { | 472 | { |
| 473 | int status, count; | 473 | int status, count; |
| 474 | 474 | ||
| 475 | mutex_lock(&qdev->mpi_mutex); | ||
| 476 | 475 | ||
| 477 | /* Begin polled mode for MPI */ | 476 | /* Begin polled mode for MPI */ |
| 478 | ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); | 477 | ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); |
| @@ -541,7 +540,6 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp) | |||
| 541 | status = -EIO; | 540 | status = -EIO; |
| 542 | } | 541 | } |
| 543 | end: | 542 | end: |
| 544 | mutex_unlock(&qdev->mpi_mutex); | ||
| 545 | /* End polled mode for MPI */ | 543 | /* End polled mode for MPI */ |
| 546 | ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); | 544 | ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); |
| 547 | return status; | 545 | return status; |
| @@ -776,7 +774,9 @@ static int ql_idc_wait(struct ql_adapter *qdev) | |||
| 776 | static int ql_set_port_cfg(struct ql_adapter *qdev) | 774 | static int ql_set_port_cfg(struct ql_adapter *qdev) |
| 777 | { | 775 | { |
| 778 | int status; | 776 | int status; |
| 777 | rtnl_lock(); | ||
| 779 | status = ql_mb_set_port_cfg(qdev); | 778 | status = ql_mb_set_port_cfg(qdev); |
| 779 | rtnl_unlock(); | ||
| 780 | if (status) | 780 | if (status) |
| 781 | return status; | 781 | return status; |
| 782 | status = ql_idc_wait(qdev); | 782 | status = ql_idc_wait(qdev); |
| @@ -797,7 +797,9 @@ void ql_mpi_port_cfg_work(struct work_struct *work) | |||
| 797 | container_of(work, struct ql_adapter, mpi_port_cfg_work.work); | 797 | container_of(work, struct ql_adapter, mpi_port_cfg_work.work); |
| 798 | int status; | 798 | int status; |
| 799 | 799 | ||
| 800 | rtnl_lock(); | ||
| 800 | status = ql_mb_get_port_cfg(qdev); | 801 | status = ql_mb_get_port_cfg(qdev); |
| 802 | rtnl_unlock(); | ||
| 801 | if (status) { | 803 | if (status) { |
| 802 | QPRINTK(qdev, DRV, ERR, | 804 | QPRINTK(qdev, DRV, ERR, |
| 803 | "Bug: Failed to get port config data.\n"); | 805 | "Bug: Failed to get port config data.\n"); |
| @@ -855,7 +857,9 @@ void ql_mpi_idc_work(struct work_struct *work) | |||
| 855 | * needs to be set. | 857 | * needs to be set. |
| 856 | * */ | 858 | * */ |
| 857 | set_bit(QL_CAM_RT_SET, &qdev->flags); | 859 | set_bit(QL_CAM_RT_SET, &qdev->flags); |
| 860 | rtnl_lock(); | ||
| 858 | status = ql_mb_idc_ack(qdev); | 861 | status = ql_mb_idc_ack(qdev); |
| 862 | rtnl_unlock(); | ||
| 859 | if (status) { | 863 | if (status) { |
| 860 | QPRINTK(qdev, DRV, ERR, | 864 | QPRINTK(qdev, DRV, ERR, |
| 861 | "Bug: No pending IDC!\n"); | 865 | "Bug: No pending IDC!\n"); |
| @@ -871,7 +875,7 @@ void ql_mpi_work(struct work_struct *work) | |||
| 871 | struct mbox_params *mbcp = &mbc; | 875 | struct mbox_params *mbcp = &mbc; |
| 872 | int err = 0; | 876 | int err = 0; |
| 873 | 877 | ||
| 874 | mutex_lock(&qdev->mpi_mutex); | 878 | rtnl_lock(); |
| 875 | 879 | ||
| 876 | while (ql_read32(qdev, STS) & STS_PI) { | 880 | while (ql_read32(qdev, STS) & STS_PI) { |
| 877 | memset(mbcp, 0, sizeof(struct mbox_params)); | 881 | memset(mbcp, 0, sizeof(struct mbox_params)); |
| @@ -884,7 +888,7 @@ void ql_mpi_work(struct work_struct *work) | |||
| 884 | break; | 888 | break; |
| 885 | } | 889 | } |
| 886 | 890 | ||
| 887 | mutex_unlock(&qdev->mpi_mutex); | 891 | rtnl_unlock(); |
| 888 | ql_enable_completion_interrupt(qdev, 0); | 892 | ql_enable_completion_interrupt(qdev, 0); |
| 889 | } | 893 | } |
| 890 | 894 | ||
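Taken together, the qlge hunks drop the driver-private mpi_mutex (and several hw_lock critical sections) in favour of the RTNL: the ifup/ifdown, ethtool and set-MAC paths that issue mailbox commands already run under rtnl_lock(), so the deferred MPI worker and the port-config/IDC handlers take it explicitly and all mailbox traffic is serialized by a single lock. The interrupt handler additionally masks further MPI interrupts before queuing the work, and the RST_FO_RR_* encodings are corrected to their shifted values. A sketch of the worker-side pattern under that assumption, using only names visible in the diff:

	void example_mpi_work(struct work_struct *work)
	{
		struct ql_adapter *qdev =
			container_of(work, struct ql_adapter, mpi_work.work);

		rtnl_lock();	/* serializes against open/stop/ethtool mailbox users */
		while (ql_read32(qdev, STS) & STS_PI) {
			/* ... drain one pending mailbox event ... */
		}
		rtnl_unlock();

		ql_enable_completion_interrupt(qdev, 0);
	}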
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index f09bc5dfe8b2..ba5d3fe753b6 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
| @@ -902,11 +902,12 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) | |||
| 902 | struct tg3 *tp = bp->priv; | 902 | struct tg3 *tp = bp->priv; |
| 903 | u32 val; | 903 | u32 val; |
| 904 | 904 | ||
| 905 | if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) | 905 | spin_lock_bh(&tp->lock); |
| 906 | return -EAGAIN; | ||
| 907 | 906 | ||
| 908 | if (tg3_readphy(tp, reg, &val)) | 907 | if (tg3_readphy(tp, reg, &val)) |
| 909 | return -EIO; | 908 | val = -EIO; |
| 909 | |||
| 910 | spin_unlock_bh(&tp->lock); | ||
| 910 | 911 | ||
| 911 | return val; | 912 | return val; |
| 912 | } | 913 | } |
| @@ -914,14 +915,16 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) | |||
| 914 | static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) | 915 | static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) |
| 915 | { | 916 | { |
| 916 | struct tg3 *tp = bp->priv; | 917 | struct tg3 *tp = bp->priv; |
| 918 | u32 ret = 0; | ||
| 917 | 919 | ||
| 918 | if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) | 920 | spin_lock_bh(&tp->lock); |
| 919 | return -EAGAIN; | ||
| 920 | 921 | ||
| 921 | if (tg3_writephy(tp, reg, val)) | 922 | if (tg3_writephy(tp, reg, val)) |
| 922 | return -EIO; | 923 | ret = -EIO; |
| 923 | 924 | ||
| 924 | return 0; | 925 | spin_unlock_bh(&tp->lock); |
| 926 | |||
| 927 | return ret; | ||
| 925 | } | 928 | } |
| 926 | 929 | ||
| 927 | static int tg3_mdio_reset(struct mii_bus *bp) | 930 | static int tg3_mdio_reset(struct mii_bus *bp) |
| @@ -1011,12 +1014,6 @@ static void tg3_mdio_config_5785(struct tg3 *tp) | |||
| 1011 | 1014 | ||
| 1012 | static void tg3_mdio_start(struct tg3 *tp) | 1015 | static void tg3_mdio_start(struct tg3 *tp) |
| 1013 | { | 1016 | { |
| 1014 | if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) { | ||
| 1015 | mutex_lock(&tp->mdio_bus->mdio_lock); | ||
| 1016 | tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED; | ||
| 1017 | mutex_unlock(&tp->mdio_bus->mdio_lock); | ||
| 1018 | } | ||
| 1019 | |||
| 1020 | tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; | 1017 | tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; |
| 1021 | tw32_f(MAC_MI_MODE, tp->mi_mode); | 1018 | tw32_f(MAC_MI_MODE, tp->mi_mode); |
| 1022 | udelay(80); | 1019 | udelay(80); |
| @@ -1041,15 +1038,6 @@ static void tg3_mdio_start(struct tg3 *tp) | |||
| 1041 | tg3_mdio_config_5785(tp); | 1038 | tg3_mdio_config_5785(tp); |
| 1042 | } | 1039 | } |
| 1043 | 1040 | ||
| 1044 | static void tg3_mdio_stop(struct tg3 *tp) | ||
| 1045 | { | ||
| 1046 | if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) { | ||
| 1047 | mutex_lock(&tp->mdio_bus->mdio_lock); | ||
| 1048 | tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED; | ||
| 1049 | mutex_unlock(&tp->mdio_bus->mdio_lock); | ||
| 1050 | } | ||
| 1051 | } | ||
| 1052 | |||
| 1053 | static int tg3_mdio_init(struct tg3 *tp) | 1041 | static int tg3_mdio_init(struct tg3 *tp) |
| 1054 | { | 1042 | { |
| 1055 | int i; | 1043 | int i; |
| @@ -1141,7 +1129,6 @@ static void tg3_mdio_fini(struct tg3 *tp) | |||
| 1141 | tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED; | 1129 | tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED; |
| 1142 | mdiobus_unregister(tp->mdio_bus); | 1130 | mdiobus_unregister(tp->mdio_bus); |
| 1143 | mdiobus_free(tp->mdio_bus); | 1131 | mdiobus_free(tp->mdio_bus); |
| 1144 | tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED; | ||
| 1145 | } | 1132 | } |
| 1146 | } | 1133 | } |
| 1147 | 1134 | ||
| @@ -1363,7 +1350,7 @@ static void tg3_adjust_link(struct net_device *dev) | |||
| 1363 | struct tg3 *tp = netdev_priv(dev); | 1350 | struct tg3 *tp = netdev_priv(dev); |
| 1364 | struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR]; | 1351 | struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR]; |
| 1365 | 1352 | ||
| 1366 | spin_lock(&tp->lock); | 1353 | spin_lock_bh(&tp->lock); |
| 1367 | 1354 | ||
| 1368 | mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | | 1355 | mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | |
| 1369 | MAC_MODE_HALF_DUPLEX); | 1356 | MAC_MODE_HALF_DUPLEX); |
| @@ -1431,7 +1418,7 @@ static void tg3_adjust_link(struct net_device *dev) | |||
| 1431 | tp->link_config.active_speed = phydev->speed; | 1418 | tp->link_config.active_speed = phydev->speed; |
| 1432 | tp->link_config.active_duplex = phydev->duplex; | 1419 | tp->link_config.active_duplex = phydev->duplex; |
| 1433 | 1420 | ||
| 1434 | spin_unlock(&tp->lock); | 1421 | spin_unlock_bh(&tp->lock); |
| 1435 | 1422 | ||
| 1436 | if (linkmesg) | 1423 | if (linkmesg) |
| 1437 | tg3_link_report(tp); | 1424 | tg3_link_report(tp); |
| @@ -6392,8 +6379,6 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
| 6392 | 6379 | ||
| 6393 | tg3_nvram_lock(tp); | 6380 | tg3_nvram_lock(tp); |
| 6394 | 6381 | ||
| 6395 | tg3_mdio_stop(tp); | ||
| 6396 | |||
| 6397 | tg3_ape_lock(tp, TG3_APE_LOCK_GRC); | 6382 | tg3_ape_lock(tp, TG3_APE_LOCK_GRC); |
| 6398 | 6383 | ||
| 6399 | /* No matching tg3_nvram_unlock() after this because | 6384 | /* No matching tg3_nvram_unlock() after this because |
| @@ -8698,6 +8683,8 @@ static int tg3_close(struct net_device *dev) | |||
| 8698 | 8683 | ||
| 8699 | del_timer_sync(&tp->timer); | 8684 | del_timer_sync(&tp->timer); |
| 8700 | 8685 | ||
| 8686 | tg3_phy_stop(tp); | ||
| 8687 | |||
| 8701 | tg3_full_lock(tp, 1); | 8688 | tg3_full_lock(tp, 1); |
| 8702 | #if 0 | 8689 | #if 0 |
| 8703 | tg3_dump_state(tp); | 8690 | tg3_dump_state(tp); |
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index 524691cd9896..bab7940158e6 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h | |||
| @@ -2748,7 +2748,6 @@ struct tg3 { | |||
| 2748 | #define TG3_FLG3_5701_DMA_BUG 0x00000008 | 2748 | #define TG3_FLG3_5701_DMA_BUG 0x00000008 |
| 2749 | #define TG3_FLG3_USE_PHYLIB 0x00000010 | 2749 | #define TG3_FLG3_USE_PHYLIB 0x00000010 |
| 2750 | #define TG3_FLG3_MDIOBUS_INITED 0x00000020 | 2750 | #define TG3_FLG3_MDIOBUS_INITED 0x00000020 |
| 2751 | #define TG3_FLG3_MDIOBUS_PAUSED 0x00000040 | ||
| 2752 | #define TG3_FLG3_PHY_CONNECTED 0x00000080 | 2751 | #define TG3_FLG3_PHY_CONNECTED 0x00000080 |
| 2753 | #define TG3_FLG3_RGMII_STD_IBND_DISABLE 0x00000100 | 2752 | #define TG3_FLG3_RGMII_STD_IBND_DISABLE 0x00000100 |
| 2754 | #define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200 | 2753 | #define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200 |
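The tg3 changes retire the MDIOBUS_PAUSED flag, which made phylib MDIO accesses fail with -EAGAIN around chip resets, and instead protect tg3_readphy()/tg3_writephy() with tp->lock taken via spin_lock_bh(); tg3_adjust_link() moves to the _bh variants for the same reason, and tg3_close() now stops the PHY before taking the full lock. A minimal sketch of the accessor-side locking, assuming tp->lock is the register-access lock as in the patch:

	static int example_mdio_read(struct mii_bus *bp, int mii_id, int reg)
	{
		struct tg3 *tp = bp->priv;
		u32 val;

		spin_lock_bh(&tp->lock);	/* excludes softirq users of tp->lock */
		if (tg3_readphy(tp, reg, &val))
			val = -EIO;
		spin_unlock_bh(&tp->lock);

		return val;
	}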
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index d032bba9bc4c..0caa8008c51c 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c | |||
| @@ -418,6 +418,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) | |||
| 418 | goto halt_fail_and_release; | 418 | goto halt_fail_and_release; |
| 419 | } | 419 | } |
| 420 | memcpy(net->dev_addr, bp, ETH_ALEN); | 420 | memcpy(net->dev_addr, bp, ETH_ALEN); |
| 421 | memcpy(net->perm_addr, bp, ETH_ALEN); | ||
| 421 | 422 | ||
| 422 | /* set a nonzero filter to enable data transfers */ | 423 | /* set a nonzero filter to enable data transfers */ |
| 423 | memset(u.set, 0, sizeof *u.set); | 424 | memset(u.set, 0, sizeof *u.set); |
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index afdbdaaf80cb..a2a742c8ff7e 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c | |||
| @@ -1211,15 +1211,6 @@ static int sony_nc_add(struct acpi_device *device) | |||
| 1211 | } | 1211 | } |
| 1212 | } | 1212 | } |
| 1213 | 1213 | ||
| 1214 | /* try to _INI the device if such method exists (ACPI spec 3.0-6.5.1 | ||
| 1215 | * should be respected as we already checked for the device presence above */ | ||
| 1216 | if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, METHOD_NAME__INI, &handle))) { | ||
| 1217 | dprintk("Invoking _INI\n"); | ||
| 1218 | if (ACPI_FAILURE(acpi_evaluate_object(sony_nc_acpi_handle, METHOD_NAME__INI, | ||
| 1219 | NULL, NULL))) | ||
| 1220 | dprintk("_INI Method failed\n"); | ||
| 1221 | } | ||
| 1222 | |||
| 1223 | if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON", | 1214 | if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON", |
| 1224 | &handle))) { | 1215 | &handle))) { |
| 1225 | if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL)) | 1216 | if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL)) |
| @@ -1399,27 +1390,20 @@ struct sonypi_eventtypes { | |||
| 1399 | struct sonypi_event *events; | 1390 | struct sonypi_event *events; |
| 1400 | }; | 1391 | }; |
| 1401 | 1392 | ||
| 1402 | struct device_ctrl { | 1393 | struct sony_pic_dev { |
| 1394 | struct acpi_device *acpi_dev; | ||
| 1395 | struct sony_pic_irq *cur_irq; | ||
| 1396 | struct sony_pic_ioport *cur_ioport; | ||
| 1397 | struct list_head interrupts; | ||
| 1398 | struct list_head ioports; | ||
| 1399 | struct mutex lock; | ||
| 1400 | struct sonypi_eventtypes *event_types; | ||
| 1401 | int (*handle_irq)(const u8, const u8); | ||
| 1403 | int model; | 1402 | int model; |
| 1404 | int (*handle_irq)(const u8, const u8); | ||
| 1405 | u16 evport_offset; | 1403 | u16 evport_offset; |
| 1406 | u8 has_camera; | 1404 | u8 camera_power; |
| 1407 | u8 has_bluetooth; | 1405 | u8 bluetooth_power; |
| 1408 | u8 has_wwan; | 1406 | u8 wwan_power; |
| 1409 | struct sonypi_eventtypes *event_types; | ||
| 1410 | }; | ||
| 1411 | |||
| 1412 | struct sony_pic_dev { | ||
| 1413 | struct device_ctrl *control; | ||
| 1414 | struct acpi_device *acpi_dev; | ||
| 1415 | struct sony_pic_irq *cur_irq; | ||
| 1416 | struct sony_pic_ioport *cur_ioport; | ||
| 1417 | struct list_head interrupts; | ||
| 1418 | struct list_head ioports; | ||
| 1419 | struct mutex lock; | ||
| 1420 | u8 camera_power; | ||
| 1421 | u8 bluetooth_power; | ||
| 1422 | u8 wwan_power; | ||
| 1423 | }; | 1407 | }; |
| 1424 | 1408 | ||
| 1425 | static struct sony_pic_dev spic_dev = { | 1409 | static struct sony_pic_dev spic_dev = { |
| @@ -1427,6 +1411,8 @@ static struct sony_pic_dev spic_dev = { | |||
| 1427 | .ioports = LIST_HEAD_INIT(spic_dev.ioports), | 1411 | .ioports = LIST_HEAD_INIT(spic_dev.ioports), |
| 1428 | }; | 1412 | }; |
| 1429 | 1413 | ||
| 1414 | static int spic_drv_registered; | ||
| 1415 | |||
| 1430 | /* Event masks */ | 1416 | /* Event masks */ |
| 1431 | #define SONYPI_JOGGER_MASK 0x00000001 | 1417 | #define SONYPI_JOGGER_MASK 0x00000001 |
| 1432 | #define SONYPI_CAPTURE_MASK 0x00000002 | 1418 | #define SONYPI_CAPTURE_MASK 0x00000002 |
| @@ -1724,27 +1710,6 @@ static int type3_handle_irq(const u8 data_mask, const u8 ev) | |||
| 1724 | return 1; | 1710 | return 1; |
| 1725 | } | 1711 | } |
| 1726 | 1712 | ||
| 1727 | static struct device_ctrl spic_types[] = { | ||
| 1728 | { | ||
| 1729 | .model = SONYPI_DEVICE_TYPE1, | ||
| 1730 | .handle_irq = NULL, | ||
| 1731 | .evport_offset = SONYPI_TYPE1_OFFSET, | ||
| 1732 | .event_types = type1_events, | ||
| 1733 | }, | ||
| 1734 | { | ||
| 1735 | .model = SONYPI_DEVICE_TYPE2, | ||
| 1736 | .handle_irq = NULL, | ||
| 1737 | .evport_offset = SONYPI_TYPE2_OFFSET, | ||
| 1738 | .event_types = type2_events, | ||
| 1739 | }, | ||
| 1740 | { | ||
| 1741 | .model = SONYPI_DEVICE_TYPE3, | ||
| 1742 | .handle_irq = type3_handle_irq, | ||
| 1743 | .evport_offset = SONYPI_TYPE3_OFFSET, | ||
| 1744 | .event_types = type3_events, | ||
| 1745 | }, | ||
| 1746 | }; | ||
| 1747 | |||
| 1748 | static void sony_pic_detect_device_type(struct sony_pic_dev *dev) | 1713 | static void sony_pic_detect_device_type(struct sony_pic_dev *dev) |
| 1749 | { | 1714 | { |
| 1750 | struct pci_dev *pcidev; | 1715 | struct pci_dev *pcidev; |
| @@ -1752,48 +1717,63 @@ static void sony_pic_detect_device_type(struct sony_pic_dev *dev) | |||
| 1752 | pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, | 1717 | pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, |
| 1753 | PCI_DEVICE_ID_INTEL_82371AB_3, NULL); | 1718 | PCI_DEVICE_ID_INTEL_82371AB_3, NULL); |
| 1754 | if (pcidev) { | 1719 | if (pcidev) { |
| 1755 | dev->control = &spic_types[0]; | 1720 | dev->model = SONYPI_DEVICE_TYPE1; |
| 1721 | dev->evport_offset = SONYPI_TYPE1_OFFSET; | ||
| 1722 | dev->event_types = type1_events; | ||
| 1756 | goto out; | 1723 | goto out; |
| 1757 | } | 1724 | } |
| 1758 | 1725 | ||
| 1759 | pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, | 1726 | pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, |
| 1760 | PCI_DEVICE_ID_INTEL_ICH6_1, NULL); | 1727 | PCI_DEVICE_ID_INTEL_ICH6_1, NULL); |
| 1761 | if (pcidev) { | 1728 | if (pcidev) { |
| 1762 | dev->control = &spic_types[2]; | 1729 | dev->model = SONYPI_DEVICE_TYPE2; |
| 1730 | dev->evport_offset = SONYPI_TYPE2_OFFSET; | ||
| 1731 | dev->event_types = type2_events; | ||
| 1763 | goto out; | 1732 | goto out; |
| 1764 | } | 1733 | } |
| 1765 | 1734 | ||
| 1766 | pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, | 1735 | pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, |
| 1767 | PCI_DEVICE_ID_INTEL_ICH7_1, NULL); | 1736 | PCI_DEVICE_ID_INTEL_ICH7_1, NULL); |
| 1768 | if (pcidev) { | 1737 | if (pcidev) { |
| 1769 | dev->control = &spic_types[2]; | 1738 | dev->model = SONYPI_DEVICE_TYPE3; |
| 1739 | dev->handle_irq = type3_handle_irq; | ||
| 1740 | dev->evport_offset = SONYPI_TYPE3_OFFSET; | ||
| 1741 | dev->event_types = type3_events; | ||
| 1770 | goto out; | 1742 | goto out; |
| 1771 | } | 1743 | } |
| 1772 | 1744 | ||
| 1773 | pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, | 1745 | pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, |
| 1774 | PCI_DEVICE_ID_INTEL_ICH8_4, NULL); | 1746 | PCI_DEVICE_ID_INTEL_ICH8_4, NULL); |
| 1775 | if (pcidev) { | 1747 | if (pcidev) { |
| 1776 | dev->control = &spic_types[2]; | 1748 | dev->model = SONYPI_DEVICE_TYPE3; |
| 1749 | dev->handle_irq = type3_handle_irq; | ||
| 1750 | dev->evport_offset = SONYPI_TYPE3_OFFSET; | ||
| 1751 | dev->event_types = type3_events; | ||
| 1777 | goto out; | 1752 | goto out; |
| 1778 | } | 1753 | } |
| 1779 | 1754 | ||
| 1780 | pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, | 1755 | pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, |
| 1781 | PCI_DEVICE_ID_INTEL_ICH9_1, NULL); | 1756 | PCI_DEVICE_ID_INTEL_ICH9_1, NULL); |
| 1782 | if (pcidev) { | 1757 | if (pcidev) { |
| 1783 | dev->control = &spic_types[2]; | 1758 | dev->model = SONYPI_DEVICE_TYPE3; |
| 1759 | dev->handle_irq = type3_handle_irq; | ||
| 1760 | dev->evport_offset = SONYPI_TYPE3_OFFSET; | ||
| 1761 | dev->event_types = type3_events; | ||
| 1784 | goto out; | 1762 | goto out; |
| 1785 | } | 1763 | } |
| 1786 | 1764 | ||
| 1787 | /* default */ | 1765 | /* default */ |
| 1788 | dev->control = &spic_types[1]; | 1766 | dev->model = SONYPI_DEVICE_TYPE2; |
| 1767 | dev->evport_offset = SONYPI_TYPE2_OFFSET; | ||
| 1768 | dev->event_types = type2_events; | ||
| 1789 | 1769 | ||
| 1790 | out: | 1770 | out: |
| 1791 | if (pcidev) | 1771 | if (pcidev) |
| 1792 | pci_dev_put(pcidev); | 1772 | pci_dev_put(pcidev); |
| 1793 | 1773 | ||
| 1794 | printk(KERN_INFO DRV_PFX "detected Type%d model\n", | 1774 | printk(KERN_INFO DRV_PFX "detected Type%d model\n", |
| 1795 | dev->control->model == SONYPI_DEVICE_TYPE1 ? 1 : | 1775 | dev->model == SONYPI_DEVICE_TYPE1 ? 1 : |
| 1796 | dev->control->model == SONYPI_DEVICE_TYPE2 ? 2 : 3); | 1776 | dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3); |
| 1797 | } | 1777 | } |
| 1798 | 1778 | ||
| 1799 | /* camera tests and poweron/poweroff */ | 1779 | /* camera tests and poweron/poweroff */ |
| @@ -2566,7 +2546,7 @@ static int sony_pic_enable(struct acpi_device *device, | |||
| 2566 | buffer.pointer = resource; | 2546 | buffer.pointer = resource; |
| 2567 | 2547 | ||
| 2568 | /* setup Type 1 resources */ | 2548 | /* setup Type 1 resources */ |
| 2569 | if (spic_dev.control->model == SONYPI_DEVICE_TYPE1) { | 2549 | if (spic_dev.model == SONYPI_DEVICE_TYPE1) { |
| 2570 | 2550 | ||
| 2571 | /* setup io resources */ | 2551 | /* setup io resources */ |
| 2572 | resource->res1.type = ACPI_RESOURCE_TYPE_IO; | 2552 | resource->res1.type = ACPI_RESOURCE_TYPE_IO; |
| @@ -2649,29 +2629,28 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id) | |||
| 2649 | data_mask = inb_p(dev->cur_ioport->io2.minimum); | 2629 | data_mask = inb_p(dev->cur_ioport->io2.minimum); |
| 2650 | else | 2630 | else |
| 2651 | data_mask = inb_p(dev->cur_ioport->io1.minimum + | 2631 | data_mask = inb_p(dev->cur_ioport->io1.minimum + |
| 2652 | dev->control->evport_offset); | 2632 | dev->evport_offset); |
| 2653 | 2633 | ||
| 2654 | dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n", | 2634 | dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n", |
| 2655 | ev, data_mask, dev->cur_ioport->io1.minimum, | 2635 | ev, data_mask, dev->cur_ioport->io1.minimum, |
| 2656 | dev->control->evport_offset); | 2636 | dev->evport_offset); |
| 2657 | 2637 | ||
| 2658 | if (ev == 0x00 || ev == 0xff) | 2638 | if (ev == 0x00 || ev == 0xff) |
| 2659 | return IRQ_HANDLED; | 2639 | return IRQ_HANDLED; |
| 2660 | 2640 | ||
| 2661 | for (i = 0; dev->control->event_types[i].mask; i++) { | 2641 | for (i = 0; dev->event_types[i].mask; i++) { |
| 2662 | 2642 | ||
| 2663 | if ((data_mask & dev->control->event_types[i].data) != | 2643 | if ((data_mask & dev->event_types[i].data) != |
| 2664 | dev->control->event_types[i].data) | 2644 | dev->event_types[i].data) |
| 2665 | continue; | 2645 | continue; |
| 2666 | 2646 | ||
| 2667 | if (!(mask & dev->control->event_types[i].mask)) | 2647 | if (!(mask & dev->event_types[i].mask)) |
| 2668 | continue; | 2648 | continue; |
| 2669 | 2649 | ||
| 2670 | for (j = 0; dev->control->event_types[i].events[j].event; j++) { | 2650 | for (j = 0; dev->event_types[i].events[j].event; j++) { |
| 2671 | if (ev == dev->control->event_types[i].events[j].data) { | 2651 | if (ev == dev->event_types[i].events[j].data) { |
| 2672 | device_event = | 2652 | device_event = |
| 2673 | dev->control-> | 2653 | dev->event_types[i].events[j].event; |
| 2674 | event_types[i].events[j].event; | ||
| 2675 | goto found; | 2654 | goto found; |
| 2676 | } | 2655 | } |
| 2677 | } | 2656 | } |
| @@ -2679,13 +2658,12 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id) | |||
| 2679 | /* Still not able to decode the event try to pass | 2658 | /* Still not able to decode the event try to pass |
| 2680 | * it over to the minidriver | 2659 | * it over to the minidriver |
| 2681 | */ | 2660 | */ |
| 2682 | if (dev->control->handle_irq && | 2661 | if (dev->handle_irq && dev->handle_irq(data_mask, ev) == 0) |
| 2683 | dev->control->handle_irq(data_mask, ev) == 0) | ||
| 2684 | return IRQ_HANDLED; | 2662 | return IRQ_HANDLED; |
| 2685 | 2663 | ||
| 2686 | dprintk("unknown event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n", | 2664 | dprintk("unknown event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n", |
| 2687 | ev, data_mask, dev->cur_ioport->io1.minimum, | 2665 | ev, data_mask, dev->cur_ioport->io1.minimum, |
| 2688 | dev->control->evport_offset); | 2666 | dev->evport_offset); |
| 2689 | return IRQ_HANDLED; | 2667 | return IRQ_HANDLED; |
| 2690 | 2668 | ||
| 2691 | found: | 2669 | found: |
| @@ -2816,7 +2794,7 @@ static int sony_pic_add(struct acpi_device *device) | |||
| 2816 | /* request IRQ */ | 2794 | /* request IRQ */ |
| 2817 | list_for_each_entry_reverse(irq, &spic_dev.interrupts, list) { | 2795 | list_for_each_entry_reverse(irq, &spic_dev.interrupts, list) { |
| 2818 | if (!request_irq(irq->irq.interrupts[0], sony_pic_irq, | 2796 | if (!request_irq(irq->irq.interrupts[0], sony_pic_irq, |
| 2819 | IRQF_SHARED, "sony-laptop", &spic_dev)) { | 2797 | IRQF_DISABLED, "sony-laptop", &spic_dev)) { |
| 2820 | dprintk("IRQ: %d - triggering: %d - " | 2798 | dprintk("IRQ: %d - triggering: %d - " |
| 2821 | "polarity: %d - shr: %d\n", | 2799 | "polarity: %d - shr: %d\n", |
| 2822 | irq->irq.interrupts[0], | 2800 | irq->irq.interrupts[0], |
| @@ -2949,6 +2927,7 @@ static int __init sony_laptop_init(void) | |||
| 2949 | "Unable to register SPIC driver."); | 2927 | "Unable to register SPIC driver."); |
| 2950 | goto out; | 2928 | goto out; |
| 2951 | } | 2929 | } |
| 2930 | spic_drv_registered = 1; | ||
| 2952 | } | 2931 | } |
| 2953 | 2932 | ||
| 2954 | result = acpi_bus_register_driver(&sony_nc_driver); | 2933 | result = acpi_bus_register_driver(&sony_nc_driver); |
| @@ -2960,7 +2939,7 @@ static int __init sony_laptop_init(void) | |||
| 2960 | return 0; | 2939 | return 0; |
| 2961 | 2940 | ||
| 2962 | out_unregister_pic: | 2941 | out_unregister_pic: |
| 2963 | if (!no_spic) | 2942 | if (spic_drv_registered) |
| 2964 | acpi_bus_unregister_driver(&sony_pic_driver); | 2943 | acpi_bus_unregister_driver(&sony_pic_driver); |
| 2965 | out: | 2944 | out: |
| 2966 | return result; | 2945 | return result; |
| @@ -2969,7 +2948,7 @@ out: | |||
| 2969 | static void __exit sony_laptop_exit(void) | 2948 | static void __exit sony_laptop_exit(void) |
| 2970 | { | 2949 | { |
| 2971 | acpi_bus_unregister_driver(&sony_nc_driver); | 2950 | acpi_bus_unregister_driver(&sony_nc_driver); |
| 2972 | if (!no_spic) | 2951 | if (spic_drv_registered) |
| 2973 | acpi_bus_unregister_driver(&sony_pic_driver); | 2952 | acpi_bus_unregister_driver(&sony_pic_driver); |
| 2974 | } | 2953 | } |
| 2975 | 2954 | ||
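The sony-laptop rework folds the per-model struct device_ctrl into struct sony_pic_dev, so sony_pic_detect_device_type() now fills in model, handle_irq, evport_offset and event_types directly instead of pointing at a static table; it also drops the explicit _INI evaluation, requests the SPIC interrupt with IRQF_DISABLED, and tracks whether the SPIC driver was actually registered. A sketch of that last fix, which keys unregistration off a flag rather than the user-visible no_spic option:

	static int spic_drv_registered;

	static int __init example_init(void)
	{
		int result;

		if (!no_spic) {
			result = acpi_bus_register_driver(&sony_pic_driver);
			if (result)
				return result;
			spic_drv_registered = 1;	/* only then is unregister valid */
		}

		result = acpi_bus_register_driver(&sony_nc_driver);
		if (result && spic_drv_registered)
			acpi_bus_unregister_driver(&sony_pic_driver);
		return result;
	}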
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c index a3bb49031a7f..ff4617e21426 100644 --- a/drivers/serial/serial_cs.c +++ b/drivers/serial/serial_cs.c | |||
| @@ -873,10 +873,10 @@ static struct pcmcia_device_id serial_ids[] = { | |||
| 873 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), | 873 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), |
| 874 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), | 874 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), |
| 875 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), | 875 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), |
| 876 | PCMCIA_MFC_DEVICE_CIS_PROD_ID12(1, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"), | 876 | PCMCIA_MFC_DEVICE_CIS_PROD_ID12(1, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"), |
| 877 | PCMCIA_MFC_DEVICE_CIS_PROD_ID4(1, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"), | 877 | PCMCIA_MFC_DEVICE_CIS_PROD_ID4(1, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), |
| 878 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0556, "cis/3CCFEM556.cis"), | 878 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0556, "cis/3CCFEM556.cis"), |
| 879 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "DP83903.cis"), | 879 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "cis/DP83903.cis"), |
| 880 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"), | 880 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"), |
| 881 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"), | 881 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"), |
| 882 | PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */ | 882 | PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */ |
| @@ -884,9 +884,9 @@ static struct pcmcia_device_id serial_ids[] = { | |||
| 884 | PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */ | 884 | PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */ |
| 885 | PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */ | 885 | PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */ |
| 886 | PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "cis/MT5634ZLX.cis"), | 886 | PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "cis/MT5634ZLX.cis"), |
| 887 | PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-2", 0x96913a85, 0x27ab5437, "COMpad2.cis"), | 887 | PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-2", 0x96913a85, 0x27ab5437, "cis/COMpad2.cis"), |
| 888 | PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "COMpad4.cis"), | 888 | PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"), |
| 889 | PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "COMpad2.cis"), | 889 | PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"), |
| 890 | PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"), | 890 | PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"), |
| 891 | PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "GLOBETROTTER.cis"), | 891 | PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "GLOBETROTTER.cis"), |
| 892 | PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100 1.00.",0x19ca78af,0xf964f42b), | 892 | PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100 1.00.",0x19ca78af,0xf964f42b), |
diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c index d3b496800477..b204a0929139 100644 --- a/drivers/sfi/sfi_core.c +++ b/drivers/sfi/sfi_core.c | |||
| @@ -90,7 +90,11 @@ static struct sfi_table_simple *syst_va __read_mostly; | |||
| 90 | */ | 90 | */ |
| 91 | static u32 sfi_use_ioremap __read_mostly; | 91 | static u32 sfi_use_ioremap __read_mostly; |
| 92 | 92 | ||
| 93 | static void __iomem *sfi_map_memory(u64 phys, u32 size) | 93 | /* |
| 94 | * sfi_un/map_memory calls early_ioremap/iounmap which is a __init function | ||
| 95 | * and introduces section mismatch. So use __ref to make it calm. | ||
| 96 | */ | ||
| 97 | static void __iomem * __ref sfi_map_memory(u64 phys, u32 size) | ||
| 94 | { | 98 | { |
| 95 | if (!phys || !size) | 99 | if (!phys || !size) |
| 96 | return NULL; | 100 | return NULL; |
| @@ -101,7 +105,7 @@ static void __iomem *sfi_map_memory(u64 phys, u32 size) | |||
| 101 | return early_ioremap(phys, size); | 105 | return early_ioremap(phys, size); |
| 102 | } | 106 | } |
| 103 | 107 | ||
| 104 | static void sfi_unmap_memory(void __iomem *virt, u32 size) | 108 | static void __ref sfi_unmap_memory(void __iomem *virt, u32 size) |
| 105 | { | 109 | { |
| 106 | if (!virt || !size) | 110 | if (!virt || !size) |
| 107 | return; | 111 | return; |
| @@ -125,7 +129,7 @@ static void sfi_print_table_header(unsigned long long pa, | |||
| 125 | * sfi_verify_table() | 129 | * sfi_verify_table() |
| 126 | * Sanity check table lengh, calculate checksum | 130 | * Sanity check table lengh, calculate checksum |
| 127 | */ | 131 | */ |
| 128 | static __init int sfi_verify_table(struct sfi_table_header *table) | 132 | static int sfi_verify_table(struct sfi_table_header *table) |
| 129 | { | 133 | { |
| 130 | 134 | ||
| 131 | u8 checksum = 0; | 135 | u8 checksum = 0; |
| @@ -213,12 +217,17 @@ static int sfi_table_check_key(struct sfi_table_header *th, | |||
| 213 | * the mapped virt address will be returned, and the virt space | 217 | * the mapped virt address will be returned, and the virt space |
| 214 | * will be released by call sfi_put_table() later | 218 | * will be released by call sfi_put_table() later |
| 215 | * | 219 | * |
| 220 | * This two cases are from two different functions with two different | ||
| 221 | * sections and causes section mismatch warning. So use __ref to tell | ||
| 222 | * modpost not to make any noise. | ||
| 223 | * | ||
| 216 | * Return value: | 224 | * Return value: |
| 217 | * NULL: when can't find a table matching the key | 225 | * NULL: when can't find a table matching the key |
| 218 | * ERR_PTR(error): error value | 226 | * ERR_PTR(error): error value |
| 219 | * virt table address: when a matched table is found | 227 | * virt table address: when a matched table is found |
| 220 | */ | 228 | */ |
| 221 | struct sfi_table_header *sfi_check_table(u64 pa, struct sfi_table_key *key) | 229 | struct sfi_table_header * |
| 230 | __ref sfi_check_table(u64 pa, struct sfi_table_key *key) | ||
| 222 | { | 231 | { |
| 223 | struct sfi_table_header *th; | 232 | struct sfi_table_header *th; |
| 224 | void *ret = NULL; | 233 | void *ret = NULL; |
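The sfi_core annotations address section-mismatch warnings: sfi_map_memory()/sfi_unmap_memory() call early_ioremap()/early_iounmap(), which live in .init.text, but the same helpers are also reachable after boot through the ioremap path, so they cannot themselves be __init. Marking them (and sfi_check_table()) __ref tells modpost the reference to __init code is deliberate, and sfi_verify_table() loses its __init so the late path may call it. A hedged sketch of the pattern, with the late-boot branch simplified:

	/* __ref: intentionally references an __init helper on the
	 * early-boot branch; modpost should not warn about it. */
	static void __iomem * __ref example_map(u64 phys, u32 size)
	{
		if (sfi_use_ioremap)			/* after boot */
			return ioremap(phys, size);
		return early_ioremap(phys, size);	/* early boot, __init helper */
	}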
diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c index ee1601026fb0..c24e4e0367a2 100644 --- a/drivers/staging/dst/dcore.c +++ b/drivers/staging/dst/dcore.c | |||
| @@ -102,7 +102,7 @@ static int dst_request(struct request_queue *q, struct bio *bio) | |||
| 102 | struct dst_node *n = q->queuedata; | 102 | struct dst_node *n = q->queuedata; |
| 103 | int err = -EIO; | 103 | int err = -EIO; |
| 104 | 104 | ||
| 105 | if (bio_empty_barrier(bio) && !q->prepare_discard_fn) { | 105 | if (bio_empty_barrier(bio) && !blk_queue_discard(q)) { |
| 106 | /* | 106 | /* |
| 107 | * This is a dirty^Wnice hack, but if we complete this | 107 | * This is a dirty^Wnice hack, but if we complete this |
| 108 | * operation with -EOPNOTSUPP like intended, XFS | 108 | * operation with -EOPNOTSUPP like intended, XFS |
diff --git a/drivers/staging/iio/light/tsl2561.c b/drivers/staging/iio/light/tsl2561.c index ea8a5efc19bc..fc2107f4c049 100644 --- a/drivers/staging/iio/light/tsl2561.c +++ b/drivers/staging/iio/light/tsl2561.c | |||
| @@ -239,10 +239,6 @@ static int __devexit tsl2561_remove(struct i2c_client *client) | |||
| 239 | return tsl2561_powerdown(client); | 239 | return tsl2561_powerdown(client); |
| 240 | } | 240 | } |
| 241 | 241 | ||
| 242 | static unsigned short normal_i2c[] = { 0x29, 0x39, 0x49, I2C_CLIENT_END }; | ||
| 243 | |||
| 244 | I2C_CLIENT_INSMOD; | ||
| 245 | |||
| 246 | static const struct i2c_device_id tsl2561_id[] = { | 242 | static const struct i2c_device_id tsl2561_id[] = { |
| 247 | { "tsl2561", 0 }, | 243 | { "tsl2561", 0 }, |
| 248 | { } | 244 | { } |
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c index c44367fea185..bf0f6520c6df 100644 --- a/drivers/usb/gadget/inode.c +++ b/drivers/usb/gadget/inode.c | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | #include <linux/wait.h> | 30 | #include <linux/wait.h> |
| 31 | #include <linux/compiler.h> | 31 | #include <linux/compiler.h> |
| 32 | #include <asm/uaccess.h> | 32 | #include <asm/uaccess.h> |
| 33 | #include <linux/sched.h> | ||
| 33 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
| 34 | #include <linux/poll.h> | 35 | #include <linux/poll.h> |
| 35 | #include <linux/smp_lock.h> | 36 | #include <linux/smp_lock.h> |
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c index 42e1005e2916..d065894ce38f 100644 --- a/drivers/video/da8xx-fb.c +++ b/drivers/video/da8xx-fb.c | |||
| @@ -26,7 +26,6 @@ | |||
| 26 | #include <linux/device.h> | 26 | #include <linux/device.h> |
| 27 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
| 28 | #include <linux/uaccess.h> | 28 | #include <linux/uaccess.h> |
| 29 | #include <linux/device.h> | ||
| 30 | #include <linux/interrupt.h> | 29 | #include <linux/interrupt.h> |
| 31 | #include <linux/clk.h> | 30 | #include <linux/clk.h> |
| 32 | #include <video/da8xx-fb.h> | 31 | #include <video/da8xx-fb.h> |
diff --git a/drivers/video/msm/mddi.c b/drivers/video/msm/mddi.c index f2de5a1acd6d..5c5a1ad1d397 100644 --- a/drivers/video/msm/mddi.c +++ b/drivers/video/msm/mddi.c | |||
| @@ -27,8 +27,6 @@ | |||
| 27 | #include <mach/msm_iomap.h> | 27 | #include <mach/msm_iomap.h> |
| 28 | #include <mach/irqs.h> | 28 | #include <mach/irqs.h> |
| 29 | #include <mach/board.h> | 29 | #include <mach/board.h> |
| 30 | #include <linux/delay.h> | ||
| 31 | |||
| 32 | #include <mach/msm_fb.h> | 30 | #include <mach/msm_fb.h> |
| 33 | #include "mddi_hw.h" | 31 | #include "mddi_hw.h" |
| 34 | 32 | ||
diff --git a/drivers/video/omap/blizzard.c b/drivers/video/omap/blizzard.c index d5e59556f9e2..70dadf9d2334 100644 --- a/drivers/video/omap/blizzard.c +++ b/drivers/video/omap/blizzard.c | |||
| @@ -93,7 +93,7 @@ struct blizzard_reg_list { | |||
| 93 | }; | 93 | }; |
| 94 | 94 | ||
| 95 | /* These need to be saved / restored separately from the rest. */ | 95 | /* These need to be saved / restored separately from the rest. */ |
| 96 | static struct blizzard_reg_list blizzard_pll_regs[] = { | 96 | static const struct blizzard_reg_list blizzard_pll_regs[] = { |
| 97 | { | 97 | { |
| 98 | .start = 0x04, /* Don't save PLL ctrl (0x0C) */ | 98 | .start = 0x04, /* Don't save PLL ctrl (0x0C) */ |
| 99 | .end = 0x0a, | 99 | .end = 0x0a, |
| @@ -104,7 +104,7 @@ static struct blizzard_reg_list blizzard_pll_regs[] = { | |||
| 104 | }, | 104 | }, |
| 105 | }; | 105 | }; |
| 106 | 106 | ||
| 107 | static struct blizzard_reg_list blizzard_gen_regs[] = { | 107 | static const struct blizzard_reg_list blizzard_gen_regs[] = { |
| 108 | { | 108 | { |
| 109 | .start = 0x18, /* SDRAM control */ | 109 | .start = 0x18, /* SDRAM control */ |
| 110 | .end = 0x20, | 110 | .end = 0x20, |
| @@ -191,7 +191,7 @@ struct blizzard_struct { | |||
| 191 | 191 | ||
| 192 | struct omapfb_device *fbdev; | 192 | struct omapfb_device *fbdev; |
| 193 | struct lcd_ctrl_extif *extif; | 193 | struct lcd_ctrl_extif *extif; |
| 194 | struct lcd_ctrl *int_ctrl; | 194 | const struct lcd_ctrl *int_ctrl; |
| 195 | 195 | ||
| 196 | void (*power_up)(struct device *dev); | 196 | void (*power_up)(struct device *dev); |
| 197 | void (*power_down)(struct device *dev); | 197 | void (*power_down)(struct device *dev); |
| @@ -1372,7 +1372,7 @@ static void blizzard_get_caps(int plane, struct omapfb_caps *caps) | |||
| 1372 | (1 << OMAPFB_COLOR_YUV420); | 1372 | (1 << OMAPFB_COLOR_YUV420); |
| 1373 | } | 1373 | } |
| 1374 | 1374 | ||
| 1375 | static void _save_regs(struct blizzard_reg_list *list, int cnt) | 1375 | static void _save_regs(const struct blizzard_reg_list *list, int cnt) |
| 1376 | { | 1376 | { |
| 1377 | int i; | 1377 | int i; |
| 1378 | 1378 | ||
| @@ -1383,7 +1383,7 @@ static void _save_regs(struct blizzard_reg_list *list, int cnt) | |||
| 1383 | } | 1383 | } |
| 1384 | } | 1384 | } |
| 1385 | 1385 | ||
| 1386 | static void _restore_regs(struct blizzard_reg_list *list, int cnt) | 1386 | static void _restore_regs(const struct blizzard_reg_list *list, int cnt) |
| 1387 | { | 1387 | { |
| 1388 | int i; | 1388 | int i; |
| 1389 | 1389 | ||
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c index 125e605b8c68..0d0c8c8b9b56 100644 --- a/drivers/video/omap/omapfb_main.c +++ b/drivers/video/omap/omapfb_main.c | |||
| @@ -393,7 +393,7 @@ static void omapfb_sync(struct fb_info *fbi) | |||
| 393 | * Set fb_info.fix fields and also updates fbdev. | 393 | * Set fb_info.fix fields and also updates fbdev. |
| 394 | * When calling this fb_info.var must be set up already. | 394 | * When calling this fb_info.var must be set up already. |
| 395 | */ | 395 | */ |
| 396 | static void set_fb_fix(struct fb_info *fbi) | 396 | static void set_fb_fix(struct fb_info *fbi, int from_init) |
| 397 | { | 397 | { |
| 398 | struct fb_fix_screeninfo *fix = &fbi->fix; | 398 | struct fb_fix_screeninfo *fix = &fbi->fix; |
| 399 | struct fb_var_screeninfo *var = &fbi->var; | 399 | struct fb_var_screeninfo *var = &fbi->var; |
| @@ -403,10 +403,16 @@ static void set_fb_fix(struct fb_info *fbi) | |||
| 403 | 403 | ||
| 404 | rg = &plane->fbdev->mem_desc.region[plane->idx]; | 404 | rg = &plane->fbdev->mem_desc.region[plane->idx]; |
| 405 | fbi->screen_base = rg->vaddr; | 405 | fbi->screen_base = rg->vaddr; |
| 406 | mutex_lock(&fbi->mm_lock); | 406 | |
| 407 | fix->smem_start = rg->paddr; | 407 | if (!from_init) { |
| 408 | fix->smem_len = rg->size; | 408 | mutex_lock(&fbi->mm_lock); |
| 409 | mutex_unlock(&fbi->mm_lock); | 409 | fix->smem_start = rg->paddr; |
| 410 | fix->smem_len = rg->size; | ||
| 411 | mutex_unlock(&fbi->mm_lock); | ||
| 412 | } else { | ||
| 413 | fix->smem_start = rg->paddr; | ||
| 414 | fix->smem_len = rg->size; | ||
| 415 | } | ||
| 410 | 416 | ||
| 411 | fix->type = FB_TYPE_PACKED_PIXELS; | 417 | fix->type = FB_TYPE_PACKED_PIXELS; |
| 412 | bpp = var->bits_per_pixel; | 418 | bpp = var->bits_per_pixel; |
| @@ -704,7 +710,7 @@ static int omapfb_set_par(struct fb_info *fbi) | |||
| 704 | int r = 0; | 710 | int r = 0; |
| 705 | 711 | ||
| 706 | omapfb_rqueue_lock(fbdev); | 712 | omapfb_rqueue_lock(fbdev); |
| 707 | set_fb_fix(fbi); | 713 | set_fb_fix(fbi, 0); |
| 708 | r = ctrl_change_mode(fbi); | 714 | r = ctrl_change_mode(fbi); |
| 709 | omapfb_rqueue_unlock(fbdev); | 715 | omapfb_rqueue_unlock(fbdev); |
| 710 | 716 | ||
| @@ -904,7 +910,7 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi) | |||
| 904 | if (old_size != size) { | 910 | if (old_size != size) { |
| 905 | if (size) { | 911 | if (size) { |
| 906 | memcpy(&fbi->var, new_var, sizeof(fbi->var)); | 912 | memcpy(&fbi->var, new_var, sizeof(fbi->var)); |
| 907 | set_fb_fix(fbi); | 913 | set_fb_fix(fbi, 0); |
| 908 | } else { | 914 | } else { |
| 909 | /* | 915 | /* |
| 910 | * Set these explicitly to indicate that the | 916 | * Set these explicitly to indicate that the |
| @@ -1504,7 +1510,7 @@ static int fbinfo_init(struct omapfb_device *fbdev, struct fb_info *info) | |||
| 1504 | var->bits_per_pixel = fbdev->panel->bpp; | 1510 | var->bits_per_pixel = fbdev->panel->bpp; |
| 1505 | 1511 | ||
| 1506 | set_fb_var(info, var); | 1512 | set_fb_var(info, var); |
| 1507 | set_fb_fix(info); | 1513 | set_fb_fix(info, 1); |
| 1508 | 1514 | ||
| 1509 | r = fb_alloc_cmap(&info->cmap, 16, 0); | 1515 | r = fb_alloc_cmap(&info->cmap, 16, 0); |
| 1510 | if (r != 0) | 1516 | if (r != 0) |
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c index df52cb355f7d..406caa6a71cb 100644 --- a/drivers/w1/masters/ds2482.c +++ b/drivers/w1/masters/ds2482.c | |||
| @@ -24,19 +24,6 @@ | |||
| 24 | #include "../w1_int.h" | 24 | #include "../w1_int.h" |
| 25 | 25 | ||
| 26 | /** | 26 | /** |
| 27 | * Address is selected using 2 pins, resulting in 4 possible addresses. | ||
| 28 | * 0x18, 0x19, 0x1a, 0x1b | ||
| 29 | * However, the chip cannot be detected without doing an i2c write, | ||
| 30 | * so use the force module parameter. | ||
| 31 | */ | ||
| 32 | static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; | ||
| 33 | |||
| 34 | /** | ||
| 35 | * Insmod parameters | ||
| 36 | */ | ||
| 37 | I2C_CLIENT_INSMOD_1(ds2482); | ||
| 38 | |||
| 39 | /** | ||
| 40 | * The DS2482 registers - there are 3 registers that are addressed by a read | 27 | * The DS2482 registers - there are 3 registers that are addressed by a read |
| 41 | * pointer. The read pointer is set by the last command executed. | 28 | * pointer. The read pointer is set by the last command executed. |
| 42 | * | 29 | * |
| @@ -96,8 +83,6 @@ static const u8 ds2482_chan_rd[8] = | |||
| 96 | 83 | ||
| 97 | static int ds2482_probe(struct i2c_client *client, | 84 | static int ds2482_probe(struct i2c_client *client, |
| 98 | const struct i2c_device_id *id); | 85 | const struct i2c_device_id *id); |
| 99 | static int ds2482_detect(struct i2c_client *client, int kind, | ||
| 100 | struct i2c_board_info *info); | ||
| 101 | static int ds2482_remove(struct i2c_client *client); | 86 | static int ds2482_remove(struct i2c_client *client); |
| 102 | 87 | ||
| 103 | 88 | ||
| @@ -117,8 +102,6 @@ static struct i2c_driver ds2482_driver = { | |||
| 117 | .probe = ds2482_probe, | 102 | .probe = ds2482_probe, |
| 118 | .remove = ds2482_remove, | 103 | .remove = ds2482_remove, |
| 119 | .id_table = ds2482_id, | 104 | .id_table = ds2482_id, |
| 120 | .detect = ds2482_detect, | ||
| 121 | .address_data = &addr_data, | ||
| 122 | }; | 105 | }; |
| 123 | 106 | ||
| 124 | /* | 107 | /* |
| @@ -425,19 +408,6 @@ static u8 ds2482_w1_reset_bus(void *data) | |||
| 425 | } | 408 | } |
| 426 | 409 | ||
| 427 | 410 | ||
| 428 | static int ds2482_detect(struct i2c_client *client, int kind, | ||
| 429 | struct i2c_board_info *info) | ||
| 430 | { | ||
| 431 | if (!i2c_check_functionality(client->adapter, | ||
| 432 | I2C_FUNC_SMBUS_WRITE_BYTE_DATA | | ||
| 433 | I2C_FUNC_SMBUS_BYTE)) | ||
| 434 | return -ENODEV; | ||
| 435 | |||
| 436 | strlcpy(info->type, "ds2482", I2C_NAME_SIZE); | ||
| 437 | |||
| 438 | return 0; | ||
| 439 | } | ||
| 440 | |||
| 441 | static int ds2482_probe(struct i2c_client *client, | 411 | static int ds2482_probe(struct i2c_client *client, |
| 442 | const struct i2c_device_id *id) | 412 | const struct i2c_device_id *id) |
| 443 | { | 413 | { |
| @@ -446,6 +416,11 @@ static int ds2482_probe(struct i2c_client *client, | |||
| 446 | int temp1; | 416 | int temp1; |
| 447 | int idx; | 417 | int idx; |
| 448 | 418 | ||
| 419 | if (!i2c_check_functionality(client->adapter, | ||
| 420 | I2C_FUNC_SMBUS_WRITE_BYTE_DATA | | ||
| 421 | I2C_FUNC_SMBUS_BYTE)) | ||
| 422 | return -ENODEV; | ||
| 423 | |||
| 449 | if (!(data = kzalloc(sizeof(struct ds2482_data), GFP_KERNEL))) { | 424 | if (!(data = kzalloc(sizeof(struct ds2482_data), GFP_KERNEL))) { |
| 450 | err = -ENOMEM; | 425 | err = -ENOMEM; |
| 451 | goto exit; | 426 | goto exit; |
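With the legacy detect()/address_data autodetection removed above, the DS2482 can no longer be bound through the old I2C_CLIENT_INSMOD "force" module parameters; binding now happens only when the device is declared explicitly and the functionality check runs in probe(). A hedged board-file sketch of such a declaration (the bus number 1 and address 0x18 are illustrative assumptions, not taken from this patch):

/* Hypothetical board support sketch: instantiate a DS2482 at 0x18 on bus 1. */
#include <linux/i2c.h>
#include <linux/init.h>

static struct i2c_board_info ds2482_info __initdata = {
	I2C_BOARD_INFO("ds2482", 0x18),	/* must match ds2482_id[] in the driver */
};

static int __init board_w1_init(void)
{
	/* Registering board info before the adapter probes makes the i2c
	 * core call ds2482_probe(), which now performs the SMBus
	 * functionality check itself (see hunk above). */
	return i2c_register_board_info(1, &ds2482_info, 1);
}

On a running system the same binding can presumably also be requested from userspace by writing "ds2482 0x18" to the adapter's new_device sysfs file.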
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c index a9592d981b10..6c4269b836b7 100644 --- a/drivers/xen/xenfs/xenbus.c +++ b/drivers/xen/xenfs/xenbus.c | |||
| @@ -43,6 +43,7 @@ | |||
| 43 | #include <linux/fs.h> | 43 | #include <linux/fs.h> |
| 44 | #include <linux/poll.h> | 44 | #include <linux/poll.h> |
| 45 | #include <linux/mutex.h> | 45 | #include <linux/mutex.h> |
| 46 | #include <linux/sched.h> | ||
| 46 | #include <linux/spinlock.h> | 47 | #include <linux/spinlock.h> |
| 47 | #include <linux/mount.h> | 48 | #include <linux/mount.h> |
| 48 | #include <linux/pagemap.h> | 49 | #include <linux/pagemap.h> |
diff --git a/firmware/Makefile b/firmware/Makefile index 5ea80b19785b..a6c7c3e47e42 100644 --- a/firmware/Makefile +++ b/firmware/Makefile | |||
| @@ -67,10 +67,13 @@ fw-shipped-$(CONFIG_DVB_TTUSB_BUDGET) += ttusb-budget/dspbootcode.bin | |||
| 67 | fw-shipped-$(CONFIG_E100) += e100/d101m_ucode.bin e100/d101s_ucode.bin \ | 67 | fw-shipped-$(CONFIG_E100) += e100/d101m_ucode.bin e100/d101s_ucode.bin \ |
| 68 | e100/d102e_ucode.bin | 68 | e100/d102e_ucode.bin |
| 69 | fw-shipped-$(CONFIG_MYRI_SBUS) += myricom/lanai.bin | 69 | fw-shipped-$(CONFIG_MYRI_SBUS) += myricom/lanai.bin |
| 70 | fw-shipped-$(CONFIG_PCMCIA_PCNET) += cis/LA-PCM.cis cis/PCMLM28.cis | 70 | fw-shipped-$(CONFIG_PCMCIA_PCNET) += cis/LA-PCM.cis cis/PCMLM28.cis \ |
| 71 | cis/DP83903.cis cis/NE2K.cis \ | ||
| 72 | cis/tamarack.cis | ||
| 71 | fw-shipped-$(CONFIG_PCMCIA_3C589) += cis/3CXEM556.cis | 73 | fw-shipped-$(CONFIG_PCMCIA_3C589) += cis/3CXEM556.cis |
| 72 | fw-shipped-$(CONFIG_PCMCIA_3C574) += cis/3CCFEM556.cis | 74 | fw-shipped-$(CONFIG_PCMCIA_3C574) += cis/3CCFEM556.cis |
| 73 | fw-shipped-$(CONFIG_SERIAL_8250_CS) += cis/MT5634ZLX.cis cis/RS-COM-2P.cis | 75 | fw-shipped-$(CONFIG_SERIAL_8250_CS) += cis/MT5634ZLX.cis cis/RS-COM-2P.cis \ |
| 76 | cis/COMpad2.cis cis/COMpad4.cis | ||
| 74 | fw-shipped-$(CONFIG_PCMCIA_SMC91C92) += ositech/Xilinx7OD.bin | 77 | fw-shipped-$(CONFIG_PCMCIA_SMC91C92) += ositech/Xilinx7OD.bin |
| 75 | fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \ | 78 | fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \ |
| 76 | advansys/3550.bin advansys/38C0800.bin | 79 | advansys/3550.bin advansys/38C0800.bin |
diff --git a/firmware/WHENCE b/firmware/WHENCE index 3f8c4f6bc43f..c437e14f0b11 100644 --- a/firmware/WHENCE +++ b/firmware/WHENCE | |||
| @@ -597,6 +597,9 @@ Driver: PCMCIA_PCNET - NE2000 compatible PCMCIA adapter | |||
| 597 | 597 | ||
| 598 | File: cis/LA-PCM.cis | 598 | File: cis/LA-PCM.cis |
| 599 | cis/PCMLM28.cis | 599 | cis/PCMLM28.cis |
| 600 | cis/DP83903.cis | ||
| 601 | cis/NE2K.cis | ||
| 602 | cis/tamarack.cis | ||
| 600 | 603 | ||
| 601 | Licence: GPL | 604 | Licence: GPL |
| 602 | 605 | ||
| @@ -628,6 +631,8 @@ Driver: SERIAL_8250_CS - Serial PCMCIA adapter | |||
| 628 | 631 | ||
| 629 | File: cis/MT5634ZLX.cis | 632 | File: cis/MT5634ZLX.cis |
| 630 | cis/RS-COM-2P.cis | 633 | cis/RS-COM-2P.cis |
| 634 | cis/COMpad2.cis | ||
| 635 | cis/COMpad4.cis | ||
| 631 | 636 | ||
| 632 | Licence: GPL | 637 | Licence: GPL |
| 633 | 638 | ||
diff --git a/firmware/cis/COMpad2.cis.ihex b/firmware/cis/COMpad2.cis.ihex new file mode 100644 index 000000000000..1671c5e48caa --- /dev/null +++ b/firmware/cis/COMpad2.cis.ihex | |||
| @@ -0,0 +1,11 @@ | |||
| 1 | :1000000001030000FF151F0401414456414E5445B1 | ||
| 2 | :10001000434800434F4D7061642D33322F38350013 | ||
| 3 | :10002000312E300000FF210202011A0501050001F6 | ||
| 4 | :10003000031B0EC18118AA61E80207E8030730B864 | ||
| 5 | :100040009E1B08820108AA6030030F1B0883010869 | ||
| 6 | :10005000AA6040030F1B08840108AA6050030F1B0D | ||
| 7 | :0D00600008850108AA6060030F1400FF006E | ||
| 8 | :00000001FF | ||
| 9 | # | ||
| 10 | # Replacement CIS for Advantech COMpad-32/85 | ||
| 11 | # | ||
diff --git a/firmware/cis/COMpad4.cis.ihex b/firmware/cis/COMpad4.cis.ihex new file mode 100644 index 000000000000..27bbec1921b3 --- /dev/null +++ b/firmware/cis/COMpad4.cis.ihex | |||
| @@ -0,0 +1,9 @@ | |||
| 1 | :1000000001030000FF151F0401414456414E5445B1 | ||
| 2 | :10001000434800434F4D7061642D33322F383542D1 | ||
| 3 | :100020002D34000000FF210202011A050102000127 | ||
| 4 | :10003000011B0BC18118AA6040021F30B89E1B082B | ||
| 5 | :0C004000820108AA6040031F1400FF00AA | ||
| 6 | :00000001FF | ||
| 7 | # | ||
| 8 | # Replacement CIS for Advantech COMpad-32/85B-4 | ||
| 9 | # | ||
diff --git a/firmware/cis/DP83903.cis.ihex b/firmware/cis/DP83903.cis.ihex new file mode 100644 index 000000000000..6d73ea3cf1b8 --- /dev/null +++ b/firmware/cis/DP83903.cis.ihex | |||
| @@ -0,0 +1,14 @@ | |||
| 1 | :1000000001030000FF152904014D756C74696675C4 | ||
| 2 | :100010006E6374696F6E20436172640000004E531A | ||
| 3 | :1000200043204D46204C414E2F4D6F64656D00FFBF | ||
| 4 | :1000300020047501000021020000060B02004900A7 | ||
| 5 | :100040000000006A000000FF00130343495321022F | ||
| 6 | :1000500006001A060517201077021B0C970179017C | ||
| 7 | :10006000556530FFFF284000FF001303434953212B | ||
| 8 | :100070000202001A060507401077021B09870119C2 | ||
| 9 | :0800800001552330FFFFFF00D2 | ||
| 10 | :00000001FF | ||
| 11 | # | ||
| 12 | # This CIS is for cards based on the National Semiconductor | ||
| 13 | # DP83903 Multiple Function Interface Chip | ||
| 14 | # | ||
diff --git a/firmware/cis/NE2K.cis.ihex b/firmware/cis/NE2K.cis.ihex new file mode 100644 index 000000000000..1bb40fc4759f --- /dev/null +++ b/firmware/cis/NE2K.cis.ihex | |||
| @@ -0,0 +1,8 @@ | |||
| 1 | :1000000001030000FF1515040150434D4349410011 | ||
| 2 | :1000100045746865726E6574000000FF2102060079 | ||
| 3 | :100020001A050120F803031B09E001190155653089 | ||
| 4 | :06003000FFFF1400FF00B9 | ||
| 5 | :00000001FF | ||
| 6 | # | ||
| 7 | # Replacement CIS for various busted NE2000-compatible cards | ||
| 8 | # | ||
diff --git a/firmware/cis/tamarack.cis.ihex b/firmware/cis/tamarack.cis.ihex new file mode 100644 index 000000000000..1e86547fb361 --- /dev/null +++ b/firmware/cis/tamarack.cis.ihex | |||
| @@ -0,0 +1,10 @@ | |||
| 1 | :100000000103D400FF17034100FF152404015441EC | ||
| 2 | :100010004D415241434B0045746865726E657400F2 | ||
| 3 | :10002000410030303437343331313830303100FF33 | ||
| 4 | :10003000210206001A050120F803031B14E08119B0 | ||
| 5 | :100040003F554D5D06864626E551000F100F30FFE7 | ||
| 6 | :05005000FF1400FF0099 | ||
| 7 | :00000001FF | ||
| 8 | # | ||
| 9 | # Replacement CIS for Surecom, Tamarack NE2000 cards | ||
| 10 | # | ||
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index d11c51fc2a3f..2ca7a7cafdbf 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c | |||
| @@ -8,8 +8,10 @@ | |||
| 8 | * | 8 | * |
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #include <linux/cred.h> | ||
| 11 | #include <linux/file.h> | 12 | #include <linux/file.h> |
| 12 | #include <linux/poll.h> | 13 | #include <linux/poll.h> |
| 14 | #include <linux/sched.h> | ||
| 13 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
| 14 | #include <linux/init.h> | 16 | #include <linux/init.h> |
| 15 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
| @@ -249,6 +249,7 @@ void bio_free(struct bio *bio, struct bio_set *bs) | |||
| 249 | 249 | ||
| 250 | mempool_free(p, bs->bio_pool); | 250 | mempool_free(p, bs->bio_pool); |
| 251 | } | 251 | } |
| 252 | EXPORT_SYMBOL(bio_free); | ||
| 252 | 253 | ||
| 253 | void bio_init(struct bio *bio) | 254 | void bio_init(struct bio *bio) |
| 254 | { | 255 | { |
| @@ -257,6 +258,7 @@ void bio_init(struct bio *bio) | |||
| 257 | bio->bi_comp_cpu = -1; | 258 | bio->bi_comp_cpu = -1; |
| 258 | atomic_set(&bio->bi_cnt, 1); | 259 | atomic_set(&bio->bi_cnt, 1); |
| 259 | } | 260 | } |
| 261 | EXPORT_SYMBOL(bio_init); | ||
| 260 | 262 | ||
| 261 | /** | 263 | /** |
| 262 | * bio_alloc_bioset - allocate a bio for I/O | 264 | * bio_alloc_bioset - allocate a bio for I/O |
| @@ -311,6 +313,7 @@ err_free: | |||
| 311 | mempool_free(p, bs->bio_pool); | 313 | mempool_free(p, bs->bio_pool); |
| 312 | return NULL; | 314 | return NULL; |
| 313 | } | 315 | } |
| 316 | EXPORT_SYMBOL(bio_alloc_bioset); | ||
| 314 | 317 | ||
| 315 | static void bio_fs_destructor(struct bio *bio) | 318 | static void bio_fs_destructor(struct bio *bio) |
| 316 | { | 319 | { |
| @@ -337,6 +340,7 @@ struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs) | |||
| 337 | 340 | ||
| 338 | return bio; | 341 | return bio; |
| 339 | } | 342 | } |
| 343 | EXPORT_SYMBOL(bio_alloc); | ||
| 340 | 344 | ||
| 341 | static void bio_kmalloc_destructor(struct bio *bio) | 345 | static void bio_kmalloc_destructor(struct bio *bio) |
| 342 | { | 346 | { |
| @@ -380,6 +384,7 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs) | |||
| 380 | 384 | ||
| 381 | return bio; | 385 | return bio; |
| 382 | } | 386 | } |
| 387 | EXPORT_SYMBOL(bio_kmalloc); | ||
| 383 | 388 | ||
| 384 | void zero_fill_bio(struct bio *bio) | 389 | void zero_fill_bio(struct bio *bio) |
| 385 | { | 390 | { |
| @@ -416,6 +421,7 @@ void bio_put(struct bio *bio) | |||
| 416 | bio->bi_destructor(bio); | 421 | bio->bi_destructor(bio); |
| 417 | } | 422 | } |
| 418 | } | 423 | } |
| 424 | EXPORT_SYMBOL(bio_put); | ||
| 419 | 425 | ||
| 420 | inline int bio_phys_segments(struct request_queue *q, struct bio *bio) | 426 | inline int bio_phys_segments(struct request_queue *q, struct bio *bio) |
| 421 | { | 427 | { |
| @@ -424,6 +430,7 @@ inline int bio_phys_segments(struct request_queue *q, struct bio *bio) | |||
| 424 | 430 | ||
| 425 | return bio->bi_phys_segments; | 431 | return bio->bi_phys_segments; |
| 426 | } | 432 | } |
| 433 | EXPORT_SYMBOL(bio_phys_segments); | ||
| 427 | 434 | ||
| 428 | /** | 435 | /** |
| 429 | * __bio_clone - clone a bio | 436 | * __bio_clone - clone a bio |
| @@ -451,6 +458,7 @@ void __bio_clone(struct bio *bio, struct bio *bio_src) | |||
| 451 | bio->bi_size = bio_src->bi_size; | 458 | bio->bi_size = bio_src->bi_size; |
| 452 | bio->bi_idx = bio_src->bi_idx; | 459 | bio->bi_idx = bio_src->bi_idx; |
| 453 | } | 460 | } |
| 461 | EXPORT_SYMBOL(__bio_clone); | ||
| 454 | 462 | ||
| 455 | /** | 463 | /** |
| 456 | * bio_clone - clone a bio | 464 | * bio_clone - clone a bio |
| @@ -482,6 +490,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask) | |||
| 482 | 490 | ||
| 483 | return b; | 491 | return b; |
| 484 | } | 492 | } |
| 493 | EXPORT_SYMBOL(bio_clone); | ||
| 485 | 494 | ||
| 486 | /** | 495 | /** |
| 487 | * bio_get_nr_vecs - return approx number of vecs | 496 | * bio_get_nr_vecs - return approx number of vecs |
| @@ -505,6 +514,7 @@ int bio_get_nr_vecs(struct block_device *bdev) | |||
| 505 | 514 | ||
| 506 | return nr_pages; | 515 | return nr_pages; |
| 507 | } | 516 | } |
| 517 | EXPORT_SYMBOL(bio_get_nr_vecs); | ||
| 508 | 518 | ||
| 509 | static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page | 519 | static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page |
| 510 | *page, unsigned int len, unsigned int offset, | 520 | *page, unsigned int len, unsigned int offset, |
| @@ -635,6 +645,7 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page, | |||
| 635 | return __bio_add_page(q, bio, page, len, offset, | 645 | return __bio_add_page(q, bio, page, len, offset, |
| 636 | queue_max_hw_sectors(q)); | 646 | queue_max_hw_sectors(q)); |
| 637 | } | 647 | } |
| 648 | EXPORT_SYMBOL(bio_add_pc_page); | ||
| 638 | 649 | ||
| 639 | /** | 650 | /** |
| 640 | * bio_add_page - attempt to add page to bio | 651 | * bio_add_page - attempt to add page to bio |
| @@ -655,6 +666,7 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len, | |||
| 655 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); | 666 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); |
| 656 | return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q)); | 667 | return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q)); |
| 657 | } | 668 | } |
| 669 | EXPORT_SYMBOL(bio_add_page); | ||
| 658 | 670 | ||
| 659 | struct bio_map_data { | 671 | struct bio_map_data { |
| 660 | struct bio_vec *iovecs; | 672 | struct bio_vec *iovecs; |
| @@ -776,6 +788,7 @@ int bio_uncopy_user(struct bio *bio) | |||
| 776 | bio_put(bio); | 788 | bio_put(bio); |
| 777 | return ret; | 789 | return ret; |
| 778 | } | 790 | } |
| 791 | EXPORT_SYMBOL(bio_uncopy_user); | ||
| 779 | 792 | ||
| 780 | /** | 793 | /** |
| 781 | * bio_copy_user_iov - copy user data to bio | 794 | * bio_copy_user_iov - copy user data to bio |
| @@ -920,6 +933,7 @@ struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data, | |||
| 920 | 933 | ||
| 921 | return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask); | 934 | return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask); |
| 922 | } | 935 | } |
| 936 | EXPORT_SYMBOL(bio_copy_user); | ||
| 923 | 937 | ||
| 924 | static struct bio *__bio_map_user_iov(struct request_queue *q, | 938 | static struct bio *__bio_map_user_iov(struct request_queue *q, |
| 925 | struct block_device *bdev, | 939 | struct block_device *bdev, |
| @@ -1050,6 +1064,7 @@ struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev, | |||
| 1050 | 1064 | ||
| 1051 | return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask); | 1065 | return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask); |
| 1052 | } | 1066 | } |
| 1067 | EXPORT_SYMBOL(bio_map_user); | ||
| 1053 | 1068 | ||
| 1054 | /** | 1069 | /** |
| 1055 | * bio_map_user_iov - map user sg_iovec table into bio | 1070 | * bio_map_user_iov - map user sg_iovec table into bio |
| @@ -1117,13 +1132,13 @@ void bio_unmap_user(struct bio *bio) | |||
| 1117 | __bio_unmap_user(bio); | 1132 | __bio_unmap_user(bio); |
| 1118 | bio_put(bio); | 1133 | bio_put(bio); |
| 1119 | } | 1134 | } |
| 1135 | EXPORT_SYMBOL(bio_unmap_user); | ||
| 1120 | 1136 | ||
| 1121 | static void bio_map_kern_endio(struct bio *bio, int err) | 1137 | static void bio_map_kern_endio(struct bio *bio, int err) |
| 1122 | { | 1138 | { |
| 1123 | bio_put(bio); | 1139 | bio_put(bio); |
| 1124 | } | 1140 | } |
| 1125 | 1141 | ||
| 1126 | |||
| 1127 | static struct bio *__bio_map_kern(struct request_queue *q, void *data, | 1142 | static struct bio *__bio_map_kern(struct request_queue *q, void *data, |
| 1128 | unsigned int len, gfp_t gfp_mask) | 1143 | unsigned int len, gfp_t gfp_mask) |
| 1129 | { | 1144 | { |
| @@ -1189,6 +1204,7 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len, | |||
| 1189 | bio_put(bio); | 1204 | bio_put(bio); |
| 1190 | return ERR_PTR(-EINVAL); | 1205 | return ERR_PTR(-EINVAL); |
| 1191 | } | 1206 | } |
| 1207 | EXPORT_SYMBOL(bio_map_kern); | ||
| 1192 | 1208 | ||
| 1193 | static void bio_copy_kern_endio(struct bio *bio, int err) | 1209 | static void bio_copy_kern_endio(struct bio *bio, int err) |
| 1194 | { | 1210 | { |
| @@ -1250,6 +1266,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len, | |||
| 1250 | 1266 | ||
| 1251 | return bio; | 1267 | return bio; |
| 1252 | } | 1268 | } |
| 1269 | EXPORT_SYMBOL(bio_copy_kern); | ||
| 1253 | 1270 | ||
| 1254 | /* | 1271 | /* |
| 1255 | * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions | 1272 | * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions |
| @@ -1400,6 +1417,7 @@ void bio_endio(struct bio *bio, int error) | |||
| 1400 | if (bio->bi_end_io) | 1417 | if (bio->bi_end_io) |
| 1401 | bio->bi_end_io(bio, error); | 1418 | bio->bi_end_io(bio, error); |
| 1402 | } | 1419 | } |
| 1420 | EXPORT_SYMBOL(bio_endio); | ||
| 1403 | 1421 | ||
| 1404 | void bio_pair_release(struct bio_pair *bp) | 1422 | void bio_pair_release(struct bio_pair *bp) |
| 1405 | { | 1423 | { |
| @@ -1410,6 +1428,7 @@ void bio_pair_release(struct bio_pair *bp) | |||
| 1410 | mempool_free(bp, bp->bio2.bi_private); | 1428 | mempool_free(bp, bp->bio2.bi_private); |
| 1411 | } | 1429 | } |
| 1412 | } | 1430 | } |
| 1431 | EXPORT_SYMBOL(bio_pair_release); | ||
| 1413 | 1432 | ||
| 1414 | static void bio_pair_end_1(struct bio *bi, int err) | 1433 | static void bio_pair_end_1(struct bio *bi, int err) |
| 1415 | { | 1434 | { |
| @@ -1477,6 +1496,7 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors) | |||
| 1477 | 1496 | ||
| 1478 | return bp; | 1497 | return bp; |
| 1479 | } | 1498 | } |
| 1499 | EXPORT_SYMBOL(bio_split); | ||
| 1480 | 1500 | ||
| 1481 | /** | 1501 | /** |
| 1482 | * bio_sector_offset - Find hardware sector offset in bio | 1502 | * bio_sector_offset - Find hardware sector offset in bio |
| @@ -1547,6 +1567,7 @@ void bioset_free(struct bio_set *bs) | |||
| 1547 | 1567 | ||
| 1548 | kfree(bs); | 1568 | kfree(bs); |
| 1549 | } | 1569 | } |
| 1570 | EXPORT_SYMBOL(bioset_free); | ||
| 1550 | 1571 | ||
| 1551 | /** | 1572 | /** |
| 1552 | * bioset_create - Create a bio_set | 1573 | * bioset_create - Create a bio_set |
| @@ -1592,6 +1613,7 @@ bad: | |||
| 1592 | bioset_free(bs); | 1613 | bioset_free(bs); |
| 1593 | return NULL; | 1614 | return NULL; |
| 1594 | } | 1615 | } |
| 1616 | EXPORT_SYMBOL(bioset_create); | ||
| 1595 | 1617 | ||
| 1596 | static void __init biovec_init_slabs(void) | 1618 | static void __init biovec_init_slabs(void) |
| 1597 | { | 1619 | { |
| @@ -1636,29 +1658,4 @@ static int __init init_bio(void) | |||
| 1636 | 1658 | ||
| 1637 | return 0; | 1659 | return 0; |
| 1638 | } | 1660 | } |
| 1639 | |||
| 1640 | subsys_initcall(init_bio); | 1661 | subsys_initcall(init_bio); |
| 1641 | |||
| 1642 | EXPORT_SYMBOL(bio_alloc); | ||
| 1643 | EXPORT_SYMBOL(bio_kmalloc); | ||
| 1644 | EXPORT_SYMBOL(bio_put); | ||
| 1645 | EXPORT_SYMBOL(bio_free); | ||
| 1646 | EXPORT_SYMBOL(bio_endio); | ||
| 1647 | EXPORT_SYMBOL(bio_init); | ||
| 1648 | EXPORT_SYMBOL(__bio_clone); | ||
| 1649 | EXPORT_SYMBOL(bio_clone); | ||
| 1650 | EXPORT_SYMBOL(bio_phys_segments); | ||
| 1651 | EXPORT_SYMBOL(bio_add_page); | ||
| 1652 | EXPORT_SYMBOL(bio_add_pc_page); | ||
| 1653 | EXPORT_SYMBOL(bio_get_nr_vecs); | ||
| 1654 | EXPORT_SYMBOL(bio_map_user); | ||
| 1655 | EXPORT_SYMBOL(bio_unmap_user); | ||
| 1656 | EXPORT_SYMBOL(bio_map_kern); | ||
| 1657 | EXPORT_SYMBOL(bio_copy_kern); | ||
| 1658 | EXPORT_SYMBOL(bio_pair_release); | ||
| 1659 | EXPORT_SYMBOL(bio_split); | ||
| 1660 | EXPORT_SYMBOL(bio_copy_user); | ||
| 1661 | EXPORT_SYMBOL(bio_uncopy_user); | ||
| 1662 | EXPORT_SYMBOL(bioset_create); | ||
| 1663 | EXPORT_SYMBOL(bioset_free); | ||
| 1664 | EXPORT_SYMBOL(bio_alloc_bioset); | ||
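The fs/bio.c hunks above only move each EXPORT_SYMBOL() from the collected list at the bottom of the file to the line directly after the function it exports, which keeps the export visible next to its definition. A generic sketch of the resulting pattern (the function name is illustrative, not from this patch):

#include <linux/module.h>

/* Preferred style: the export sits immediately after the definition it exports. */
int example_helper(int x)
{
	return x * 2;
}
EXPORT_SYMBOL(example_helper);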
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c index 0376ac66c44a..be4392ca2098 100644 --- a/fs/coda/psdev.c +++ b/fs/coda/psdev.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
| 23 | #include <linux/major.h> | 23 | #include <linux/major.h> |
| 24 | #include <linux/time.h> | 24 | #include <linux/time.h> |
| 25 | #include <linux/sched.h> | ||
| 25 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
| 26 | #include <linux/ioport.h> | 27 | #include <linux/ioport.h> |
| 27 | #include <linux/fcntl.h> | 28 | #include <linux/fcntl.h> |
diff --git a/fs/partitions/check.c b/fs/partitions/check.c index 7b685e10cbad..f38fee0311a7 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c | |||
| @@ -248,19 +248,11 @@ ssize_t part_stat_show(struct device *dev, | |||
| 248 | part_stat_read(p, merges[WRITE]), | 248 | part_stat_read(p, merges[WRITE]), |
| 249 | (unsigned long long)part_stat_read(p, sectors[WRITE]), | 249 | (unsigned long long)part_stat_read(p, sectors[WRITE]), |
| 250 | jiffies_to_msecs(part_stat_read(p, ticks[WRITE])), | 250 | jiffies_to_msecs(part_stat_read(p, ticks[WRITE])), |
| 251 | part_in_flight(p), | 251 | p->in_flight, |
| 252 | jiffies_to_msecs(part_stat_read(p, io_ticks)), | 252 | jiffies_to_msecs(part_stat_read(p, io_ticks)), |
| 253 | jiffies_to_msecs(part_stat_read(p, time_in_queue))); | 253 | jiffies_to_msecs(part_stat_read(p, time_in_queue))); |
| 254 | } | 254 | } |
| 255 | 255 | ||
| 256 | ssize_t part_inflight_show(struct device *dev, | ||
| 257 | struct device_attribute *attr, char *buf) | ||
| 258 | { | ||
| 259 | struct hd_struct *p = dev_to_part(dev); | ||
| 260 | |||
| 261 | return sprintf(buf, "%8u %8u\n", p->in_flight[0], p->in_flight[1]); | ||
| 262 | } | ||
| 263 | |||
| 264 | #ifdef CONFIG_FAIL_MAKE_REQUEST | 256 | #ifdef CONFIG_FAIL_MAKE_REQUEST |
| 265 | ssize_t part_fail_show(struct device *dev, | 257 | ssize_t part_fail_show(struct device *dev, |
| 266 | struct device_attribute *attr, char *buf) | 258 | struct device_attribute *attr, char *buf) |
| @@ -289,7 +281,6 @@ static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL); | |||
| 289 | static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); | 281 | static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); |
| 290 | static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL); | 282 | static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL); |
| 291 | static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); | 283 | static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); |
| 292 | static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL); | ||
| 293 | #ifdef CONFIG_FAIL_MAKE_REQUEST | 284 | #ifdef CONFIG_FAIL_MAKE_REQUEST |
| 294 | static struct device_attribute dev_attr_fail = | 285 | static struct device_attribute dev_attr_fail = |
| 295 | __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); | 286 | __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); |
| @@ -301,7 +292,6 @@ static struct attribute *part_attrs[] = { | |||
| 301 | &dev_attr_size.attr, | 292 | &dev_attr_size.attr, |
| 302 | &dev_attr_alignment_offset.attr, | 293 | &dev_attr_alignment_offset.attr, |
| 303 | &dev_attr_stat.attr, | 294 | &dev_attr_stat.attr, |
| 304 | &dev_attr_inflight.attr, | ||
| 305 | #ifdef CONFIG_FAIL_MAKE_REQUEST | 295 | #ifdef CONFIG_FAIL_MAKE_REQUEST |
| 306 | &dev_attr_fail.attr, | 296 | &dev_attr_fail.attr, |
| 307 | #endif | 297 | #endif |
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index 56013371f9f3..a44a7897fd4d 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c | |||
| @@ -23,7 +23,6 @@ | |||
| 23 | #include <asm/io.h> | 23 | #include <asm/io.h> |
| 24 | #include <linux/list.h> | 24 | #include <linux/list.h> |
| 25 | #include <linux/ioport.h> | 25 | #include <linux/ioport.h> |
| 26 | #include <linux/mm.h> | ||
| 27 | #include <linux/memory.h> | 26 | #include <linux/memory.h> |
| 28 | #include <asm/sections.h> | 27 | #include <asm/sections.h> |
| 29 | 28 | ||
diff --git a/fs/proc/page.c b/fs/proc/page.c index 2281c2cbfe2b..5033ce0d254b 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c | |||
| @@ -94,6 +94,7 @@ static const struct file_operations proc_kpagecount_operations = { | |||
| 94 | #define KPF_COMPOUND_TAIL 16 | 94 | #define KPF_COMPOUND_TAIL 16 |
| 95 | #define KPF_HUGE 17 | 95 | #define KPF_HUGE 17 |
| 96 | #define KPF_UNEVICTABLE 18 | 96 | #define KPF_UNEVICTABLE 18 |
| 97 | #define KPF_HWPOISON 19 | ||
| 97 | #define KPF_NOPAGE 20 | 98 | #define KPF_NOPAGE 20 |
| 98 | 99 | ||
| 99 | #define KPF_KSM 21 | 100 | #define KPF_KSM 21 |
| @@ -180,6 +181,10 @@ static u64 get_uflags(struct page *page) | |||
| 180 | u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable); | 181 | u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable); |
| 181 | u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked); | 182 | u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked); |
| 182 | 183 | ||
| 184 | #ifdef CONFIG_MEMORY_FAILURE | ||
| 185 | u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison); | ||
| 186 | #endif | ||
| 187 | |||
| 183 | #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR | 188 | #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR |
| 184 | u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached); | 189 | u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached); |
| 185 | #endif | 190 | #endif |
diff --git a/fs/select.c b/fs/select.c index a201fc370223..fd38ce2e32e3 100644 --- a/fs/select.c +++ b/fs/select.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | */ | 15 | */ |
| 16 | 16 | ||
| 17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
| 18 | #include <linux/sched.h> | ||
| 18 | #include <linux/syscalls.h> | 19 | #include <linux/syscalls.h> |
| 19 | #include <linux/module.h> | 20 | #include <linux/module.h> |
| 20 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index ef47dfd8e5e9..b29e20168b5f 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h | |||
| @@ -61,6 +61,9 @@ struct drm_crtc_helper_funcs { | |||
| 61 | /* Move the crtc on the current fb to the given position *optional* */ | 61 | /* Move the crtc on the current fb to the given position *optional* */ |
| 62 | int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, | 62 | int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, |
| 63 | struct drm_framebuffer *old_fb); | 63 | struct drm_framebuffer *old_fb); |
| 64 | |||
| 65 | /* reload the current crtc LUT */ | ||
| 66 | void (*load_lut)(struct drm_crtc *crtc); | ||
| 64 | }; | 67 | }; |
| 65 | 68 | ||
| 66 | struct drm_encoder_helper_funcs { | 69 | struct drm_encoder_helper_funcs { |
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 4aa5740ce59f..58c892a2cbfa 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h | |||
| @@ -39,6 +39,8 @@ struct drm_fb_helper_crtc { | |||
| 39 | struct drm_fb_helper_funcs { | 39 | struct drm_fb_helper_funcs { |
| 40 | void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green, | 40 | void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green, |
| 41 | u16 blue, int regno); | 41 | u16 blue, int regno); |
| 42 | void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
| 43 | u16 *blue, int regno); | ||
| 42 | }; | 44 | }; |
| 43 | 45 | ||
| 44 | /* mode specified on the command line */ | 46 | /* mode specified on the command line */ |
| @@ -71,6 +73,7 @@ struct drm_fb_helper { | |||
| 71 | }; | 73 | }; |
| 72 | 74 | ||
| 73 | int drm_fb_helper_single_fb_probe(struct drm_device *dev, | 75 | int drm_fb_helper_single_fb_probe(struct drm_device *dev, |
| 76 | int preferred_bpp, | ||
| 74 | int (*fb_create)(struct drm_device *dev, | 77 | int (*fb_create)(struct drm_device *dev, |
| 75 | uint32_t fb_width, | 78 | uint32_t fb_width, |
| 76 | uint32_t fb_height, | 79 | uint32_t fb_height, |
| @@ -98,9 +101,11 @@ int drm_fb_helper_setcolreg(unsigned regno, | |||
| 98 | void drm_fb_helper_restore(void); | 101 | void drm_fb_helper_restore(void); |
| 99 | void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb, | 102 | void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb, |
| 100 | uint32_t fb_width, uint32_t fb_height); | 103 | uint32_t fb_width, uint32_t fb_height); |
| 101 | void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch); | 104 | void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, |
| 105 | uint32_t depth); | ||
| 102 | 106 | ||
| 103 | int drm_fb_helper_add_connector(struct drm_connector *connector); | 107 | int drm_fb_helper_add_connector(struct drm_connector *connector); |
| 104 | int drm_fb_helper_parse_command_line(struct drm_device *dev); | 108 | int drm_fb_helper_parse_command_line(struct drm_device *dev); |
| 109 | int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); | ||
| 105 | 110 | ||
| 106 | #endif | 111 | #endif |
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 3f6e545609be..e6f3b120f51a 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h | |||
| @@ -80,7 +80,7 @@ | |||
| 80 | {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \ | 80 | {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \ |
| 81 | {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ | 81 | {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ |
| 82 | {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ | 82 | {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ |
| 83 | {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ | 83 | {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_SINGLE_CRTC}, \ |
| 84 | {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ | 84 | {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
| 85 | {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ | 85 | {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
| 86 | {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ | 86 | {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
| @@ -113,7 +113,7 @@ | |||
| 113 | {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ | 113 | {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ |
| 114 | {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ | 114 | {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ |
| 115 | {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ | 115 | {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ |
| 116 | {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ | 116 | {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_SINGLE_CRTC}, \ |
| 117 | {0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ | 117 | {0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ |
| 118 | {0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ | 118 | {0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ |
| 119 | {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ | 119 | {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ |
diff --git a/include/linux/Kbuild b/include/linux/Kbuild index cff4a101f266..3f384d4b163a 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild | |||
| @@ -126,6 +126,7 @@ header-y += nfs_mount.h | |||
| 126 | header-y += nl80211.h | 126 | header-y += nl80211.h |
| 127 | header-y += param.h | 127 | header-y += param.h |
| 128 | header-y += pci_regs.h | 128 | header-y += pci_regs.h |
| 129 | header-y += perf_event.h | ||
| 129 | header-y += pfkeyv2.h | 130 | header-y += pfkeyv2.h |
| 130 | header-y += pg.h | 131 | header-y += pg.h |
| 131 | header-y += phantom.h | 132 | header-y += phantom.h |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index e23a86cae5ac..25119041e034 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -82,7 +82,6 @@ enum rq_cmd_type_bits { | |||
| 82 | enum { | 82 | enum { |
| 83 | REQ_LB_OP_EJECT = 0x40, /* eject request */ | 83 | REQ_LB_OP_EJECT = 0x40, /* eject request */ |
| 84 | REQ_LB_OP_FLUSH = 0x41, /* flush request */ | 84 | REQ_LB_OP_FLUSH = 0x41, /* flush request */ |
| 85 | REQ_LB_OP_DISCARD = 0x42, /* discard sectors */ | ||
| 86 | }; | 85 | }; |
| 87 | 86 | ||
| 88 | /* | 87 | /* |
| @@ -261,7 +260,6 @@ typedef void (request_fn_proc) (struct request_queue *q); | |||
| 261 | typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); | 260 | typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); |
| 262 | typedef int (prep_rq_fn) (struct request_queue *, struct request *); | 261 | typedef int (prep_rq_fn) (struct request_queue *, struct request *); |
| 263 | typedef void (unplug_fn) (struct request_queue *); | 262 | typedef void (unplug_fn) (struct request_queue *); |
| 264 | typedef int (prepare_discard_fn) (struct request_queue *, struct request *); | ||
| 265 | 263 | ||
| 266 | struct bio_vec; | 264 | struct bio_vec; |
| 267 | struct bvec_merge_data { | 265 | struct bvec_merge_data { |
| @@ -313,6 +311,7 @@ struct queue_limits { | |||
| 313 | unsigned int alignment_offset; | 311 | unsigned int alignment_offset; |
| 314 | unsigned int io_min; | 312 | unsigned int io_min; |
| 315 | unsigned int io_opt; | 313 | unsigned int io_opt; |
| 314 | unsigned int max_discard_sectors; | ||
| 316 | 315 | ||
| 317 | unsigned short logical_block_size; | 316 | unsigned short logical_block_size; |
| 318 | unsigned short max_hw_segments; | 317 | unsigned short max_hw_segments; |
| @@ -340,7 +339,6 @@ struct request_queue | |||
| 340 | make_request_fn *make_request_fn; | 339 | make_request_fn *make_request_fn; |
| 341 | prep_rq_fn *prep_rq_fn; | 340 | prep_rq_fn *prep_rq_fn; |
| 342 | unplug_fn *unplug_fn; | 341 | unplug_fn *unplug_fn; |
| 343 | prepare_discard_fn *prepare_discard_fn; | ||
| 344 | merge_bvec_fn *merge_bvec_fn; | 342 | merge_bvec_fn *merge_bvec_fn; |
| 345 | prepare_flush_fn *prepare_flush_fn; | 343 | prepare_flush_fn *prepare_flush_fn; |
| 346 | softirq_done_fn *softirq_done_fn; | 344 | softirq_done_fn *softirq_done_fn; |
| @@ -460,6 +458,7 @@ struct request_queue | |||
| 460 | #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ | 458 | #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ |
| 461 | #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ | 459 | #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ |
| 462 | #define QUEUE_FLAG_CQ 16 /* hardware does queuing */ | 460 | #define QUEUE_FLAG_CQ 16 /* hardware does queuing */ |
| 461 | #define QUEUE_FLAG_DISCARD 17 /* supports DISCARD */ | ||
| 463 | 462 | ||
| 464 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ | 463 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ |
| 465 | (1 << QUEUE_FLAG_CLUSTER) | \ | 464 | (1 << QUEUE_FLAG_CLUSTER) | \ |
| @@ -591,6 +590,7 @@ enum { | |||
| 591 | #define blk_queue_flushing(q) ((q)->ordseq) | 590 | #define blk_queue_flushing(q) ((q)->ordseq) |
| 592 | #define blk_queue_stackable(q) \ | 591 | #define blk_queue_stackable(q) \ |
| 593 | test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) | 592 | test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) |
| 593 | #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) | ||
| 594 | 594 | ||
| 595 | #define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS) | 595 | #define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS) |
| 596 | #define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC) | 596 | #define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC) |
| @@ -929,6 +929,8 @@ extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); | |||
| 929 | extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); | 929 | extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); |
| 930 | extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); | 930 | extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); |
| 931 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); | 931 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); |
| 932 | extern void blk_queue_max_discard_sectors(struct request_queue *q, | ||
| 933 | unsigned int max_discard_sectors); | ||
| 932 | extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); | 934 | extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); |
| 933 | extern void blk_queue_physical_block_size(struct request_queue *, unsigned short); | 935 | extern void blk_queue_physical_block_size(struct request_queue *, unsigned short); |
| 934 | extern void blk_queue_alignment_offset(struct request_queue *q, | 936 | extern void blk_queue_alignment_offset(struct request_queue *q, |
| @@ -955,7 +957,6 @@ extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); | |||
| 955 | extern void blk_queue_dma_alignment(struct request_queue *, int); | 957 | extern void blk_queue_dma_alignment(struct request_queue *, int); |
| 956 | extern void blk_queue_update_dma_alignment(struct request_queue *, int); | 958 | extern void blk_queue_update_dma_alignment(struct request_queue *, int); |
| 957 | extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); | 959 | extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); |
| 958 | extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *); | ||
| 959 | extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); | 960 | extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); |
| 960 | extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); | 961 | extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); |
| 961 | extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); | 962 | extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); |
| @@ -1080,25 +1081,37 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q) | |||
| 1080 | return q->limits.physical_block_size; | 1081 | return q->limits.physical_block_size; |
| 1081 | } | 1082 | } |
| 1082 | 1083 | ||
| 1084 | static inline int bdev_physical_block_size(struct block_device *bdev) | ||
| 1085 | { | ||
| 1086 | return queue_physical_block_size(bdev_get_queue(bdev)); | ||
| 1087 | } | ||
| 1088 | |||
| 1083 | static inline unsigned int queue_io_min(struct request_queue *q) | 1089 | static inline unsigned int queue_io_min(struct request_queue *q) |
| 1084 | { | 1090 | { |
| 1085 | return q->limits.io_min; | 1091 | return q->limits.io_min; |
| 1086 | } | 1092 | } |
| 1087 | 1093 | ||
| 1094 | static inline int bdev_io_min(struct block_device *bdev) | ||
| 1095 | { | ||
| 1096 | return queue_io_min(bdev_get_queue(bdev)); | ||
| 1097 | } | ||
| 1098 | |||
| 1088 | static inline unsigned int queue_io_opt(struct request_queue *q) | 1099 | static inline unsigned int queue_io_opt(struct request_queue *q) |
| 1089 | { | 1100 | { |
| 1090 | return q->limits.io_opt; | 1101 | return q->limits.io_opt; |
| 1091 | } | 1102 | } |
| 1092 | 1103 | ||
| 1104 | static inline int bdev_io_opt(struct block_device *bdev) | ||
| 1105 | { | ||
| 1106 | return queue_io_opt(bdev_get_queue(bdev)); | ||
| 1107 | } | ||
| 1108 | |||
| 1093 | static inline int queue_alignment_offset(struct request_queue *q) | 1109 | static inline int queue_alignment_offset(struct request_queue *q) |
| 1094 | { | 1110 | { |
| 1095 | if (q && q->limits.misaligned) | 1111 | if (q->limits.misaligned) |
| 1096 | return -1; | 1112 | return -1; |
| 1097 | 1113 | ||
| 1098 | if (q && q->limits.alignment_offset) | 1114 | return q->limits.alignment_offset; |
| 1099 | return q->limits.alignment_offset; | ||
| 1100 | |||
| 1101 | return 0; | ||
| 1102 | } | 1115 | } |
| 1103 | 1116 | ||
| 1104 | static inline int queue_sector_alignment_offset(struct request_queue *q, | 1117 | static inline int queue_sector_alignment_offset(struct request_queue *q, |
| @@ -1108,6 +1121,19 @@ static inline int queue_sector_alignment_offset(struct request_queue *q, | |||
| 1108 | & (q->limits.io_min - 1); | 1121 | & (q->limits.io_min - 1); |
| 1109 | } | 1122 | } |
| 1110 | 1123 | ||
| 1124 | static inline int bdev_alignment_offset(struct block_device *bdev) | ||
| 1125 | { | ||
| 1126 | struct request_queue *q = bdev_get_queue(bdev); | ||
| 1127 | |||
| 1128 | if (q->limits.misaligned) | ||
| 1129 | return -1; | ||
| 1130 | |||
| 1131 | if (bdev != bdev->bd_contains) | ||
| 1132 | return bdev->bd_part->alignment_offset; | ||
| 1133 | |||
| 1134 | return q->limits.alignment_offset; | ||
| 1135 | } | ||
| 1136 | |||
| 1111 | static inline int queue_dma_alignment(struct request_queue *q) | 1137 | static inline int queue_dma_alignment(struct request_queue *q) |
| 1112 | { | 1138 | { |
| 1113 | return q ? q->dma_alignment : 511; | 1139 | return q ? q->dma_alignment : 511; |
| @@ -1146,7 +1172,11 @@ static inline void put_dev_sector(Sector p) | |||
| 1146 | } | 1172 | } |
| 1147 | 1173 | ||
| 1148 | struct work_struct; | 1174 | struct work_struct; |
| 1175 | struct delayed_work; | ||
| 1149 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); | 1176 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); |
| 1177 | int kblockd_schedule_delayed_work(struct request_queue *q, | ||
| 1178 | struct delayed_work *work, | ||
| 1179 | unsigned long delay); | ||
| 1150 | 1180 | ||
| 1151 | #define MODULE_ALIAS_BLOCKDEV(major,minor) \ | 1181 | #define MODULE_ALIAS_BLOCKDEV(major,minor) \ |
| 1152 | MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) | 1182 | MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) |
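The new bdev_* wrappers let filesystems and stacking drivers read the I/O topology of a partition or whole disk without reaching into the request queue themselves, and blk_queue_max_discard_sectors() together with QUEUE_FLAG_DISCARD is how a driver advertises discard support. A hedged sketch of both sides (function names and the 8 MiB limit are illustrative assumptions):

#include <linux/blkdev.h>

/* Consumer side: size an allocation unit from the exported topology. */
static unsigned int demo_pick_alloc_unit(struct block_device *bdev)
{
	unsigned int unit = bdev_io_opt(bdev);		/* optimal I/O size, may be 0 */

	if (!unit)
		unit = bdev_io_min(bdev);		/* fall back to minimum I/O size */
	if (!unit)
		unit = bdev_physical_block_size(bdev);
	return unit;
}

/* Provider side: a driver advertising a discard limit on its queue. */
static void demo_init_queue(struct request_queue *q)
{
	blk_queue_max_discard_sectors(q, 8 * 1024 * 1024 / 512);	/* e.g. 8 MiB */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}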
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 7e4350ece0f8..3b73b9992b26 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h | |||
| @@ -198,6 +198,7 @@ extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, | |||
| 198 | char __user *arg); | 198 | char __user *arg); |
| 199 | extern int blk_trace_startstop(struct request_queue *q, int start); | 199 | extern int blk_trace_startstop(struct request_queue *q, int start); |
| 200 | extern int blk_trace_remove(struct request_queue *q); | 200 | extern int blk_trace_remove(struct request_queue *q); |
| 201 | extern void blk_trace_remove_sysfs(struct device *dev); | ||
| 201 | extern int blk_trace_init_sysfs(struct device *dev); | 202 | extern int blk_trace_init_sysfs(struct device *dev); |
| 202 | 203 | ||
| 203 | extern struct attribute_group blk_trace_attr_group; | 204 | extern struct attribute_group blk_trace_attr_group; |
| @@ -211,6 +212,7 @@ extern struct attribute_group blk_trace_attr_group; | |||
| 211 | # define blk_trace_startstop(q, start) (-ENOTTY) | 212 | # define blk_trace_startstop(q, start) (-ENOTTY) |
| 212 | # define blk_trace_remove(q) (-ENOTTY) | 213 | # define blk_trace_remove(q) (-ENOTTY) |
| 213 | # define blk_add_trace_msg(q, fmt, ...) do { } while (0) | 214 | # define blk_add_trace_msg(q, fmt, ...) do { } while (0) |
| 215 | # define blk_trace_remove_sysfs(dev) do { } while (0) | ||
| 214 | static inline int blk_trace_init_sysfs(struct device *dev) | 216 | static inline int blk_trace_init_sysfs(struct device *dev) |
| 215 | { | 217 | { |
| 216 | return 0; | 218 | return 0; |
diff --git a/include/linux/fs.h b/include/linux/fs.h index a1e6899d4b6c..2620a8c63571 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -300,6 +300,10 @@ struct inodes_stat_t { | |||
| 300 | #define BLKTRACESTOP _IO(0x12,117) | 300 | #define BLKTRACESTOP _IO(0x12,117) |
| 301 | #define BLKTRACETEARDOWN _IO(0x12,118) | 301 | #define BLKTRACETEARDOWN _IO(0x12,118) |
| 302 | #define BLKDISCARD _IO(0x12,119) | 302 | #define BLKDISCARD _IO(0x12,119) |
| 303 | #define BLKIOMIN _IO(0x12,120) | ||
| 304 | #define BLKIOOPT _IO(0x12,121) | ||
| 305 | #define BLKALIGNOFF _IO(0x12,122) | ||
| 306 | #define BLKPBSZGET _IO(0x12,123) | ||
| 303 | 307 | ||
| 304 | #define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ | 308 | #define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ |
| 305 | #define FIBMAP _IO(0x00,1) /* bmap access */ | 309 | #define FIBMAP _IO(0x00,1) /* bmap access */ |
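The four new ioctls export the same topology fields to userspace; each one fills in the integer pointed to by its argument. A small userspace sketch querying them on a block device (error handling trimmed; the exact return types in the kernel may be unsigned, plain int is used here for brevity):

/* Query I/O topology of a block device via the new ioctls. */
#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	int fd = open(argv[1], O_RDONLY);
	int io_min, io_opt, align, pbsz;

	if (fd < 0)
		return 1;
	ioctl(fd, BLKIOMIN, &io_min);	/* minimum I/O size */
	ioctl(fd, BLKIOOPT, &io_opt);	/* optimal I/O size */
	ioctl(fd, BLKALIGNOFF, &align);	/* alignment offset */
	ioctl(fd, BLKPBSZGET, &pbsz);	/* physical block size */
	printf("io_min=%d io_opt=%d align=%d pbsz=%d\n",
	       io_min, io_opt, align, pbsz);
	return 0;
}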
diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 297df45ffd0a..7beaa21b3880 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h | |||
| @@ -98,7 +98,7 @@ struct hd_struct { | |||
| 98 | int make_it_fail; | 98 | int make_it_fail; |
| 99 | #endif | 99 | #endif |
| 100 | unsigned long stamp; | 100 | unsigned long stamp; |
| 101 | int in_flight[2]; | 101 | int in_flight; |
| 102 | #ifdef CONFIG_SMP | 102 | #ifdef CONFIG_SMP |
| 103 | struct disk_stats *dkstats; | 103 | struct disk_stats *dkstats; |
| 104 | #else | 104 | #else |
| @@ -322,23 +322,18 @@ static inline void free_part_stats(struct hd_struct *part) | |||
| 322 | #define part_stat_sub(cpu, gendiskp, field, subnd) \ | 322 | #define part_stat_sub(cpu, gendiskp, field, subnd) \ |
| 323 | part_stat_add(cpu, gendiskp, field, -subnd) | 323 | part_stat_add(cpu, gendiskp, field, -subnd) |
| 324 | 324 | ||
| 325 | static inline void part_inc_in_flight(struct hd_struct *part, int rw) | 325 | static inline void part_inc_in_flight(struct hd_struct *part) |
| 326 | { | 326 | { |
| 327 | part->in_flight[rw]++; | 327 | part->in_flight++; |
| 328 | if (part->partno) | 328 | if (part->partno) |
| 329 | part_to_disk(part)->part0.in_flight[rw]++; | 329 | part_to_disk(part)->part0.in_flight++; |
| 330 | } | 330 | } |
| 331 | 331 | ||
| 332 | static inline void part_dec_in_flight(struct hd_struct *part, int rw) | 332 | static inline void part_dec_in_flight(struct hd_struct *part) |
| 333 | { | 333 | { |
| 334 | part->in_flight[rw]--; | 334 | part->in_flight--; |
| 335 | if (part->partno) | 335 | if (part->partno) |
| 336 | part_to_disk(part)->part0.in_flight[rw]--; | 336 | part_to_disk(part)->part0.in_flight--; |
| 337 | } | ||
| 338 | |||
| 339 | static inline int part_in_flight(struct hd_struct *part) | ||
| 340 | { | ||
| 341 | return part->in_flight[0] + part->in_flight[1]; | ||
| 342 | } | 337 | } |
| 343 | 338 | ||
| 344 | /* block/blk-core.c */ | 339 | /* block/blk-core.c */ |
| @@ -551,8 +546,6 @@ extern ssize_t part_size_show(struct device *dev, | |||
| 551 | struct device_attribute *attr, char *buf); | 546 | struct device_attribute *attr, char *buf); |
| 552 | extern ssize_t part_stat_show(struct device *dev, | 547 | extern ssize_t part_stat_show(struct device *dev, |
| 553 | struct device_attribute *attr, char *buf); | 548 | struct device_attribute *attr, char *buf); |
| 554 | extern ssize_t part_inflight_show(struct device *dev, | ||
| 555 | struct device_attribute *attr, char *buf); | ||
| 556 | #ifdef CONFIG_FAIL_MAKE_REQUEST | 549 | #ifdef CONFIG_FAIL_MAKE_REQUEST |
| 557 | extern ssize_t part_fail_show(struct device *dev, | 550 | extern ssize_t part_fail_show(struct device *dev, |
| 558 | struct device_attribute *attr, char *buf); | 551 | struct device_attribute *attr, char *buf); |
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 3a9d36d1e92a..2e6d95f97419 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
| @@ -442,6 +442,7 @@ enum perf_callchain_context { | |||
| 442 | #include <linux/hrtimer.h> | 442 | #include <linux/hrtimer.h> |
| 443 | #include <linux/fs.h> | 443 | #include <linux/fs.h> |
| 444 | #include <linux/pid_namespace.h> | 444 | #include <linux/pid_namespace.h> |
| 445 | #include <linux/workqueue.h> | ||
| 445 | #include <asm/atomic.h> | 446 | #include <asm/atomic.h> |
| 446 | 447 | ||
| 447 | #define PERF_MAX_STACK_DEPTH 255 | 448 | #define PERF_MAX_STACK_DEPTH 255 |
| @@ -513,6 +514,10 @@ struct file; | |||
| 513 | 514 | ||
| 514 | struct perf_mmap_data { | 515 | struct perf_mmap_data { |
| 515 | struct rcu_head rcu_head; | 516 | struct rcu_head rcu_head; |
| 517 | #ifdef CONFIG_PERF_USE_VMALLOC | ||
| 518 | struct work_struct work; | ||
| 519 | #endif | ||
| 520 | int data_order; | ||
| 516 | int nr_pages; /* nr of data pages */ | 521 | int nr_pages; /* nr of data pages */ |
| 517 | int writable; /* are we writable */ | 522 | int writable; /* are we writable */ |
| 518 | int nr_locked; /* nr pages mlocked */ | 523 | int nr_locked; /* nr pages mlocked */ |
diff --git a/include/linux/poll.h b/include/linux/poll.h index fa287f25138d..6673743946f7 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h | |||
| @@ -6,10 +6,10 @@ | |||
| 6 | #ifdef __KERNEL__ | 6 | #ifdef __KERNEL__ |
| 7 | 7 | ||
| 8 | #include <linux/compiler.h> | 8 | #include <linux/compiler.h> |
| 9 | #include <linux/ktime.h> | ||
| 9 | #include <linux/wait.h> | 10 | #include <linux/wait.h> |
| 10 | #include <linux/string.h> | 11 | #include <linux/string.h> |
| 11 | #include <linux/fs.h> | 12 | #include <linux/fs.h> |
| 12 | #include <linux/sched.h> | ||
| 13 | #include <asm/uaccess.h> | 13 | #include <asm/uaccess.h> |
| 14 | 14 | ||
| 15 | /* ~832 bytes of stack space used max in sys_select/sys_poll before allocating | 15 | /* ~832 bytes of stack space used max in sys_select/sys_poll before allocating |
diff --git a/include/linux/socket.h b/include/linux/socket.h index 3b461dffe244..3273a0c5043b 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h | |||
| @@ -16,7 +16,7 @@ struct __kernel_sockaddr_storage { | |||
| 16 | /* _SS_MAXSIZE value minus size of ss_family */ | 16 | /* _SS_MAXSIZE value minus size of ss_family */ |
| 17 | } __attribute__ ((aligned(_K_SS_ALIGNSIZE))); /* force desired alignment */ | 17 | } __attribute__ ((aligned(_K_SS_ALIGNSIZE))); /* force desired alignment */ |
| 18 | 18 | ||
| 19 | #if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) | 19 | #ifdef __KERNEL__ |
| 20 | 20 | ||
| 21 | #include <asm/socket.h> /* arch-dependent defines */ | 21 | #include <asm/socket.h> /* arch-dependent defines */ |
| 22 | #include <linux/sockios.h> /* the SIOCxxx I/O controls */ | 22 | #include <linux/sockios.h> /* the SIOCxxx I/O controls */ |
| @@ -101,21 +101,6 @@ struct cmsghdr { | |||
| 101 | ((char *)(cmsg) - (char *)(mhdr)->msg_control))) | 101 | ((char *)(cmsg) - (char *)(mhdr)->msg_control))) |
| 102 | 102 | ||
| 103 | /* | 103 | /* |
| 104 | * This mess will go away with glibc | ||
| 105 | */ | ||
| 106 | |||
| 107 | #ifdef __KERNEL__ | ||
| 108 | #define __KINLINE static inline | ||
| 109 | #elif defined(__GNUC__) | ||
| 110 | #define __KINLINE static __inline__ | ||
| 111 | #elif defined(__cplusplus) | ||
| 112 | #define __KINLINE static inline | ||
| 113 | #else | ||
| 114 | #define __KINLINE static | ||
| 115 | #endif | ||
| 116 | |||
| 117 | |||
| 118 | /* | ||
| 119 | * Get the next cmsg header | 104 | * Get the next cmsg header |
| 120 | * | 105 | * |
| 121 | * PLEASE, do not touch this function. If you think, that it is | 106 | * PLEASE, do not touch this function. If you think, that it is |
| @@ -128,7 +113,7 @@ struct cmsghdr { | |||
| 128 | * ancillary object DATA. --ANK (980731) | 113 | * ancillary object DATA. --ANK (980731) |
| 129 | */ | 114 | */ |
| 130 | 115 | ||
| 131 | __KINLINE struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size, | 116 | static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size, |
| 132 | struct cmsghdr *__cmsg) | 117 | struct cmsghdr *__cmsg) |
| 133 | { | 118 | { |
| 134 | struct cmsghdr * __ptr; | 119 | struct cmsghdr * __ptr; |
| @@ -140,7 +125,7 @@ __KINLINE struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size, | |||
| 140 | return __ptr; | 125 | return __ptr; |
| 141 | } | 126 | } |
| 142 | 127 | ||
| 143 | __KINLINE struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__cmsg) | 128 | static inline struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__cmsg) |
| 144 | { | 129 | { |
| 145 | return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg); | 130 | return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg); |
| 146 | } | 131 | } |
diff --git a/include/trace/events/block.h b/include/trace/events/block.h index d86af94691c2..00405b5f624a 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h | |||
| @@ -488,6 +488,39 @@ TRACE_EVENT(block_remap, | |||
| 488 | (unsigned long long)__entry->old_sector) | 488 | (unsigned long long)__entry->old_sector) |
| 489 | ); | 489 | ); |
| 490 | 490 | ||
| 491 | TRACE_EVENT(block_rq_remap, | ||
| 492 | |||
| 493 | TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev, | ||
| 494 | sector_t from), | ||
| 495 | |||
| 496 | TP_ARGS(q, rq, dev, from), | ||
| 497 | |||
| 498 | TP_STRUCT__entry( | ||
| 499 | __field( dev_t, dev ) | ||
| 500 | __field( sector_t, sector ) | ||
| 501 | __field( unsigned int, nr_sector ) | ||
| 502 | __field( dev_t, old_dev ) | ||
| 503 | __field( sector_t, old_sector ) | ||
| 504 | __array( char, rwbs, 6 ) | ||
| 505 | ), | ||
| 506 | |||
| 507 | TP_fast_assign( | ||
| 508 | __entry->dev = disk_devt(rq->rq_disk); | ||
| 509 | __entry->sector = blk_rq_pos(rq); | ||
| 510 | __entry->nr_sector = blk_rq_sectors(rq); | ||
| 511 | __entry->old_dev = dev; | ||
| 512 | __entry->old_sector = from; | ||
| 513 | blk_fill_rwbs_rq(__entry->rwbs, rq); | ||
| 514 | ), | ||
| 515 | |||
| 516 | TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", | ||
| 517 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
| 518 | (unsigned long long)__entry->sector, | ||
| 519 | __entry->nr_sector, | ||
| 520 | MAJOR(__entry->old_dev), MINOR(__entry->old_dev), | ||
| 521 | (unsigned long long)__entry->old_sector) | ||
| 522 | ); | ||
| 523 | |||
| 491 | #endif /* _TRACE_BLOCK_H */ | 524 | #endif /* _TRACE_BLOCK_H */ |
| 492 | 525 | ||
| 493 | /* This part must be outside protection */ | 526 | /* This part must be outside protection */ |
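The new TRACE_EVENT(block_rq_remap) generates a trace_block_rq_remap() call for request-based remapping, which the blktrace hook added further down in this changeset consumes. As a hedged sketch (not part of the patch; function and variable names are hypothetical), a request-based remapping driver in the device-mapper style would emit it when handing a cloned request to the lower device:

static void example_dispatch_clone(struct request_queue *lower_q,
				   struct request *clone,
				   struct gendisk *upper_disk,
				   sector_t orig_sector)
{
	/* record where the clone came from before it is issued below */
	trace_block_rq_remap(lower_q, clone, disk_devt(upper_disk),
			     orig_sector);
	/* ... actually queue the clone on lower_q ... */
}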
diff --git a/init/Kconfig b/init/Kconfig index c7bac39d6c61..09c5c6431f42 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
| @@ -921,6 +921,11 @@ config HAVE_PERF_EVENTS | |||
| 921 | help | 921 | help |
| 922 | See tools/perf/design.txt for details. | 922 | See tools/perf/design.txt for details. |
| 923 | 923 | ||
| 924 | config PERF_USE_VMALLOC | ||
| 925 | bool | ||
| 926 | help | ||
| 927 | See tools/perf/design.txt for details. | ||
| 928 | |||
| 924 | menu "Kernel Performance Events And Counters" | 929 | menu "Kernel Performance Events And Counters" |
| 925 | 930 | ||
| 926 | config PERF_EVENTS | 931 | config PERF_EVENTS |
| @@ -976,6 +981,19 @@ config PERF_COUNTERS | |||
| 976 | 981 | ||
| 977 | Say N if unsure. | 982 | Say N if unsure. |
| 978 | 983 | ||
| 984 | config DEBUG_PERF_USE_VMALLOC | ||
| 985 | default n | ||
| 986 | bool "Debug: use vmalloc to back perf mmap() buffers" | ||
| 987 | depends on PERF_EVENTS && DEBUG_KERNEL | ||
| 988 | select PERF_USE_VMALLOC | ||
| 989 | help | ||
| 990 | Use vmalloc memory to back perf mmap() buffers. | ||
| 991 | |||
| 992 | Mostly useful for debugging the vmalloc code on platforms | ||
| 993 | that don't require it. | ||
| 994 | |||
| 995 | Say N if unsure. | ||
| 996 | |||
| 979 | endmenu | 997 | endmenu |
| 980 | 998 | ||
| 981 | config VM_EVENT_COUNTERS | 999 | config VM_EVENT_COUNTERS |
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 6d7020490f94..3e1c36e7998f 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
| @@ -726,8 +726,6 @@ static int hrtimer_switch_to_hres(void) | |||
| 726 | /* "Retrigger" the interrupt to get things going */ | 726 | /* "Retrigger" the interrupt to get things going */ |
| 727 | retrigger_next_event(NULL); | 727 | retrigger_next_event(NULL); |
| 728 | local_irq_restore(flags); | 728 | local_irq_restore(flags); |
| 729 | printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n", | ||
| 730 | smp_processor_id()); | ||
| 731 | return 1; | 729 | return 1; |
| 732 | } | 730 | } |
| 733 | 731 | ||
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 0f86feb6db0c..9d0b5c665883 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/percpu.h> | 20 | #include <linux/percpu.h> |
| 21 | #include <linux/ptrace.h> | 21 | #include <linux/ptrace.h> |
| 22 | #include <linux/vmstat.h> | 22 | #include <linux/vmstat.h> |
| 23 | #include <linux/vmalloc.h> | ||
| 23 | #include <linux/hardirq.h> | 24 | #include <linux/hardirq.h> |
| 24 | #include <linux/rculist.h> | 25 | #include <linux/rculist.h> |
| 25 | #include <linux/uaccess.h> | 26 | #include <linux/uaccess.h> |
| @@ -1030,14 +1031,10 @@ void __perf_event_sched_out(struct perf_event_context *ctx, | |||
| 1030 | update_context_time(ctx); | 1031 | update_context_time(ctx); |
| 1031 | 1032 | ||
| 1032 | perf_disable(); | 1033 | perf_disable(); |
| 1033 | if (ctx->nr_active) { | 1034 | if (ctx->nr_active) |
| 1034 | list_for_each_entry(event, &ctx->group_list, group_entry) { | 1035 | list_for_each_entry(event, &ctx->group_list, group_entry) |
| 1035 | if (event != event->group_leader) | 1036 | group_sched_out(event, cpuctx, ctx); |
| 1036 | event_sched_out(event, cpuctx, ctx); | 1037 | |
| 1037 | else | ||
| 1038 | group_sched_out(event, cpuctx, ctx); | ||
| 1039 | } | ||
| 1040 | } | ||
| 1041 | perf_enable(); | 1038 | perf_enable(); |
| 1042 | out: | 1039 | out: |
| 1043 | spin_unlock(&ctx->lock); | 1040 | spin_unlock(&ctx->lock); |
| @@ -1258,12 +1255,8 @@ __perf_event_sched_in(struct perf_event_context *ctx, | |||
| 1258 | if (event->cpu != -1 && event->cpu != cpu) | 1255 | if (event->cpu != -1 && event->cpu != cpu) |
| 1259 | continue; | 1256 | continue; |
| 1260 | 1257 | ||
| 1261 | if (event != event->group_leader) | 1258 | if (group_can_go_on(event, cpuctx, 1)) |
| 1262 | event_sched_in(event, cpuctx, ctx, cpu); | 1259 | group_sched_in(event, cpuctx, ctx, cpu); |
| 1263 | else { | ||
| 1264 | if (group_can_go_on(event, cpuctx, 1)) | ||
| 1265 | group_sched_in(event, cpuctx, ctx, cpu); | ||
| 1266 | } | ||
| 1267 | 1260 | ||
| 1268 | /* | 1261 | /* |
| 1269 | * If this pinned group hasn't been scheduled, | 1262 | * If this pinned group hasn't been scheduled, |
| @@ -1291,15 +1284,9 @@ __perf_event_sched_in(struct perf_event_context *ctx, | |||
| 1291 | if (event->cpu != -1 && event->cpu != cpu) | 1284 | if (event->cpu != -1 && event->cpu != cpu) |
| 1292 | continue; | 1285 | continue; |
| 1293 | 1286 | ||
| 1294 | if (event != event->group_leader) { | 1287 | if (group_can_go_on(event, cpuctx, can_add_hw)) |
| 1295 | if (event_sched_in(event, cpuctx, ctx, cpu)) | 1288 | if (group_sched_in(event, cpuctx, ctx, cpu)) |
| 1296 | can_add_hw = 0; | 1289 | can_add_hw = 0; |
| 1297 | } else { | ||
| 1298 | if (group_can_go_on(event, cpuctx, can_add_hw)) { | ||
| 1299 | if (group_sched_in(event, cpuctx, ctx, cpu)) | ||
| 1300 | can_add_hw = 0; | ||
| 1301 | } | ||
| 1302 | } | ||
| 1303 | } | 1290 | } |
| 1304 | perf_enable(); | 1291 | perf_enable(); |
| 1305 | out: | 1292 | out: |
| @@ -2105,49 +2092,31 @@ unlock: | |||
| 2105 | rcu_read_unlock(); | 2092 | rcu_read_unlock(); |
| 2106 | } | 2093 | } |
| 2107 | 2094 | ||
| 2108 | static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 2095 | static unsigned long perf_data_size(struct perf_mmap_data *data) |
| 2109 | { | 2096 | { |
| 2110 | struct perf_event *event = vma->vm_file->private_data; | 2097 | return data->nr_pages << (PAGE_SHIFT + data->data_order); |
| 2111 | struct perf_mmap_data *data; | 2098 | } |
| 2112 | int ret = VM_FAULT_SIGBUS; | ||
| 2113 | |||
| 2114 | if (vmf->flags & FAULT_FLAG_MKWRITE) { | ||
| 2115 | if (vmf->pgoff == 0) | ||
| 2116 | ret = 0; | ||
| 2117 | return ret; | ||
| 2118 | } | ||
| 2119 | |||
| 2120 | rcu_read_lock(); | ||
| 2121 | data = rcu_dereference(event->data); | ||
| 2122 | if (!data) | ||
| 2123 | goto unlock; | ||
| 2124 | |||
| 2125 | if (vmf->pgoff == 0) { | ||
| 2126 | vmf->page = virt_to_page(data->user_page); | ||
| 2127 | } else { | ||
| 2128 | int nr = vmf->pgoff - 1; | ||
| 2129 | |||
| 2130 | if ((unsigned)nr > data->nr_pages) | ||
| 2131 | goto unlock; | ||
| 2132 | 2099 | ||
| 2133 | if (vmf->flags & FAULT_FLAG_WRITE) | 2100 | #ifndef CONFIG_PERF_USE_VMALLOC |
| 2134 | goto unlock; | ||
| 2135 | 2101 | ||
| 2136 | vmf->page = virt_to_page(data->data_pages[nr]); | 2102 | /* |
| 2137 | } | 2103 | * Back perf_mmap() with regular GFP_KERNEL-0 pages. |
| 2104 | */ | ||
| 2138 | 2105 | ||
| 2139 | get_page(vmf->page); | 2106 | static struct page * |
| 2140 | vmf->page->mapping = vma->vm_file->f_mapping; | 2107 | perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff) |
| 2141 | vmf->page->index = vmf->pgoff; | 2108 | { |
| 2109 | if (pgoff > data->nr_pages) | ||
| 2110 | return NULL; | ||
| 2142 | 2111 | ||
| 2143 | ret = 0; | 2112 | if (pgoff == 0) |
| 2144 | unlock: | 2113 | return virt_to_page(data->user_page); |
| 2145 | rcu_read_unlock(); | ||
| 2146 | 2114 | ||
| 2147 | return ret; | 2115 | return virt_to_page(data->data_pages[pgoff - 1]); |
| 2148 | } | 2116 | } |
| 2149 | 2117 | ||
| 2150 | static int perf_mmap_data_alloc(struct perf_event *event, int nr_pages) | 2118 | static struct perf_mmap_data * |
| 2119 | perf_mmap_data_alloc(struct perf_event *event, int nr_pages) | ||
| 2151 | { | 2120 | { |
| 2152 | struct perf_mmap_data *data; | 2121 | struct perf_mmap_data *data; |
| 2153 | unsigned long size; | 2122 | unsigned long size; |
| @@ -2172,19 +2141,10 @@ static int perf_mmap_data_alloc(struct perf_event *event, int nr_pages) | |||
| 2172 | goto fail_data_pages; | 2141 | goto fail_data_pages; |
| 2173 | } | 2142 | } |
| 2174 | 2143 | ||
| 2144 | data->data_order = 0; | ||
| 2175 | data->nr_pages = nr_pages; | 2145 | data->nr_pages = nr_pages; |
| 2176 | atomic_set(&data->lock, -1); | ||
| 2177 | |||
| 2178 | if (event->attr.watermark) { | ||
| 2179 | data->watermark = min_t(long, PAGE_SIZE * nr_pages, | ||
| 2180 | event->attr.wakeup_watermark); | ||
| 2181 | } | ||
| 2182 | if (!data->watermark) | ||
| 2183 | data->watermark = max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4); | ||
| 2184 | 2146 | ||
| 2185 | rcu_assign_pointer(event->data, data); | 2147 | return data; |
| 2186 | |||
| 2187 | return 0; | ||
| 2188 | 2148 | ||
| 2189 | fail_data_pages: | 2149 | fail_data_pages: |
| 2190 | for (i--; i >= 0; i--) | 2150 | for (i--; i >= 0; i--) |
| @@ -2196,7 +2156,7 @@ fail_user_page: | |||
| 2196 | kfree(data); | 2156 | kfree(data); |
| 2197 | 2157 | ||
| 2198 | fail: | 2158 | fail: |
| 2199 | return -ENOMEM; | 2159 | return NULL; |
| 2200 | } | 2160 | } |
| 2201 | 2161 | ||
| 2202 | static void perf_mmap_free_page(unsigned long addr) | 2162 | static void perf_mmap_free_page(unsigned long addr) |
| @@ -2207,28 +2167,169 @@ static void perf_mmap_free_page(unsigned long addr) | |||
| 2207 | __free_page(page); | 2167 | __free_page(page); |
| 2208 | } | 2168 | } |
| 2209 | 2169 | ||
| 2210 | static void __perf_mmap_data_free(struct rcu_head *rcu_head) | 2170 | static void perf_mmap_data_free(struct perf_mmap_data *data) |
| 2211 | { | 2171 | { |
| 2212 | struct perf_mmap_data *data; | ||
| 2213 | int i; | 2172 | int i; |
| 2214 | 2173 | ||
| 2215 | data = container_of(rcu_head, struct perf_mmap_data, rcu_head); | ||
| 2216 | |||
| 2217 | perf_mmap_free_page((unsigned long)data->user_page); | 2174 | perf_mmap_free_page((unsigned long)data->user_page); |
| 2218 | for (i = 0; i < data->nr_pages; i++) | 2175 | for (i = 0; i < data->nr_pages; i++) |
| 2219 | perf_mmap_free_page((unsigned long)data->data_pages[i]); | 2176 | perf_mmap_free_page((unsigned long)data->data_pages[i]); |
| 2177 | } | ||
| 2178 | |||
| 2179 | #else | ||
| 2180 | |||
| 2181 | /* | ||
| 2182 | * Back perf_mmap() with vmalloc memory. | ||
| 2183 | * | ||
| 2184 | * Required for architectures that have d-cache aliasing issues. | ||
| 2185 | */ | ||
| 2186 | |||
| 2187 | static struct page * | ||
| 2188 | perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff) | ||
| 2189 | { | ||
| 2190 | if (pgoff > (1UL << data->data_order)) | ||
| 2191 | return NULL; | ||
| 2192 | |||
| 2193 | return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE); | ||
| 2194 | } | ||
| 2195 | |||
| 2196 | static void perf_mmap_unmark_page(void *addr) | ||
| 2197 | { | ||
| 2198 | struct page *page = vmalloc_to_page(addr); | ||
| 2199 | |||
| 2200 | page->mapping = NULL; | ||
| 2201 | } | ||
| 2202 | |||
| 2203 | static void perf_mmap_data_free_work(struct work_struct *work) | ||
| 2204 | { | ||
| 2205 | struct perf_mmap_data *data; | ||
| 2206 | void *base; | ||
| 2207 | int i, nr; | ||
| 2208 | |||
| 2209 | data = container_of(work, struct perf_mmap_data, work); | ||
| 2210 | nr = 1 << data->data_order; | ||
| 2211 | |||
| 2212 | base = data->user_page; | ||
| 2213 | for (i = 0; i < nr + 1; i++) | ||
| 2214 | perf_mmap_unmark_page(base + (i * PAGE_SIZE)); | ||
| 2215 | |||
| 2216 | vfree(base); | ||
| 2217 | } | ||
| 2220 | 2218 | ||
| 2219 | static void perf_mmap_data_free(struct perf_mmap_data *data) | ||
| 2220 | { | ||
| 2221 | schedule_work(&data->work); | ||
| 2222 | } | ||
| 2223 | |||
| 2224 | static struct perf_mmap_data * | ||
| 2225 | perf_mmap_data_alloc(struct perf_event *event, int nr_pages) | ||
| 2226 | { | ||
| 2227 | struct perf_mmap_data *data; | ||
| 2228 | unsigned long size; | ||
| 2229 | void *all_buf; | ||
| 2230 | |||
| 2231 | WARN_ON(atomic_read(&event->mmap_count)); | ||
| 2232 | |||
| 2233 | size = sizeof(struct perf_mmap_data); | ||
| 2234 | size += sizeof(void *); | ||
| 2235 | |||
| 2236 | data = kzalloc(size, GFP_KERNEL); | ||
| 2237 | if (!data) | ||
| 2238 | goto fail; | ||
| 2239 | |||
| 2240 | INIT_WORK(&data->work, perf_mmap_data_free_work); | ||
| 2241 | |||
| 2242 | all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE); | ||
| 2243 | if (!all_buf) | ||
| 2244 | goto fail_all_buf; | ||
| 2245 | |||
| 2246 | data->user_page = all_buf; | ||
| 2247 | data->data_pages[0] = all_buf + PAGE_SIZE; | ||
| 2248 | data->data_order = ilog2(nr_pages); | ||
| 2249 | data->nr_pages = 1; | ||
| 2250 | |||
| 2251 | return data; | ||
| 2252 | |||
| 2253 | fail_all_buf: | ||
| 2254 | kfree(data); | ||
| 2255 | |||
| 2256 | fail: | ||
| 2257 | return NULL; | ||
| 2258 | } | ||
| 2259 | |||
| 2260 | #endif | ||
| 2261 | |||
| 2262 | static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
| 2263 | { | ||
| 2264 | struct perf_event *event = vma->vm_file->private_data; | ||
| 2265 | struct perf_mmap_data *data; | ||
| 2266 | int ret = VM_FAULT_SIGBUS; | ||
| 2267 | |||
| 2268 | if (vmf->flags & FAULT_FLAG_MKWRITE) { | ||
| 2269 | if (vmf->pgoff == 0) | ||
| 2270 | ret = 0; | ||
| 2271 | return ret; | ||
| 2272 | } | ||
| 2273 | |||
| 2274 | rcu_read_lock(); | ||
| 2275 | data = rcu_dereference(event->data); | ||
| 2276 | if (!data) | ||
| 2277 | goto unlock; | ||
| 2278 | |||
| 2279 | if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) | ||
| 2280 | goto unlock; | ||
| 2281 | |||
| 2282 | vmf->page = perf_mmap_to_page(data, vmf->pgoff); | ||
| 2283 | if (!vmf->page) | ||
| 2284 | goto unlock; | ||
| 2285 | |||
| 2286 | get_page(vmf->page); | ||
| 2287 | vmf->page->mapping = vma->vm_file->f_mapping; | ||
| 2288 | vmf->page->index = vmf->pgoff; | ||
| 2289 | |||
| 2290 | ret = 0; | ||
| 2291 | unlock: | ||
| 2292 | rcu_read_unlock(); | ||
| 2293 | |||
| 2294 | return ret; | ||
| 2295 | } | ||
| 2296 | |||
| 2297 | static void | ||
| 2298 | perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data) | ||
| 2299 | { | ||
| 2300 | long max_size = perf_data_size(data); | ||
| 2301 | |||
| 2302 | atomic_set(&data->lock, -1); | ||
| 2303 | |||
| 2304 | if (event->attr.watermark) { | ||
| 2305 | data->watermark = min_t(long, max_size, | ||
| 2306 | event->attr.wakeup_watermark); | ||
| 2307 | } | ||
| 2308 | |||
| 2309 | if (!data->watermark) | ||
| 2310 | data->watermark = max_t(long, PAGE_SIZE, max_size / 2); | ||
| 2311 | |||
| 2312 | |||
| 2313 | rcu_assign_pointer(event->data, data); | ||
| 2314 | } | ||
| 2315 | |||
| 2316 | static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head) | ||
| 2317 | { | ||
| 2318 | struct perf_mmap_data *data; | ||
| 2319 | |||
| 2320 | data = container_of(rcu_head, struct perf_mmap_data, rcu_head); | ||
| 2321 | perf_mmap_data_free(data); | ||
| 2221 | kfree(data); | 2322 | kfree(data); |
| 2222 | } | 2323 | } |
| 2223 | 2324 | ||
| 2224 | static void perf_mmap_data_free(struct perf_event *event) | 2325 | static void perf_mmap_data_release(struct perf_event *event) |
| 2225 | { | 2326 | { |
| 2226 | struct perf_mmap_data *data = event->data; | 2327 | struct perf_mmap_data *data = event->data; |
| 2227 | 2328 | ||
| 2228 | WARN_ON(atomic_read(&event->mmap_count)); | 2329 | WARN_ON(atomic_read(&event->mmap_count)); |
| 2229 | 2330 | ||
| 2230 | rcu_assign_pointer(event->data, NULL); | 2331 | rcu_assign_pointer(event->data, NULL); |
| 2231 | call_rcu(&data->rcu_head, __perf_mmap_data_free); | 2332 | call_rcu(&data->rcu_head, perf_mmap_data_free_rcu); |
| 2232 | } | 2333 | } |
| 2233 | 2334 | ||
| 2234 | static void perf_mmap_open(struct vm_area_struct *vma) | 2335 | static void perf_mmap_open(struct vm_area_struct *vma) |
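Worked example, not part of the patch: perf_data_size() yields the same total for both backings, assuming the power-of-two page count that perf_mmap() requires.

/*
 * nr_pages requested = 8, with 4 KiB pages:
 *   page backing:    nr_pages = 8, data_order = 0
 *                    -> 8 << (12 + 0) = 32768 bytes
 *   vmalloc backing: nr_pages = 1, data_order = ilog2(8) = 3
 *                    -> 1 << (12 + 3) = 32768 bytes
 * Either way the data area is 8 pages; the user_page is accounted
 * separately in front of it.
 */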
| @@ -2244,11 +2345,12 @@ static void perf_mmap_close(struct vm_area_struct *vma) | |||
| 2244 | 2345 | ||
| 2245 | WARN_ON_ONCE(event->ctx->parent_ctx); | 2346 | WARN_ON_ONCE(event->ctx->parent_ctx); |
| 2246 | if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { | 2347 | if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { |
| 2348 | unsigned long size = perf_data_size(event->data); | ||
| 2247 | struct user_struct *user = current_user(); | 2349 | struct user_struct *user = current_user(); |
| 2248 | 2350 | ||
| 2249 | atomic_long_sub(event->data->nr_pages + 1, &user->locked_vm); | 2351 | atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); |
| 2250 | vma->vm_mm->locked_vm -= event->data->nr_locked; | 2352 | vma->vm_mm->locked_vm -= event->data->nr_locked; |
| 2251 | perf_mmap_data_free(event); | 2353 | perf_mmap_data_release(event); |
| 2252 | mutex_unlock(&event->mmap_mutex); | 2354 | mutex_unlock(&event->mmap_mutex); |
| 2253 | } | 2355 | } |
| 2254 | } | 2356 | } |
| @@ -2266,6 +2368,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) | |||
| 2266 | unsigned long user_locked, user_lock_limit; | 2368 | unsigned long user_locked, user_lock_limit; |
| 2267 | struct user_struct *user = current_user(); | 2369 | struct user_struct *user = current_user(); |
| 2268 | unsigned long locked, lock_limit; | 2370 | unsigned long locked, lock_limit; |
| 2371 | struct perf_mmap_data *data; | ||
| 2269 | unsigned long vma_size; | 2372 | unsigned long vma_size; |
| 2270 | unsigned long nr_pages; | 2373 | unsigned long nr_pages; |
| 2271 | long user_extra, extra; | 2374 | long user_extra, extra; |
| @@ -2328,10 +2431,15 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) | |||
| 2328 | } | 2431 | } |
| 2329 | 2432 | ||
| 2330 | WARN_ON(event->data); | 2433 | WARN_ON(event->data); |
| 2331 | ret = perf_mmap_data_alloc(event, nr_pages); | 2434 | |
| 2332 | if (ret) | 2435 | data = perf_mmap_data_alloc(event, nr_pages); |
| 2436 | ret = -ENOMEM; | ||
| 2437 | if (!data) | ||
| 2333 | goto unlock; | 2438 | goto unlock; |
| 2334 | 2439 | ||
| 2440 | ret = 0; | ||
| 2441 | perf_mmap_data_init(event, data); | ||
| 2442 | |||
| 2335 | atomic_set(&event->mmap_count, 1); | 2443 | atomic_set(&event->mmap_count, 1); |
| 2336 | atomic_long_add(user_extra, &user->locked_vm); | 2444 | atomic_long_add(user_extra, &user->locked_vm); |
| 2337 | vma->vm_mm->locked_vm += extra; | 2445 | vma->vm_mm->locked_vm += extra; |
| @@ -2519,7 +2627,7 @@ static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail, | |||
| 2519 | if (!data->writable) | 2627 | if (!data->writable) |
| 2520 | return true; | 2628 | return true; |
| 2521 | 2629 | ||
| 2522 | mask = (data->nr_pages << PAGE_SHIFT) - 1; | 2630 | mask = perf_data_size(data) - 1; |
| 2523 | 2631 | ||
| 2524 | offset = (offset - tail) & mask; | 2632 | offset = (offset - tail) & mask; |
| 2525 | head = (head - tail) & mask; | 2633 | head = (head - tail) & mask; |
| @@ -2624,7 +2732,7 @@ void perf_output_copy(struct perf_output_handle *handle, | |||
| 2624 | const void *buf, unsigned int len) | 2732 | const void *buf, unsigned int len) |
| 2625 | { | 2733 | { |
| 2626 | unsigned int pages_mask; | 2734 | unsigned int pages_mask; |
| 2627 | unsigned int offset; | 2735 | unsigned long offset; |
| 2628 | unsigned int size; | 2736 | unsigned int size; |
| 2629 | void **pages; | 2737 | void **pages; |
| 2630 | 2738 | ||
| @@ -2633,12 +2741,14 @@ void perf_output_copy(struct perf_output_handle *handle, | |||
| 2633 | pages = handle->data->data_pages; | 2741 | pages = handle->data->data_pages; |
| 2634 | 2742 | ||
| 2635 | do { | 2743 | do { |
| 2636 | unsigned int page_offset; | 2744 | unsigned long page_offset; |
| 2745 | unsigned long page_size; | ||
| 2637 | int nr; | 2746 | int nr; |
| 2638 | 2747 | ||
| 2639 | nr = (offset >> PAGE_SHIFT) & pages_mask; | 2748 | nr = (offset >> PAGE_SHIFT) & pages_mask; |
| 2640 | page_offset = offset & (PAGE_SIZE - 1); | 2749 | page_size = 1UL << (handle->data->data_order + PAGE_SHIFT); |
| 2641 | size = min_t(unsigned int, PAGE_SIZE - page_offset, len); | 2750 | page_offset = offset & (page_size - 1); |
| 2751 | size = min_t(unsigned int, page_size - page_offset, len); | ||
| 2642 | 2752 | ||
| 2643 | memcpy(pages[nr] + page_offset, buf, size); | 2753 | memcpy(pages[nr] + page_offset, buf, size); |
| 2644 | 2754 | ||
| @@ -4781,9 +4891,7 @@ int perf_event_init_task(struct task_struct *child) | |||
| 4781 | * We dont have to disable NMIs - we are only looking at | 4891 | * We dont have to disable NMIs - we are only looking at |
| 4782 | * the list, not manipulating it: | 4892 | * the list, not manipulating it: |
| 4783 | */ | 4893 | */ |
| 4784 | list_for_each_entry_rcu(event, &parent_ctx->event_list, event_entry) { | 4894 | list_for_each_entry(event, &parent_ctx->group_list, group_entry) { |
| 4785 | if (event != event->group_leader) | ||
| 4786 | continue; | ||
| 4787 | 4895 | ||
| 4788 | if (!event->attr.inherit) { | 4896 | if (!event->attr.inherit) { |
| 4789 | inherited_all = 0; | 4897 | inherited_all = 0; |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index e0f59a21c061..89aed5933ed4 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
| @@ -231,6 +231,13 @@ void tick_nohz_stop_sched_tick(int inidle) | |||
| 231 | if (!inidle && !ts->inidle) | 231 | if (!inidle && !ts->inidle) |
| 232 | goto end; | 232 | goto end; |
| 233 | 233 | ||
| 234 | /* | ||
| 235 | * Set ts->inidle unconditionally. Even if the system did not | ||
| 236 | * switch to NOHZ mode, the cpu frequency governors rely on the | ||
| 237 | * update of the idle time accounting in tick_nohz_start_idle(). | ||
| 238 | */ | ||
| 239 | ts->inidle = 1; | ||
| 240 | |||
| 234 | now = tick_nohz_start_idle(ts); | 241 | now = tick_nohz_start_idle(ts); |
| 235 | 242 | ||
| 236 | /* | 243 | /* |
| @@ -248,8 +255,6 @@ void tick_nohz_stop_sched_tick(int inidle) | |||
| 248 | if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) | 255 | if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) |
| 249 | goto end; | 256 | goto end; |
| 250 | 257 | ||
| 251 | ts->inidle = 1; | ||
| 252 | |||
| 253 | if (need_resched()) | 258 | if (need_resched()) |
| 254 | goto end; | 259 | goto end; |
| 255 | 260 | ||
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 3eb159c277c8..d9d6206e0b14 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
| @@ -856,6 +856,37 @@ static void blk_add_trace_remap(struct request_queue *q, struct bio *bio, | |||
| 856 | } | 856 | } |
| 857 | 857 | ||
| 858 | /** | 858 | /** |
| 859 | * blk_add_trace_rq_remap - Add a trace for a request-remap operation | ||
| 860 | * @q: queue the io is for | ||
| 861 | * @rq: the source request | ||
| 862 | * @dev: target device | ||
| 863 | * @from: source sector | ||
| 864 | * | ||
| 865 | * Description: | ||
| 866 | * Device mapper remaps requests to other devices. | ||
| 867 | * Add a trace for that action. | ||
| 868 | * | ||
| 869 | **/ | ||
| 870 | static void blk_add_trace_rq_remap(struct request_queue *q, | ||
| 871 | struct request *rq, dev_t dev, | ||
| 872 | sector_t from) | ||
| 873 | { | ||
| 874 | struct blk_trace *bt = q->blk_trace; | ||
| 875 | struct blk_io_trace_remap r; | ||
| 876 | |||
| 877 | if (likely(!bt)) | ||
| 878 | return; | ||
| 879 | |||
| 880 | r.device_from = cpu_to_be32(dev); | ||
| 881 | r.device_to = cpu_to_be32(disk_devt(rq->rq_disk)); | ||
| 882 | r.sector_from = cpu_to_be64(from); | ||
| 883 | |||
| 884 | __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), | ||
| 885 | rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors, | ||
| 886 | sizeof(r), &r); | ||
| 887 | } | ||
| 888 | |||
| 889 | /** | ||
| 859 | * blk_add_driver_data - Add binary message with driver-specific data | 890 | * blk_add_driver_data - Add binary message with driver-specific data |
| 860 | * @q: queue the io is for | 891 | * @q: queue the io is for |
| 861 | * @rq: io request | 892 | * @rq: io request |
| @@ -922,10 +953,13 @@ static void blk_register_tracepoints(void) | |||
| 922 | WARN_ON(ret); | 953 | WARN_ON(ret); |
| 923 | ret = register_trace_block_remap(blk_add_trace_remap); | 954 | ret = register_trace_block_remap(blk_add_trace_remap); |
| 924 | WARN_ON(ret); | 955 | WARN_ON(ret); |
| 956 | ret = register_trace_block_rq_remap(blk_add_trace_rq_remap); | ||
| 957 | WARN_ON(ret); | ||
| 925 | } | 958 | } |
| 926 | 959 | ||
| 927 | static void blk_unregister_tracepoints(void) | 960 | static void blk_unregister_tracepoints(void) |
| 928 | { | 961 | { |
| 962 | unregister_trace_block_rq_remap(blk_add_trace_rq_remap); | ||
| 929 | unregister_trace_block_remap(blk_add_trace_remap); | 963 | unregister_trace_block_remap(blk_add_trace_remap); |
| 930 | unregister_trace_block_split(blk_add_trace_split); | 964 | unregister_trace_block_split(blk_add_trace_split); |
| 931 | unregister_trace_block_unplug_io(blk_add_trace_unplug_io); | 965 | unregister_trace_block_unplug_io(blk_add_trace_unplug_io); |
| @@ -1657,6 +1691,11 @@ int blk_trace_init_sysfs(struct device *dev) | |||
| 1657 | return sysfs_create_group(&dev->kobj, &blk_trace_attr_group); | 1691 | return sysfs_create_group(&dev->kobj, &blk_trace_attr_group); |
| 1658 | } | 1692 | } |
| 1659 | 1693 | ||
| 1694 | void blk_trace_remove_sysfs(struct device *dev) | ||
| 1695 | { | ||
| 1696 | sysfs_remove_group(&dev->kobj, &blk_trace_attr_group); | ||
| 1697 | } | ||
| 1698 | |||
| 1660 | #endif /* CONFIG_BLK_DEV_IO_TRACE */ | 1699 | #endif /* CONFIG_BLK_DEV_IO_TRACE */ |
| 1661 | 1700 | ||
| 1662 | #ifdef CONFIG_EVENT_TRACING | 1701 | #ifdef CONFIG_EVENT_TRACING |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index f136fe5da07a..37ba67e33265 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -225,7 +225,11 @@ static void ftrace_update_pid_func(void) | |||
| 225 | if (ftrace_trace_function == ftrace_stub) | 225 | if (ftrace_trace_function == ftrace_stub) |
| 226 | return; | 226 | return; |
| 227 | 227 | ||
| 228 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
| 228 | func = ftrace_trace_function; | 229 | func = ftrace_trace_function; |
| 230 | #else | ||
| 231 | func = __ftrace_trace_function; | ||
| 232 | #endif | ||
| 229 | 233 | ||
| 230 | if (ftrace_pid_trace) { | 234 | if (ftrace_pid_trace) { |
| 231 | set_ftrace_pid_function(func); | 235 | set_ftrace_pid_function(func); |
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c index 81b1645c8549..a91da69f153a 100644 --- a/kernel/trace/kmemtrace.c +++ b/kernel/trace/kmemtrace.c | |||
| @@ -501,7 +501,7 @@ static int __init init_kmem_tracer(void) | |||
| 501 | return 1; | 501 | return 1; |
| 502 | } | 502 | } |
| 503 | 503 | ||
| 504 | if (!register_tracer(&kmem_tracer)) { | 504 | if (register_tracer(&kmem_tracer) != 0) { |
| 505 | pr_warning("Warning: could not register the kmem tracer\n"); | 505 | pr_warning("Warning: could not register the kmem tracer\n"); |
| 506 | return 1; | 506 | return 1; |
| 507 | } | 507 | } |
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 9fbce6c9d2e1..527e17eae575 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
| @@ -166,7 +166,7 @@ int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s) | |||
| 166 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" | 166 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" |
| 167 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n", | 167 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n", |
| 168 | SYSCALL_FIELD(int, nr), | 168 | SYSCALL_FIELD(int, nr), |
| 169 | SYSCALL_FIELD(unsigned long, ret)); | 169 | SYSCALL_FIELD(long, ret)); |
| 170 | if (!ret) | 170 | if (!ret) |
| 171 | return 0; | 171 | return 0; |
| 172 | 172 | ||
| @@ -212,7 +212,7 @@ int syscall_exit_define_fields(struct ftrace_event_call *call) | |||
| 212 | if (ret) | 212 | if (ret) |
| 213 | return ret; | 213 | return ret; |
| 214 | 214 | ||
| 215 | ret = trace_define_field(call, SYSCALL_FIELD(unsigned long, ret), 0, | 215 | ret = trace_define_field(call, SYSCALL_FIELD(long, ret), 0, |
| 216 | FILTER_OTHER); | 216 | FILTER_OTHER); |
| 217 | 217 | ||
| 218 | return ret; | 218 | return ret; |
diff --git a/mm/Kconfig b/mm/Kconfig index edd300aca173..57963c6063d1 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
| @@ -224,7 +224,9 @@ config KSM | |||
| 224 | the many instances by a single resident page with that content, so | 224 | the many instances by a single resident page with that content, so |
| 225 | saving memory until one or another app needs to modify the content. | 225 | saving memory until one or another app needs to modify the content. |
| 226 | Recommended for use with KVM, or with other duplicative applications. | 226 | Recommended for use with KVM, or with other duplicative applications. |
| 227 | See Documentation/vm/ksm.txt for more information. | 227 | See Documentation/vm/ksm.txt for more information: KSM is inactive |
| 228 | until a program has madvised that an area is MADV_MERGEABLE, and | ||
| 229 | root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set). | ||
| 228 | 230 | ||
| 229 | config DEFAULT_MMAP_MIN_ADDR | 231 | config DEFAULT_MMAP_MIN_ADDR |
| 230 | int "Low address space to protect from user allocation" | 232 | int "Low address space to protect from user allocation" |
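The expanded KSM help text names two preconditions before any merging happens. A hedged userspace sketch of both steps, not part of the patch — the mapping size, flags and error handling are illustrative, and MADV_MERGEABLE requires headers that already define it:

/* Step 1 (any process): opt a private anonymous range into KSM.      */
/* Step 2 (root, needs CONFIG_SYSFS): echo 1 > /sys/kernel/mm/ksm/run */
#include <stddef.h>
#include <sys/mman.h>

int example_make_mergeable(size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return -1;
	/* ksmd may now merge identical pages inside [p, p + len) */
	return madvise(p, len, MADV_MERGEABLE);
}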
diff --git a/mm/ksm.c b/mm/ksm.c --- a/mm/ksm.c +++ b/mm/ksm.c | |||
| @@ -184,11 +184,6 @@ static DEFINE_SPINLOCK(ksm_mmlist_lock); | |||
| 184 | sizeof(struct __struct), __alignof__(struct __struct),\ | 184 | sizeof(struct __struct), __alignof__(struct __struct),\ |
| 185 | (__flags), NULL) | 185 | (__flags), NULL) |
| 186 | 186 | ||
| 187 | static void __init ksm_init_max_kernel_pages(void) | ||
| 188 | { | ||
| 189 | ksm_max_kernel_pages = nr_free_buffer_pages() / 4; | ||
| 190 | } | ||
| 191 | |||
| 192 | static int __init ksm_slab_init(void) | 187 | static int __init ksm_slab_init(void) |
| 193 | { | 188 | { |
| 194 | rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0); | 189 | rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0); |
| @@ -1673,7 +1668,7 @@ static int __init ksm_init(void) | |||
| 1673 | struct task_struct *ksm_thread; | 1668 | struct task_struct *ksm_thread; |
| 1674 | int err; | 1669 | int err; |
| 1675 | 1670 | ||
| 1676 | ksm_init_max_kernel_pages(); | 1671 | ksm_max_kernel_pages = totalram_pages / 4; |
| 1677 | 1672 | ||
| 1678 | err = ksm_slab_init(); | 1673 | err = ksm_slab_init(); |
| 1679 | if (err) | 1674 | if (err) |
| @@ -1697,6 +1692,9 @@ static int __init ksm_init(void) | |||
| 1697 | kthread_stop(ksm_thread); | 1692 | kthread_stop(ksm_thread); |
| 1698 | goto out_free2; | 1693 | goto out_free2; |
| 1699 | } | 1694 | } |
| 1695 | #else | ||
| 1696 | ksm_run = KSM_RUN_MERGE; /* no way for user to start it */ | ||
| 1697 | |||
| 1700 | #endif /* CONFIG_SYSFS */ | 1698 | #endif /* CONFIG_SYSFS */ |
| 1701 | 1699 | ||
| 1702 | return 0; | 1700 | return 0; |
diff --git a/mm/swapfile.c b/mm/swapfile.c index 4de7f02f820b..a1bc6b9af9a2 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
| @@ -1974,12 +1974,14 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) | |||
| 1974 | goto bad_swap; | 1974 | goto bad_swap; |
| 1975 | } | 1975 | } |
| 1976 | 1976 | ||
| 1977 | if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { | 1977 | if (p->bdev) { |
| 1978 | p->flags |= SWP_SOLIDSTATE; | 1978 | if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { |
| 1979 | p->cluster_next = 1 + (random32() % p->highest_bit); | 1979 | p->flags |= SWP_SOLIDSTATE; |
| 1980 | p->cluster_next = 1 + (random32() % p->highest_bit); | ||
| 1981 | } | ||
| 1982 | if (discard_swap(p) == 0) | ||
| 1983 | p->flags |= SWP_DISCARDABLE; | ||
| 1980 | } | 1984 | } |
| 1981 | if (discard_swap(p) == 0) | ||
| 1982 | p->flags |= SWP_DISCARDABLE; | ||
| 1983 | 1985 | ||
| 1984 | mutex_lock(&swapon_mutex); | 1986 | mutex_lock(&swapon_mutex); |
| 1985 | spin_lock(&swap_lock); | 1987 | spin_lock(&swap_lock); |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 69511e663234..5e7aed0802bf 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
| @@ -25,10 +25,10 @@ | |||
| 25 | #include <linux/rcupdate.h> | 25 | #include <linux/rcupdate.h> |
| 26 | #include <linux/pfn.h> | 26 | #include <linux/pfn.h> |
| 27 | #include <linux/kmemleak.h> | 27 | #include <linux/kmemleak.h> |
| 28 | #include <linux/highmem.h> | ||
| 29 | #include <asm/atomic.h> | 28 | #include <asm/atomic.h> |
| 30 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
| 31 | #include <asm/tlbflush.h> | 30 | #include <asm/tlbflush.h> |
| 31 | #include <asm/shmparam.h> | ||
| 32 | 32 | ||
| 33 | 33 | ||
| 34 | /*** Page table manipulation functions ***/ | 34 | /*** Page table manipulation functions ***/ |
| @@ -1156,12 +1156,11 @@ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, | |||
| 1156 | } | 1156 | } |
| 1157 | 1157 | ||
| 1158 | static struct vm_struct *__get_vm_area_node(unsigned long size, | 1158 | static struct vm_struct *__get_vm_area_node(unsigned long size, |
| 1159 | unsigned long flags, unsigned long start, unsigned long end, | 1159 | unsigned long align, unsigned long flags, unsigned long start, |
| 1160 | int node, gfp_t gfp_mask, void *caller) | 1160 | unsigned long end, int node, gfp_t gfp_mask, void *caller) |
| 1161 | { | 1161 | { |
| 1162 | static struct vmap_area *va; | 1162 | static struct vmap_area *va; |
| 1163 | struct vm_struct *area; | 1163 | struct vm_struct *area; |
| 1164 | unsigned long align = 1; | ||
| 1165 | 1164 | ||
| 1166 | BUG_ON(in_interrupt()); | 1165 | BUG_ON(in_interrupt()); |
| 1167 | if (flags & VM_IOREMAP) { | 1166 | if (flags & VM_IOREMAP) { |
| @@ -1201,7 +1200,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, | |||
| 1201 | struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, | 1200 | struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, |
| 1202 | unsigned long start, unsigned long end) | 1201 | unsigned long start, unsigned long end) |
| 1203 | { | 1202 | { |
| 1204 | return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL, | 1203 | return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL, |
| 1205 | __builtin_return_address(0)); | 1204 | __builtin_return_address(0)); |
| 1206 | } | 1205 | } |
| 1207 | EXPORT_SYMBOL_GPL(__get_vm_area); | 1206 | EXPORT_SYMBOL_GPL(__get_vm_area); |
| @@ -1210,7 +1209,7 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, | |||
| 1210 | unsigned long start, unsigned long end, | 1209 | unsigned long start, unsigned long end, |
| 1211 | void *caller) | 1210 | void *caller) |
| 1212 | { | 1211 | { |
| 1213 | return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL, | 1212 | return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL, |
| 1214 | caller); | 1213 | caller); |
| 1215 | } | 1214 | } |
| 1216 | 1215 | ||
| @@ -1225,22 +1224,22 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, | |||
| 1225 | */ | 1224 | */ |
| 1226 | struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) | 1225 | struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) |
| 1227 | { | 1226 | { |
| 1228 | return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, | 1227 | return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, |
| 1229 | -1, GFP_KERNEL, __builtin_return_address(0)); | 1228 | -1, GFP_KERNEL, __builtin_return_address(0)); |
| 1230 | } | 1229 | } |
| 1231 | 1230 | ||
| 1232 | struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, | 1231 | struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, |
| 1233 | void *caller) | 1232 | void *caller) |
| 1234 | { | 1233 | { |
| 1235 | return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, | 1234 | return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, |
| 1236 | -1, GFP_KERNEL, caller); | 1235 | -1, GFP_KERNEL, caller); |
| 1237 | } | 1236 | } |
| 1238 | 1237 | ||
| 1239 | struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, | 1238 | struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, |
| 1240 | int node, gfp_t gfp_mask) | 1239 | int node, gfp_t gfp_mask) |
| 1241 | { | 1240 | { |
| 1242 | return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node, | 1241 | return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, |
| 1243 | gfp_mask, __builtin_return_address(0)); | 1242 | node, gfp_mask, __builtin_return_address(0)); |
| 1244 | } | 1243 | } |
| 1245 | 1244 | ||
| 1246 | static struct vm_struct *find_vm_area(const void *addr) | 1245 | static struct vm_struct *find_vm_area(const void *addr) |
| @@ -1403,7 +1402,8 @@ void *vmap(struct page **pages, unsigned int count, | |||
| 1403 | } | 1402 | } |
| 1404 | EXPORT_SYMBOL(vmap); | 1403 | EXPORT_SYMBOL(vmap); |
| 1405 | 1404 | ||
| 1406 | static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, | 1405 | static void *__vmalloc_node(unsigned long size, unsigned long align, |
| 1406 | gfp_t gfp_mask, pgprot_t prot, | ||
| 1407 | int node, void *caller); | 1407 | int node, void *caller); |
| 1408 | static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, | 1408 | static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, |
| 1409 | pgprot_t prot, int node, void *caller) | 1409 | pgprot_t prot, int node, void *caller) |
| @@ -1417,7 +1417,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, | |||
| 1417 | area->nr_pages = nr_pages; | 1417 | area->nr_pages = nr_pages; |
| 1418 | /* Please note that the recursion is strictly bounded. */ | 1418 | /* Please note that the recursion is strictly bounded. */ |
| 1419 | if (array_size > PAGE_SIZE) { | 1419 | if (array_size > PAGE_SIZE) { |
| 1420 | pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO, | 1420 | pages = __vmalloc_node(array_size, 1, gfp_mask | __GFP_ZERO, |
| 1421 | PAGE_KERNEL, node, caller); | 1421 | PAGE_KERNEL, node, caller); |
| 1422 | area->flags |= VM_VPAGES; | 1422 | area->flags |= VM_VPAGES; |
| 1423 | } else { | 1423 | } else { |
| @@ -1476,6 +1476,7 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) | |||
| 1476 | /** | 1476 | /** |
| 1477 | * __vmalloc_node - allocate virtually contiguous memory | 1477 | * __vmalloc_node - allocate virtually contiguous memory |
| 1478 | * @size: allocation size | 1478 | * @size: allocation size |
| 1479 | * @align: desired alignment | ||
| 1479 | * @gfp_mask: flags for the page level allocator | 1480 | * @gfp_mask: flags for the page level allocator |
| 1480 | * @prot: protection mask for the allocated pages | 1481 | * @prot: protection mask for the allocated pages |
| 1481 | * @node: node to use for allocation or -1 | 1482 | * @node: node to use for allocation or -1 |
| @@ -1485,8 +1486,9 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) | |||
| 1485 | * allocator with @gfp_mask flags. Map them into contiguous | 1486 | * allocator with @gfp_mask flags. Map them into contiguous |
| 1486 | * kernel virtual space, using a pagetable protection of @prot. | 1487 | * kernel virtual space, using a pagetable protection of @prot. |
| 1487 | */ | 1488 | */ |
| 1488 | static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, | 1489 | static void *__vmalloc_node(unsigned long size, unsigned long align, |
| 1489 | int node, void *caller) | 1490 | gfp_t gfp_mask, pgprot_t prot, |
| 1491 | int node, void *caller) | ||
| 1490 | { | 1492 | { |
| 1491 | struct vm_struct *area; | 1493 | struct vm_struct *area; |
| 1492 | void *addr; | 1494 | void *addr; |
| @@ -1496,8 +1498,8 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, | |||
| 1496 | if (!size || (size >> PAGE_SHIFT) > totalram_pages) | 1498 | if (!size || (size >> PAGE_SHIFT) > totalram_pages) |
| 1497 | return NULL; | 1499 | return NULL; |
| 1498 | 1500 | ||
| 1499 | area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END, | 1501 | area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START, |
| 1500 | node, gfp_mask, caller); | 1502 | VMALLOC_END, node, gfp_mask, caller); |
| 1501 | 1503 | ||
| 1502 | if (!area) | 1504 | if (!area) |
| 1503 | return NULL; | 1505 | return NULL; |
| @@ -1516,7 +1518,7 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, | |||
| 1516 | 1518 | ||
| 1517 | void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) | 1519 | void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) |
| 1518 | { | 1520 | { |
| 1519 | return __vmalloc_node(size, gfp_mask, prot, -1, | 1521 | return __vmalloc_node(size, 1, gfp_mask, prot, -1, |
| 1520 | __builtin_return_address(0)); | 1522 | __builtin_return_address(0)); |
| 1521 | } | 1523 | } |
| 1522 | EXPORT_SYMBOL(__vmalloc); | 1524 | EXPORT_SYMBOL(__vmalloc); |
| @@ -1532,7 +1534,7 @@ EXPORT_SYMBOL(__vmalloc); | |||
| 1532 | */ | 1534 | */ |
| 1533 | void *vmalloc(unsigned long size) | 1535 | void *vmalloc(unsigned long size) |
| 1534 | { | 1536 | { |
| 1535 | return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, | 1537 | return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, |
| 1536 | -1, __builtin_return_address(0)); | 1538 | -1, __builtin_return_address(0)); |
| 1537 | } | 1539 | } |
| 1538 | EXPORT_SYMBOL(vmalloc); | 1540 | EXPORT_SYMBOL(vmalloc); |
| @@ -1549,7 +1551,8 @@ void *vmalloc_user(unsigned long size) | |||
| 1549 | struct vm_struct *area; | 1551 | struct vm_struct *area; |
| 1550 | void *ret; | 1552 | void *ret; |
| 1551 | 1553 | ||
| 1552 | ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, | 1554 | ret = __vmalloc_node(size, SHMLBA, |
| 1555 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, | ||
| 1553 | PAGE_KERNEL, -1, __builtin_return_address(0)); | 1556 | PAGE_KERNEL, -1, __builtin_return_address(0)); |
| 1554 | if (ret) { | 1557 | if (ret) { |
| 1555 | area = find_vm_area(ret); | 1558 | area = find_vm_area(ret); |
| @@ -1572,7 +1575,7 @@ EXPORT_SYMBOL(vmalloc_user); | |||
| 1572 | */ | 1575 | */ |
| 1573 | void *vmalloc_node(unsigned long size, int node) | 1576 | void *vmalloc_node(unsigned long size, int node) |
| 1574 | { | 1577 | { |
| 1575 | return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, | 1578 | return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, |
| 1576 | node, __builtin_return_address(0)); | 1579 | node, __builtin_return_address(0)); |
| 1577 | } | 1580 | } |
| 1578 | EXPORT_SYMBOL(vmalloc_node); | 1581 | EXPORT_SYMBOL(vmalloc_node); |
| @@ -1595,7 +1598,7 @@ EXPORT_SYMBOL(vmalloc_node); | |||
| 1595 | 1598 | ||
| 1596 | void *vmalloc_exec(unsigned long size) | 1599 | void *vmalloc_exec(unsigned long size) |
| 1597 | { | 1600 | { |
| 1598 | return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, | 1601 | return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, |
| 1599 | -1, __builtin_return_address(0)); | 1602 | -1, __builtin_return_address(0)); |
| 1600 | } | 1603 | } |
| 1601 | 1604 | ||
| @@ -1616,7 +1619,7 @@ void *vmalloc_exec(unsigned long size) | |||
| 1616 | */ | 1619 | */ |
| 1617 | void *vmalloc_32(unsigned long size) | 1620 | void *vmalloc_32(unsigned long size) |
| 1618 | { | 1621 | { |
| 1619 | return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL, | 1622 | return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL, |
| 1620 | -1, __builtin_return_address(0)); | 1623 | -1, __builtin_return_address(0)); |
| 1621 | } | 1624 | } |
| 1622 | EXPORT_SYMBOL(vmalloc_32); | 1625 | EXPORT_SYMBOL(vmalloc_32); |
| @@ -1633,7 +1636,7 @@ void *vmalloc_32_user(unsigned long size) | |||
| 1633 | struct vm_struct *area; | 1636 | struct vm_struct *area; |
| 1634 | void *ret; | 1637 | void *ret; |
| 1635 | 1638 | ||
| 1636 | ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, | 1639 | ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, |
| 1637 | -1, __builtin_return_address(0)); | 1640 | -1, __builtin_return_address(0)); |
| 1638 | if (ret) { | 1641 | if (ret) { |
| 1639 | area = find_vm_area(ret); | 1642 | area = find_vm_area(ret); |
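With the new align parameter, vmalloc_user() requests SHMLBA alignment, so a buffer later mapped into userspace cannot alias in a virtually indexed D-cache — this is what the vmalloc-backed perf buffers above rely on. A hedged sketch of the usual consumer pattern, not part of the patch, with hypothetical names and no buffer lifetime management:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *example_buf;	/* allocated once; freed elsewhere with vfree() */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;

	if (!example_buf)
		example_buf = vmalloc_user(len);	/* zeroed, SHMLBA-aligned */
	if (!example_buf)
		return -ENOMEM;
	/* expose the whole buffer, starting at its first page */
	return remap_vmalloc_range(vma, example_buf, 0);
}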
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 821d30918cfc..427ded841224 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
| @@ -366,13 +366,13 @@ static ssize_t wireless_show(struct device *d, char *buf, | |||
| 366 | const struct iw_statistics *iw; | 366 | const struct iw_statistics *iw; |
| 367 | ssize_t ret = -EINVAL; | 367 | ssize_t ret = -EINVAL; |
| 368 | 368 | ||
| 369 | read_lock(&dev_base_lock); | 369 | rtnl_lock(); |
| 370 | if (dev_isalive(dev)) { | 370 | if (dev_isalive(dev)) { |
| 371 | iw = get_wireless_stats(dev); | 371 | iw = get_wireless_stats(dev); |
| 372 | if (iw) | 372 | if (iw) |
| 373 | ret = (*format)(iw, buf); | 373 | ret = (*format)(iw, buf); |
| 374 | } | 374 | } |
| 375 | read_unlock(&dev_base_lock); | 375 | rtnl_unlock(); |
| 376 | 376 | ||
| 377 | return ret; | 377 | return ret; |
| 378 | } | 378 | } |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index b69455217ed6..86acdba0a97d 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
| @@ -964,7 +964,7 @@ static ssize_t pktgen_if_write(struct file *file, | |||
| 964 | if (value == 0x7FFFFFFF) | 964 | if (value == 0x7FFFFFFF) |
| 965 | pkt_dev->delay = ULLONG_MAX; | 965 | pkt_dev->delay = ULLONG_MAX; |
| 966 | else | 966 | else |
| 967 | pkt_dev->delay = (u64)value * NSEC_PER_USEC; | 967 | pkt_dev->delay = (u64)value; |
| 968 | 968 | ||
| 969 | sprintf(pg_result, "OK: delay=%llu", | 969 | sprintf(pg_result, "OK: delay=%llu", |
| 970 | (unsigned long long) pkt_dev->delay); | 970 | (unsigned long long) pkt_dev->delay); |
| @@ -2212,7 +2212,7 @@ static void set_cur_queue_map(struct pktgen_dev *pkt_dev) | |||
| 2212 | if (pkt_dev->flags & F_QUEUE_MAP_CPU) | 2212 | if (pkt_dev->flags & F_QUEUE_MAP_CPU) |
| 2213 | pkt_dev->cur_queue_map = smp_processor_id(); | 2213 | pkt_dev->cur_queue_map = smp_processor_id(); |
| 2214 | 2214 | ||
| 2215 | else if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) { | 2215 | else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) { |
| 2216 | __u16 t; | 2216 | __u16 t; |
| 2217 | if (pkt_dev->flags & F_QUEUE_MAP_RND) { | 2217 | if (pkt_dev->flags & F_QUEUE_MAP_RND) { |
| 2218 | t = random32() % | 2218 | t = random32() % |
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index e92f1fd28aa5..5df2f6a0b0f0 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
| @@ -1077,12 +1077,16 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, | |||
| 1077 | ip_mc_up(in_dev); | 1077 | ip_mc_up(in_dev); |
| 1078 | /* fall through */ | 1078 | /* fall through */ |
| 1079 | case NETDEV_CHANGEADDR: | 1079 | case NETDEV_CHANGEADDR: |
| 1080 | if (IN_DEV_ARP_NOTIFY(in_dev)) | 1080 | /* Send gratuitous ARP to notify of link change */ |
| 1081 | arp_send(ARPOP_REQUEST, ETH_P_ARP, | 1081 | if (IN_DEV_ARP_NOTIFY(in_dev)) { |
| 1082 | in_dev->ifa_list->ifa_address, | 1082 | struct in_ifaddr *ifa = in_dev->ifa_list; |
| 1083 | dev, | 1083 | |
| 1084 | in_dev->ifa_list->ifa_address, | 1084 | if (ifa) |
| 1085 | NULL, dev->dev_addr, NULL); | 1085 | arp_send(ARPOP_REQUEST, ETH_P_ARP, |
| 1086 | ifa->ifa_address, dev, | ||
| 1087 | ifa->ifa_address, NULL, | ||
| 1088 | dev->dev_addr, NULL); | ||
| 1089 | } | ||
| 1086 | break; | 1090 | break; |
| 1087 | case NETDEV_DOWN: | 1091 | case NETDEV_DOWN: |
| 1088 | ip_mc_down(in_dev); | 1092 | ip_mc_down(in_dev); |
diff --git a/net/rfkill/core.c b/net/rfkill/core.c index dbeaf2983822..ba2efb960c60 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/list.h> | 27 | #include <linux/list.h> |
| 28 | #include <linux/mutex.h> | 28 | #include <linux/mutex.h> |
| 29 | #include <linux/rfkill.h> | 29 | #include <linux/rfkill.h> |
| 30 | #include <linux/sched.h> | ||
| 30 | #include <linux/spinlock.h> | 31 | #include <linux/spinlock.h> |
| 31 | #include <linux/miscdevice.h> | 32 | #include <linux/miscdevice.h> |
| 32 | #include <linux/wait.h> | 33 | #include <linux/wait.h> |
diff --git a/sound/drivers/opl3/opl3_midi.c b/sound/drivers/opl3/opl3_midi.c index 6e7d09ae0e82..7d722a025d0d 100644 --- a/sound/drivers/opl3/opl3_midi.c +++ b/sound/drivers/opl3/opl3_midi.c | |||
| @@ -29,6 +29,8 @@ extern char snd_opl3_regmap[MAX_OPL2_VOICES][4]; | |||
| 29 | 29 | ||
| 30 | extern int use_internal_drums; | 30 | extern int use_internal_drums; |
| 31 | 31 | ||
| 32 | static void snd_opl3_note_off_unsafe(void *p, int note, int vel, | ||
| 33 | struct snd_midi_channel *chan); | ||
| 32 | /* | 34 | /* |
| 33 | * The next table looks magical, but it certainly is not. Its values have | 35 | * The next table looks magical, but it certainly is not. Its values have |
| 34 | * been calculated as table[i]=8*log(i/64)/log(2) with an obvious exception | 36 | * been calculated as table[i]=8*log(i/64)/log(2) with an obvious exception |
| @@ -242,16 +244,20 @@ void snd_opl3_timer_func(unsigned long data) | |||
| 242 | int again = 0; | 244 | int again = 0; |
| 243 | int i; | 245 | int i; |
| 244 | 246 | ||
| 245 | spin_lock_irqsave(&opl3->sys_timer_lock, flags); | 247 | spin_lock_irqsave(&opl3->voice_lock, flags); |
| 246 | for (i = 0; i < opl3->max_voices; i++) { | 248 | for (i = 0; i < opl3->max_voices; i++) { |
| 247 | struct snd_opl3_voice *vp = &opl3->voices[i]; | 249 | struct snd_opl3_voice *vp = &opl3->voices[i]; |
| 248 | if (vp->state > 0 && vp->note_off_check) { | 250 | if (vp->state > 0 && vp->note_off_check) { |
| 249 | if (vp->note_off == jiffies) | 251 | if (vp->note_off == jiffies) |
| 250 | snd_opl3_note_off(opl3, vp->note, 0, vp->chan); | 252 | snd_opl3_note_off_unsafe(opl3, vp->note, 0, |
| 253 | vp->chan); | ||
| 251 | else | 254 | else |
| 252 | again++; | 255 | again++; |
| 253 | } | 256 | } |
| 254 | } | 257 | } |
| 258 | spin_unlock_irqrestore(&opl3->voice_lock, flags); | ||
| 259 | |||
| 260 | spin_lock_irqsave(&opl3->sys_timer_lock, flags); | ||
| 255 | if (again) { | 261 | if (again) { |
| 256 | opl3->tlist.expires = jiffies + 1; /* invoke again */ | 262 | opl3->tlist.expires = jiffies + 1; /* invoke again */ |
| 257 | add_timer(&opl3->tlist); | 263 | add_timer(&opl3->tlist); |
| @@ -658,15 +664,14 @@ static void snd_opl3_kill_voice(struct snd_opl3 *opl3, int voice) | |||
| 658 | /* | 664 | /* |
| 659 | * Release a note in response to a midi note off. | 665 | * Release a note in response to a midi note off. |
| 660 | */ | 666 | */ |
| 661 | void snd_opl3_note_off(void *p, int note, int vel, struct snd_midi_channel *chan) | 667 | static void snd_opl3_note_off_unsafe(void *p, int note, int vel, |
| 668 | struct snd_midi_channel *chan) | ||
| 662 | { | 669 | { |
| 663 | struct snd_opl3 *opl3; | 670 | struct snd_opl3 *opl3; |
| 664 | 671 | ||
| 665 | int voice; | 672 | int voice; |
| 666 | struct snd_opl3_voice *vp; | 673 | struct snd_opl3_voice *vp; |
| 667 | 674 | ||
| 668 | unsigned long flags; | ||
| 669 | |||
| 670 | opl3 = p; | 675 | opl3 = p; |
| 671 | 676 | ||
| 672 | #ifdef DEBUG_MIDI | 677 | #ifdef DEBUG_MIDI |
| @@ -674,12 +679,9 @@ void snd_opl3_note_off(void *p, int note, int vel, struct snd_midi_channel *chan | |||
| 674 | chan->number, chan->midi_program, note); | 679 | chan->number, chan->midi_program, note); |
| 675 | #endif | 680 | #endif |
| 676 | 681 | ||
| 677 | spin_lock_irqsave(&opl3->voice_lock, flags); | ||
| 678 | |||
| 679 | if (opl3->synth_mode == SNDRV_OPL3_MODE_SEQ) { | 682 | if (opl3->synth_mode == SNDRV_OPL3_MODE_SEQ) { |
| 680 | if (chan->drum_channel && use_internal_drums) { | 683 | if (chan->drum_channel && use_internal_drums) { |
| 681 | snd_opl3_drum_switch(opl3, note, vel, 0, chan); | 684 | snd_opl3_drum_switch(opl3, note, vel, 0, chan); |
| 682 | spin_unlock_irqrestore(&opl3->voice_lock, flags); | ||
| 683 | return; | 685 | return; |
| 684 | } | 686 | } |
| 685 | /* this loop will hopefully kill all extra voices, because | 687 | /* this loop will hopefully kill all extra voices, because |
| @@ -697,6 +699,16 @@ void snd_opl3_note_off(void *p, int note, int vel, struct snd_midi_channel *chan | |||
| 697 | snd_opl3_kill_voice(opl3, voice); | 699 | snd_opl3_kill_voice(opl3, voice); |
| 698 | } | 700 | } |
| 699 | } | 701 | } |
| 702 | } | ||
| 703 | |||
| 704 | void snd_opl3_note_off(void *p, int note, int vel, | ||
| 705 | struct snd_midi_channel *chan) | ||
| 706 | { | ||
| 707 | struct snd_opl3 *opl3 = p; | ||
| 708 | unsigned long flags; | ||
| 709 | |||
| 710 | spin_lock_irqsave(&opl3->voice_lock, flags); | ||
| 711 | snd_opl3_note_off_unsafe(p, note, vel, chan); | ||
| 700 | spin_unlock_irqrestore(&opl3->voice_lock, flags); | 712 | spin_unlock_irqrestore(&opl3->voice_lock, flags); |
| 701 | } | 713 | } |
| 702 | 714 | ||
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 7810d3dcad83..470fd74a0a1a 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -1332,15 +1332,20 @@ do_sku: | |||
| 1332 | * when the external headphone out jack is plugged" | 1332 | * when the external headphone out jack is plugged" |
| 1333 | */ | 1333 | */ |
| 1334 | if (!spec->autocfg.hp_pins[0]) { | 1334 | if (!spec->autocfg.hp_pins[0]) { |
| 1335 | hda_nid_t nid; | ||
| 1335 | tmp = (ass >> 11) & 0x3; /* HP to chassis */ | 1336 | tmp = (ass >> 11) & 0x3; /* HP to chassis */ |
| 1336 | if (tmp == 0) | 1337 | if (tmp == 0) |
| 1337 | spec->autocfg.hp_pins[0] = porta; | 1338 | nid = porta; |
| 1338 | else if (tmp == 1) | 1339 | else if (tmp == 1) |
| 1339 | spec->autocfg.hp_pins[0] = porte; | 1340 | nid = porte; |
| 1340 | else if (tmp == 2) | 1341 | else if (tmp == 2) |
| 1341 | spec->autocfg.hp_pins[0] = portd; | 1342 | nid = portd; |
| 1342 | else | 1343 | else |
| 1343 | return 1; | 1344 | return 1; |
| 1345 | for (i = 0; i < spec->autocfg.line_outs; i++) | ||
| 1346 | if (spec->autocfg.line_out_pins[i] == nid) | ||
| 1347 | return 1; | ||
| 1348 | spec->autocfg.hp_pins[0] = nid; | ||
| 1344 | } | 1349 | } |
| 1345 | 1350 | ||
| 1346 | alc_init_auto_hp(codec); | 1351 | alc_init_auto_hp(codec); |
| @@ -1362,7 +1367,7 @@ static void alc_ssid_check(struct hda_codec *codec, | |||
| 1362 | } | 1367 | } |
| 1363 | 1368 | ||
| 1364 | /* | 1369 | /* |
| 1365 | * Fix-up pin default configurations | 1370 | * Fix-up pin default configurations and add default verbs |
| 1366 | */ | 1371 | */ |
| 1367 | 1372 | ||
| 1368 | struct alc_pincfg { | 1373 | struct alc_pincfg { |
| @@ -1370,9 +1375,14 @@ struct alc_pincfg { | |||
| 1370 | u32 val; | 1375 | u32 val; |
| 1371 | }; | 1376 | }; |
| 1372 | 1377 | ||
| 1373 | static void alc_fix_pincfg(struct hda_codec *codec, | 1378 | struct alc_fixup { |
| 1379 | const struct alc_pincfg *pins; | ||
| 1380 | const struct hda_verb *verbs; | ||
| 1381 | }; | ||
| 1382 | |||
| 1383 | static void alc_pick_fixup(struct hda_codec *codec, | ||
| 1374 | const struct snd_pci_quirk *quirk, | 1384 | const struct snd_pci_quirk *quirk, |
| 1375 | const struct alc_pincfg **pinfix) | 1385 | const struct alc_fixup *fix) |
| 1376 | { | 1386 | { |
| 1377 | const struct alc_pincfg *cfg; | 1387 | const struct alc_pincfg *cfg; |
| 1378 | 1388 | ||
| @@ -1380,9 +1390,14 @@ static void alc_fix_pincfg(struct hda_codec *codec, | |||
| 1380 | if (!quirk) | 1390 | if (!quirk) |
| 1381 | return; | 1391 | return; |
| 1382 | 1392 | ||
| 1383 | cfg = pinfix[quirk->value]; | 1393 | fix += quirk->value; |
| 1384 | for (; cfg->nid; cfg++) | 1394 | cfg = fix->pins; |
| 1385 | snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val); | 1395 | if (cfg) { |
| 1396 | for (; cfg->nid; cfg++) | ||
| 1397 | snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val); | ||
| 1398 | } | ||
| 1399 | if (fix->verbs) | ||
| 1400 | add_verb(codec->spec, fix->verbs); | ||
| 1386 | } | 1401 | } |
| 1387 | 1402 | ||
| 1388 | /* | 1403 | /* |
| @@ -9593,11 +9608,13 @@ static struct alc_pincfg alc882_abit_aw9d_pinfix[] = { | |||
| 9593 | { } | 9608 | { } |
| 9594 | }; | 9609 | }; |
| 9595 | 9610 | ||
| 9596 | static const struct alc_pincfg *alc882_pin_fixes[] = { | 9611 | static const struct alc_fixup alc882_fixups[] = { |
| 9597 | [PINFIX_ABIT_AW9D_MAX] = alc882_abit_aw9d_pinfix, | 9612 | [PINFIX_ABIT_AW9D_MAX] = { |
| 9613 | .pins = alc882_abit_aw9d_pinfix | ||
| 9614 | }, | ||
| 9598 | }; | 9615 | }; |
| 9599 | 9616 | ||
| 9600 | static struct snd_pci_quirk alc882_pinfix_tbl[] = { | 9617 | static struct snd_pci_quirk alc882_fixup_tbl[] = { |
| 9601 | SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", PINFIX_ABIT_AW9D_MAX), | 9618 | SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", PINFIX_ABIT_AW9D_MAX), |
| 9602 | {} | 9619 | {} |
| 9603 | }; | 9620 | }; |
| @@ -9869,7 +9886,7 @@ static int patch_alc882(struct hda_codec *codec) | |||
| 9869 | board_config = ALC882_AUTO; | 9886 | board_config = ALC882_AUTO; |
| 9870 | } | 9887 | } |
| 9871 | 9888 | ||
| 9872 | alc_fix_pincfg(codec, alc882_pinfix_tbl, alc882_pin_fixes); | 9889 | alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups); |
| 9873 | 9890 | ||
| 9874 | if (board_config == ALC882_AUTO) { | 9891 | if (board_config == ALC882_AUTO) { |
| 9875 | /* automatic parse from the BIOS config */ | 9892 | /* automatic parse from the BIOS config */ |
| @@ -12842,12 +12859,15 @@ static int patch_alc268(struct hda_codec *codec) | |||
| 12842 | unsigned int wcap = get_wcaps(codec, 0x07); | 12859 | unsigned int wcap = get_wcaps(codec, 0x07); |
| 12843 | int i; | 12860 | int i; |
| 12844 | 12861 | ||
| 12862 | spec->capsrc_nids = alc268_capsrc_nids; | ||
| 12845 | /* get type */ | 12863 | /* get type */ |
| 12846 | wcap = get_wcaps_type(wcap); | 12864 | wcap = get_wcaps_type(wcap); |
| 12847 | if (spec->auto_mic || | 12865 | if (spec->auto_mic || |
| 12848 | wcap != AC_WID_AUD_IN || spec->input_mux->num_items == 1) { | 12866 | wcap != AC_WID_AUD_IN || spec->input_mux->num_items == 1) { |
| 12849 | spec->adc_nids = alc268_adc_nids_alt; | 12867 | spec->adc_nids = alc268_adc_nids_alt; |
| 12850 | spec->num_adc_nids = ARRAY_SIZE(alc268_adc_nids_alt); | 12868 | spec->num_adc_nids = ARRAY_SIZE(alc268_adc_nids_alt); |
| 12869 | if (spec->auto_mic) | ||
| 12870 | fixup_automic_adc(codec); | ||
| 12851 | if (spec->auto_mic || spec->input_mux->num_items == 1) | 12871 | if (spec->auto_mic || spec->input_mux->num_items == 1) |
| 12852 | add_mixer(spec, alc268_capture_nosrc_mixer); | 12872 | add_mixer(spec, alc268_capture_nosrc_mixer); |
| 12853 | else | 12873 | else |
| @@ -12857,7 +12877,6 @@ static int patch_alc268(struct hda_codec *codec) | |||
| 12857 | spec->num_adc_nids = ARRAY_SIZE(alc268_adc_nids); | 12877 | spec->num_adc_nids = ARRAY_SIZE(alc268_adc_nids); |
| 12858 | add_mixer(spec, alc268_capture_mixer); | 12878 | add_mixer(spec, alc268_capture_mixer); |
| 12859 | } | 12879 | } |
| 12860 | spec->capsrc_nids = alc268_capsrc_nids; | ||
| 12861 | /* set default input source */ | 12880 | /* set default input source */ |
| 12862 | for (i = 0; i < spec->num_adc_nids; i++) | 12881 | for (i = 0; i < spec->num_adc_nids; i++) |
| 12863 | snd_hda_codec_write_cache(codec, alc268_capsrc_nids[i], | 12882 | snd_hda_codec_write_cache(codec, alc268_capsrc_nids[i], |
| @@ -14357,15 +14376,16 @@ static void alc861_auto_init_multi_out(struct hda_codec *codec) | |||
| 14357 | static void alc861_auto_init_hp_out(struct hda_codec *codec) | 14376 | static void alc861_auto_init_hp_out(struct hda_codec *codec) |
| 14358 | { | 14377 | { |
| 14359 | struct alc_spec *spec = codec->spec; | 14378 | struct alc_spec *spec = codec->spec; |
| 14360 | hda_nid_t pin; | ||
| 14361 | 14379 | ||
| 14362 | pin = spec->autocfg.hp_pins[0]; | 14380 | if (spec->autocfg.hp_outs) |
| 14363 | if (pin) | 14381 | alc861_auto_set_output_and_unmute(codec, |
| 14364 | alc861_auto_set_output_and_unmute(codec, pin, PIN_HP, | 14382 | spec->autocfg.hp_pins[0], |
| 14383 | PIN_HP, | ||
| 14365 | spec->multiout.hp_nid); | 14384 | spec->multiout.hp_nid); |
| 14366 | pin = spec->autocfg.speaker_pins[0]; | 14385 | if (spec->autocfg.speaker_outs) |
| 14367 | if (pin) | 14386 | alc861_auto_set_output_and_unmute(codec, |
| 14368 | alc861_auto_set_output_and_unmute(codec, pin, PIN_OUT, | 14387 | spec->autocfg.speaker_pins[0], |
| 14388 | PIN_OUT, | ||
| 14369 | spec->multiout.dac_nids[0]); | 14389 | spec->multiout.dac_nids[0]); |
| 14370 | } | 14390 | } |
| 14371 | 14391 | ||
| @@ -15158,7 +15178,7 @@ static struct snd_pci_quirk alc861vd_cfg_tbl[] = { | |||
| 15158 | SND_PCI_QUIRK(0x1019, 0xa88d, "Realtek ALC660 demo", ALC660VD_3ST), | 15178 | SND_PCI_QUIRK(0x1019, 0xa88d, "Realtek ALC660 demo", ALC660VD_3ST), |
| 15159 | SND_PCI_QUIRK(0x103c, 0x30bf, "HP TX1000", ALC861VD_HP), | 15179 | SND_PCI_QUIRK(0x103c, 0x30bf, "HP TX1000", ALC861VD_HP), |
| 15160 | SND_PCI_QUIRK(0x1043, 0x12e2, "Asus z35m", ALC660VD_3ST), | 15180 | SND_PCI_QUIRK(0x1043, 0x12e2, "Asus z35m", ALC660VD_3ST), |
| 15161 | SND_PCI_QUIRK(0x1043, 0x1339, "Asus G1", ALC660VD_3ST), | 15181 | /*SND_PCI_QUIRK(0x1043, 0x1339, "Asus G1", ALC660VD_3ST),*/ /* auto */ |
| 15162 | SND_PCI_QUIRK(0x1043, 0x1633, "Asus V1Sn", ALC660VD_ASUS_V1S), | 15182 | SND_PCI_QUIRK(0x1043, 0x1633, "Asus V1Sn", ALC660VD_ASUS_V1S), |
| 15163 | SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS", ALC660VD_3ST_DIG), | 15183 | SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS", ALC660VD_3ST_DIG), |
| 15164 | SND_PCI_QUIRK(0x10de, 0x03f0, "Realtek ALC660 demo", ALC660VD_3ST), | 15184 | SND_PCI_QUIRK(0x10de, 0x03f0, "Realtek ALC660 demo", ALC660VD_3ST), |
| @@ -15551,6 +15571,29 @@ static void alc861vd_auto_init(struct hda_codec *codec) | |||
| 15551 | alc_inithook(codec); | 15571 | alc_inithook(codec); |
| 15552 | } | 15572 | } |
| 15553 | 15573 | ||
| 15574 | enum { | ||
| 15575 | ALC660VD_FIX_ASUS_GPIO1 | ||
| 15576 | }; | ||
| 15577 | |||
| 15578 | /* reset GPIO1 */ | ||
| 15579 | static const struct hda_verb alc660vd_fix_asus_gpio1_verbs[] = { | ||
| 15580 | {0x01, AC_VERB_SET_GPIO_MASK, 0x03}, | ||
| 15581 | {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x01}, | ||
| 15582 | {0x01, AC_VERB_SET_GPIO_DATA, 0x01}, | ||
| 15583 | { } | ||
| 15584 | }; | ||
| 15585 | |||
| 15586 | static const struct alc_fixup alc861vd_fixups[] = { | ||
| 15587 | [ALC660VD_FIX_ASUS_GPIO1] = { | ||
| 15588 | .verbs = alc660vd_fix_asus_gpio1_verbs, | ||
| 15589 | }, | ||
| 15590 | }; | ||
| 15591 | |||
| 15592 | static struct snd_pci_quirk alc861vd_fixup_tbl[] = { | ||
| 15593 | SND_PCI_QUIRK(0x1043, 0x1339, "ASUS A7-K", ALC660VD_FIX_ASUS_GPIO1), | ||
| 15594 | {} | ||
| 15595 | }; | ||
| 15596 | |||
| 15554 | static int patch_alc861vd(struct hda_codec *codec) | 15597 | static int patch_alc861vd(struct hda_codec *codec) |
| 15555 | { | 15598 | { |
| 15556 | struct alc_spec *spec; | 15599 | struct alc_spec *spec; |
| @@ -15572,6 +15615,8 @@ static int patch_alc861vd(struct hda_codec *codec) | |||
| 15572 | board_config = ALC861VD_AUTO; | 15615 | board_config = ALC861VD_AUTO; |
| 15573 | } | 15616 | } |
| 15574 | 15617 | ||
| 15618 | alc_pick_fixup(codec, alc861vd_fixup_tbl, alc861vd_fixups); | ||
| 15619 | |||
| 15575 | if (board_config == ALC861VD_AUTO) { | 15620 | if (board_config == ALC861VD_AUTO) { |
| 15576 | /* automatic parse from the BIOS config */ | 15621 | /* automatic parse from the BIOS config */ |
| 15577 | err = alc861vd_parse_auto_config(codec); | 15622 | err = alc861vd_parse_auto_config(codec); |
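Editorial note: the patch_realtek.c hunks above fold the old pin-only fixup tables into struct alc_fixup, so one quirk-selected entry can carry pin overrides, init verbs, or both, and alc_pick_fixup() applies whichever fields are set. The two real entries in this diff each populate only one field, so below is a hedged sketch of what a combined entry would look like; the board ID, NID, and pin-default values are hypothetical, only the structure and helper names come from the diff.

	/* hypothetical combined fixup: both .pins and .verbs populated */
	static struct alc_pincfg example_pinfix[] = {
		{ 0x15, 0x99130110 },	/* made-up NID / pin-default pair */
		{ }			/* cfg->nid == 0 terminates the loop */
	};

	static const struct hda_verb example_verbs[] = {
		{ 0x01, AC_VERB_SET_GPIO_MASK, 0x03 },
		{ }
	};

	static const struct alc_fixup example_fixups[] = {
		[0] = {
			.pins  = example_pinfix,	/* applied with snd_hda_codec_set_pincfg() */
			.verbs = example_verbs,		/* queued with add_verb() */
		},
	};

	static struct snd_pci_quirk example_fixup_tbl[] = {
		SND_PCI_QUIRK(0x1043, 0x0000, "hypothetical board", 0),
		{ }
	};

	/* in the codec's patch_*() entry point, before auto-parsing: */
	/* alc_pick_fixup(codec, example_fixup_tbl, example_fixups); */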
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c index cecf1ffeeaaa..d74033a2cfbe 100644 --- a/sound/pci/ice1712/ice1712.c +++ b/sound/pci/ice1712/ice1712.c | |||
| @@ -2259,7 +2259,7 @@ static int snd_ice1712_pro_peak_get(struct snd_kcontrol *kcontrol, | |||
| 2259 | } | 2259 | } |
| 2260 | 2260 | ||
| 2261 | static struct snd_kcontrol_new snd_ice1712_mixer_pro_peak __devinitdata = { | 2261 | static struct snd_kcontrol_new snd_ice1712_mixer_pro_peak __devinitdata = { |
| 2262 | .iface = SNDRV_CTL_ELEM_IFACE_MIXER, | 2262 | .iface = SNDRV_CTL_ELEM_IFACE_PCM, |
| 2263 | .name = "Multi Track Peak", | 2263 | .name = "Multi Track Peak", |
| 2264 | .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, | 2264 | .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, |
| 2265 | .info = snd_ice1712_pro_peak_info, | 2265 | .info = snd_ice1712_pro_peak_info, |
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c index af6e00148621..76b717dae4b6 100644 --- a/sound/pci/ice1712/ice1724.c +++ b/sound/pci/ice1712/ice1724.c | |||
| @@ -1294,7 +1294,7 @@ static int __devinit snd_vt1724_pcm_spdif(struct snd_ice1712 *ice, int device) | |||
| 1294 | 1294 | ||
| 1295 | snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, | 1295 | snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, |
| 1296 | snd_dma_pci_data(ice->pci), | 1296 | snd_dma_pci_data(ice->pci), |
| 1297 | 64*1024, 64*1024); | 1297 | 256*1024, 256*1024); |
| 1298 | 1298 | ||
| 1299 | ice->pcm = pcm; | 1299 | ice->pcm = pcm; |
| 1300 | 1300 | ||
| @@ -1408,7 +1408,7 @@ static int __devinit snd_vt1724_pcm_indep(struct snd_ice1712 *ice, int device) | |||
| 1408 | 1408 | ||
| 1409 | snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, | 1409 | snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, |
| 1410 | snd_dma_pci_data(ice->pci), | 1410 | snd_dma_pci_data(ice->pci), |
| 1411 | 64*1024, 64*1024); | 1411 | 256*1024, 256*1024); |
| 1412 | 1412 | ||
| 1413 | ice->pcm_ds = pcm; | 1413 | ice->pcm_ds = pcm; |
| 1414 | 1414 | ||
| @@ -2110,7 +2110,7 @@ static int snd_vt1724_pro_peak_get(struct snd_kcontrol *kcontrol, | |||
| 2110 | } | 2110 | } |
| 2111 | 2111 | ||
| 2112 | static struct snd_kcontrol_new snd_vt1724_mixer_pro_peak __devinitdata = { | 2112 | static struct snd_kcontrol_new snd_vt1724_mixer_pro_peak __devinitdata = { |
| 2113 | .iface = SNDRV_CTL_ELEM_IFACE_MIXER, | 2113 | .iface = SNDRV_CTL_ELEM_IFACE_PCM, |
| 2114 | .name = "Multi Track Peak", | 2114 | .name = "Multi Track Peak", |
| 2115 | .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, | 2115 | .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, |
| 2116 | .info = snd_vt1724_pro_peak_info, | 2116 | .info = snd_vt1724_pro_peak_info, |
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c index acfa4760da49..91683a349035 100644 --- a/sound/pci/via82xx.c +++ b/sound/pci/via82xx.c | |||
| @@ -1626,7 +1626,7 @@ static int snd_via8233_dxs_volume_get(struct snd_kcontrol *kcontrol, | |||
| 1626 | struct snd_ctl_elem_value *ucontrol) | 1626 | struct snd_ctl_elem_value *ucontrol) |
| 1627 | { | 1627 | { |
| 1628 | struct via82xx *chip = snd_kcontrol_chip(kcontrol); | 1628 | struct via82xx *chip = snd_kcontrol_chip(kcontrol); |
| 1629 | unsigned int idx = snd_ctl_get_ioff(kcontrol, &ucontrol->id); | 1629 | unsigned int idx = kcontrol->id.subdevice; |
| 1630 | 1630 | ||
| 1631 | ucontrol->value.integer.value[0] = VIA_DXS_MAX_VOLUME - chip->playback_volume[idx][0]; | 1631 | ucontrol->value.integer.value[0] = VIA_DXS_MAX_VOLUME - chip->playback_volume[idx][0]; |
| 1632 | ucontrol->value.integer.value[1] = VIA_DXS_MAX_VOLUME - chip->playback_volume[idx][1]; | 1632 | ucontrol->value.integer.value[1] = VIA_DXS_MAX_VOLUME - chip->playback_volume[idx][1]; |
| @@ -1646,7 +1646,7 @@ static int snd_via8233_dxs_volume_put(struct snd_kcontrol *kcontrol, | |||
| 1646 | struct snd_ctl_elem_value *ucontrol) | 1646 | struct snd_ctl_elem_value *ucontrol) |
| 1647 | { | 1647 | { |
| 1648 | struct via82xx *chip = snd_kcontrol_chip(kcontrol); | 1648 | struct via82xx *chip = snd_kcontrol_chip(kcontrol); |
| 1649 | unsigned int idx = snd_ctl_get_ioff(kcontrol, &ucontrol->id); | 1649 | unsigned int idx = kcontrol->id.subdevice; |
| 1650 | unsigned long port = chip->port + 0x10 * idx; | 1650 | unsigned long port = chip->port + 0x10 * idx; |
| 1651 | unsigned char val; | 1651 | unsigned char val; |
| 1652 | int i, change = 0; | 1652 | int i, change = 0; |
| @@ -1705,11 +1705,12 @@ static struct snd_kcontrol_new snd_via8233_pcmdxs_volume_control __devinitdata = | |||
| 1705 | }; | 1705 | }; |
| 1706 | 1706 | ||
| 1707 | static struct snd_kcontrol_new snd_via8233_dxs_volume_control __devinitdata = { | 1707 | static struct snd_kcontrol_new snd_via8233_dxs_volume_control __devinitdata = { |
| 1708 | .name = "VIA DXS Playback Volume", | 1708 | .iface = SNDRV_CTL_ELEM_IFACE_PCM, |
| 1709 | .iface = SNDRV_CTL_ELEM_IFACE_MIXER, | 1709 | .device = 0, |
| 1710 | /* .subdevice set later */ | ||
| 1711 | .name = "PCM Playback Volume", | ||
| 1710 | .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | | 1712 | .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | |
| 1711 | SNDRV_CTL_ELEM_ACCESS_TLV_READ), | 1713 | SNDRV_CTL_ELEM_ACCESS_TLV_READ), |
| 1712 | .count = 4, | ||
| 1713 | .info = snd_via8233_dxs_volume_info, | 1714 | .info = snd_via8233_dxs_volume_info, |
| 1714 | .get = snd_via8233_dxs_volume_get, | 1715 | .get = snd_via8233_dxs_volume_get, |
| 1715 | .put = snd_via8233_dxs_volume_put, | 1716 | .put = snd_via8233_dxs_volume_put, |
| @@ -1936,10 +1937,18 @@ static int __devinit snd_via8233_init_misc(struct via82xx *chip) | |||
| 1936 | } | 1937 | } |
| 1937 | else /* Using DXS when PCM emulation is enabled is really weird */ | 1938 | else /* Using DXS when PCM emulation is enabled is really weird */ |
| 1938 | { | 1939 | { |
| 1939 | /* Standalone DXS controls */ | 1940 | for (i = 0; i < 4; ++i) { |
| 1940 | err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_via8233_dxs_volume_control, chip)); | 1941 | struct snd_kcontrol *kctl; |
| 1941 | if (err < 0) | 1942 | |
| 1942 | return err; | 1943 | kctl = snd_ctl_new1( |
| 1944 | &snd_via8233_dxs_volume_control, chip); | ||
| 1945 | if (!kctl) | ||
| 1946 | return -ENOMEM; | ||
| 1947 | kctl->id.subdevice = i; | ||
| 1948 | err = snd_ctl_add(chip->card, kctl); | ||
| 1949 | if (err < 0) | ||
| 1950 | return err; | ||
| 1951 | } | ||
| 1943 | } | 1952 | } |
| 1944 | } | 1953 | } |
| 1945 | /* select spdif data slot 10/11 */ | 1954 | /* select spdif data slot 10/11 */ |
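Editorial note: the via82xx hunks replace the single four-element "VIA DXS Playback Volume" mixer control with four separate "PCM Playback Volume" elements on the PCM interface, distinguished by subdevice number instead of by snd_ctl_get_ioff(). A minimal sketch of the general per-substream pattern, with placeholder names rather than the driver's own:

	/* sketch: one control element per substream, keyed by id.subdevice */
	for (i = 0; i < nr_substreams; i++) {
		struct snd_kcontrol *kctl = snd_ctl_new1(&volume_template, chip);

		if (!kctl)
			return -ENOMEM;
		kctl->id.subdevice = i;		/* get/put then read kcontrol->id.subdevice */
		err = snd_ctl_add(card, kctl);
		if (err < 0)
			return err;
	}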
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c index 3ff0373dff89..593d5b9c9f03 100644 --- a/sound/soc/codecs/wm8350.c +++ b/sound/soc/codecs/wm8350.c | |||
| @@ -579,7 +579,7 @@ static const struct snd_kcontrol_new wm8350_left_capt_mixer_controls[] = { | |||
| 579 | SOC_DAPM_SINGLE_TLV("L3 Capture Volume", | 579 | SOC_DAPM_SINGLE_TLV("L3 Capture Volume", |
| 580 | WM8350_INPUT_MIXER_VOLUME_L, 9, 7, 0, out_mix_tlv), | 580 | WM8350_INPUT_MIXER_VOLUME_L, 9, 7, 0, out_mix_tlv), |
| 581 | SOC_DAPM_SINGLE("PGA Capture Switch", | 581 | SOC_DAPM_SINGLE("PGA Capture Switch", |
| 582 | WM8350_LEFT_INPUT_VOLUME, 14, 1, 0), | 582 | WM8350_LEFT_INPUT_VOLUME, 14, 1, 1), |
| 583 | }; | 583 | }; |
| 584 | 584 | ||
| 585 | /* Right Input Mixer */ | 585 | /* Right Input Mixer */ |
| @@ -589,7 +589,7 @@ static const struct snd_kcontrol_new wm8350_right_capt_mixer_controls[] = { | |||
| 589 | SOC_DAPM_SINGLE_TLV("L3 Capture Volume", | 589 | SOC_DAPM_SINGLE_TLV("L3 Capture Volume", |
| 590 | WM8350_INPUT_MIXER_VOLUME_R, 13, 7, 0, out_mix_tlv), | 590 | WM8350_INPUT_MIXER_VOLUME_R, 13, 7, 0, out_mix_tlv), |
| 591 | SOC_DAPM_SINGLE("PGA Capture Switch", | 591 | SOC_DAPM_SINGLE("PGA Capture Switch", |
| 592 | WM8350_RIGHT_INPUT_VOLUME, 14, 1, 0), | 592 | WM8350_RIGHT_INPUT_VOLUME, 14, 1, 1), |
| 593 | }; | 593 | }; |
| 594 | 594 | ||
| 595 | /* Left Mic Mixer */ | 595 | /* Left Mic Mixer */ |
diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c index da97aae475a2..1ef2454c5205 100644 --- a/sound/soc/codecs/wm8940.c +++ b/sound/soc/codecs/wm8940.c | |||
| @@ -790,7 +790,7 @@ static int wm8940_register(struct wm8940_priv *wm8940, | |||
| 790 | codec->reg_cache = &wm8940->reg_cache; | 790 | codec->reg_cache = &wm8940->reg_cache; |
| 791 | 791 | ||
| 792 | ret = snd_soc_codec_set_cache_io(codec, 8, 16, control); | 792 | ret = snd_soc_codec_set_cache_io(codec, 8, 16, control); |
| 793 | if (ret == 0) { | 793 | if (ret < 0) { |
| 794 | dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret); | 794 | dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret); |
| 795 | return ret; | 795 | return ret; |
| 796 | } | 796 | } |
diff --git a/sound/soc/imx/mxc-ssi.c b/sound/soc/imx/mxc-ssi.c index 3806ff2c0cd4..ccdefe60e752 100644 --- a/sound/soc/imx/mxc-ssi.c +++ b/sound/soc/imx/mxc-ssi.c | |||
| @@ -397,14 +397,6 @@ static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, | |||
| 397 | break; | 397 | break; |
| 398 | } | 398 | } |
| 399 | 399 | ||
| 400 | /* sync */ | ||
| 401 | if (!(fmt & SND_SOC_DAIFMT_ASYNC)) | ||
| 402 | scr |= SSI_SCR_SYN; | ||
| 403 | |||
| 404 | /* tdm - only for stereo atm */ | ||
| 405 | if (fmt & SND_SOC_DAIFMT_TDM) | ||
| 406 | scr |= SSI_SCR_NET; | ||
| 407 | |||
| 408 | if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) { | 400 | if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) { |
| 409 | SSI1_STCR = stcr; | 401 | SSI1_STCR = stcr; |
| 410 | SSI1_SRCR = srcr; | 402 | SSI1_SRCR = srcr; |
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index f79711b9fa5b..8de6f9dec4a2 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
| @@ -524,7 +524,7 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget) | |||
| 524 | 524 | ||
| 525 | /* connected jack or spk ? */ | 525 | /* connected jack or spk ? */ |
| 526 | if (widget->id == snd_soc_dapm_hp || widget->id == snd_soc_dapm_spk || | 526 | if (widget->id == snd_soc_dapm_hp || widget->id == snd_soc_dapm_spk || |
| 527 | widget->id == snd_soc_dapm_line) | 527 | (widget->id == snd_soc_dapm_line && !list_empty(&widget->sources))) |
| 528 | return 1; | 528 | return 1; |
| 529 | } | 529 | } |
| 530 | 530 | ||
| @@ -573,7 +573,8 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget) | |||
| 573 | return 1; | 573 | return 1; |
| 574 | 574 | ||
| 575 | /* connected jack ? */ | 575 | /* connected jack ? */ |
| 576 | if (widget->id == snd_soc_dapm_mic || widget->id == snd_soc_dapm_line) | 576 | if (widget->id == snd_soc_dapm_mic || |
| 577 | (widget->id == snd_soc_dapm_line && !list_empty(&widget->sinks))) | ||
| 577 | return 1; | 578 | return 1; |
| 578 | } | 579 | } |
| 579 | 580 | ||
diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt index 1c2ed3090cce..a7910099d6fd 100644 --- a/tools/perf/Documentation/perf-timechart.txt +++ b/tools/perf/Documentation/perf-timechart.txt | |||
| @@ -31,6 +31,9 @@ OPTIONS | |||
| 31 | -w:: | 31 | -w:: |
| 32 | --width=:: | 32 | --width=:: |
| 33 | Select the width of the SVG file (default: 1000) | 33 | Select the width of the SVG file (default: 1000) |
| 34 | -p:: | ||
| 35 | --power-only:: | ||
| 36 | Only output the CPU power section of the diagram | ||
| 34 | 37 | ||
| 35 | 38 | ||
| 36 | SEE ALSO | 39 | SEE ALSO |
diff --git a/tools/perf/Makefile b/tools/perf/Makefile index b5f1953b6144..5881943f0c34 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile | |||
| @@ -728,7 +728,7 @@ $(BUILT_INS): perf$X | |||
| 728 | common-cmds.h: util/generate-cmdlist.sh command-list.txt | 728 | common-cmds.h: util/generate-cmdlist.sh command-list.txt |
| 729 | 729 | ||
| 730 | common-cmds.h: $(wildcard Documentation/perf-*.txt) | 730 | common-cmds.h: $(wildcard Documentation/perf-*.txt) |
| 731 | $(QUIET_GEN)util/generate-cmdlist.sh > $@+ && mv $@+ $@ | 731 | $(QUIET_GEN). util/generate-cmdlist.sh > $@+ && mv $@+ $@ |
| 732 | 732 | ||
| 733 | $(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh | 733 | $(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh |
| 734 | $(QUIET_GEN)$(RM) $@ $@+ && \ | 734 | $(QUIET_GEN)$(RM) $@ $@+ && \ |
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index a5a050af8e7d..3eeef339c787 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c | |||
| @@ -41,6 +41,7 @@ static int raw_samples = 0; | |||
| 41 | static int system_wide = 0; | 41 | static int system_wide = 0; |
| 42 | static int profile_cpu = -1; | 42 | static int profile_cpu = -1; |
| 43 | static pid_t target_pid = -1; | 43 | static pid_t target_pid = -1; |
| 44 | static pid_t child_pid = -1; | ||
| 44 | static int inherit = 1; | 45 | static int inherit = 1; |
| 45 | static int force = 0; | 46 | static int force = 0; |
| 46 | static int append_file = 0; | 47 | static int append_file = 0; |
| @@ -184,6 +185,9 @@ static void sig_handler(int sig) | |||
| 184 | 185 | ||
| 185 | static void sig_atexit(void) | 186 | static void sig_atexit(void) |
| 186 | { | 187 | { |
| 188 | if (child_pid != -1) | ||
| 189 | kill(child_pid, SIGTERM); | ||
| 190 | |||
| 187 | if (signr == -1) | 191 | if (signr == -1) |
| 188 | return; | 192 | return; |
| 189 | 193 | ||
| @@ -610,6 +614,8 @@ static int __cmd_record(int argc, const char **argv) | |||
| 610 | exit(-1); | 614 | exit(-1); |
| 611 | } | 615 | } |
| 612 | } | 616 | } |
| 617 | |||
| 618 | child_pid = pid; | ||
| 613 | } | 619 | } |
| 614 | 620 | ||
| 615 | if (realtime_prio) { | 621 | if (realtime_prio) { |
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index e5f6ece65a13..3db31e7bf173 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c | |||
| @@ -69,7 +69,8 @@ static int run_idx = 0; | |||
| 69 | static int run_count = 1; | 69 | static int run_count = 1; |
| 70 | static int inherit = 1; | 70 | static int inherit = 1; |
| 71 | static int scale = 1; | 71 | static int scale = 1; |
| 72 | static int target_pid = -1; | 72 | static pid_t target_pid = -1; |
| 73 | static pid_t child_pid = -1; | ||
| 73 | static int null_run = 0; | 74 | static int null_run = 0; |
| 74 | 75 | ||
| 75 | static int fd[MAX_NR_CPUS][MAX_COUNTERS]; | 76 | static int fd[MAX_NR_CPUS][MAX_COUNTERS]; |
| @@ -285,6 +286,8 @@ static int run_perf_stat(int argc __used, const char **argv) | |||
| 285 | exit(-1); | 286 | exit(-1); |
| 286 | } | 287 | } |
| 287 | 288 | ||
| 289 | child_pid = pid; | ||
| 290 | |||
| 288 | /* | 291 | /* |
| 289 | * Wait for the child to be ready to exec. | 292 | * Wait for the child to be ready to exec. |
| 290 | */ | 293 | */ |
| @@ -433,6 +436,9 @@ static void skip_signal(int signo) | |||
| 433 | 436 | ||
| 434 | static void sig_atexit(void) | 437 | static void sig_atexit(void) |
| 435 | { | 438 | { |
| 439 | if (child_pid != -1) | ||
| 440 | kill(child_pid, SIGTERM); | ||
| 441 | |||
| 436 | if (signr == -1) | 442 | if (signr == -1) |
| 437 | return; | 443 | return; |
| 438 | 444 | ||
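Editorial note: both builtin-record.c and builtin-stat.c now remember the forked workload's pid so that a perf process exiting early can take the child with it. A compressed sketch of the pattern; the atexit() registration happens outside the hunks shown and is assumed here:

	static pid_t child_pid = -1;

	static void sig_atexit(void)
	{
		if (child_pid != -1)
			kill(child_pid, SIGTERM);	/* stop the workload on our way out */
	}

	/* after fork() of the workload succeeds: child_pid = pid;          */
	/* during setup (assumption, not shown above): atexit(sig_atexit);  */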
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index 4405681b3134..702d8fe58fbc 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c | |||
| @@ -46,6 +46,8 @@ static u64 turbo_frequency; | |||
| 46 | 46 | ||
| 47 | static u64 first_time, last_time; | 47 | static u64 first_time, last_time; |
| 48 | 48 | ||
| 49 | static int power_only; | ||
| 50 | |||
| 49 | 51 | ||
| 50 | static struct perf_header *header; | 52 | static struct perf_header *header; |
| 51 | 53 | ||
| @@ -547,7 +549,7 @@ static void end_sample_processing(void) | |||
| 547 | u64 cpu; | 549 | u64 cpu; |
| 548 | struct power_event *pwr; | 550 | struct power_event *pwr; |
| 549 | 551 | ||
| 550 | for (cpu = 0; cpu < numcpus; cpu++) { | 552 | for (cpu = 0; cpu <= numcpus; cpu++) { |
| 551 | pwr = malloc(sizeof(struct power_event)); | 553 | pwr = malloc(sizeof(struct power_event)); |
| 552 | if (!pwr) | 554 | if (!pwr) |
| 553 | return; | 555 | return; |
| @@ -871,7 +873,7 @@ static int determine_display_tasks(u64 threshold) | |||
| 871 | /* no exit marker, task kept running to the end */ | 873 | /* no exit marker, task kept running to the end */ |
| 872 | if (p->end_time == 0) | 874 | if (p->end_time == 0) |
| 873 | p->end_time = last_time; | 875 | p->end_time = last_time; |
| 874 | if (p->total_time >= threshold) | 876 | if (p->total_time >= threshold && !power_only) |
| 875 | p->display = 1; | 877 | p->display = 1; |
| 876 | 878 | ||
| 877 | c = p->all; | 879 | c = p->all; |
| @@ -882,7 +884,7 @@ static int determine_display_tasks(u64 threshold) | |||
| 882 | if (c->start_time == 1) | 884 | if (c->start_time == 1) |
| 883 | c->start_time = first_time; | 885 | c->start_time = first_time; |
| 884 | 886 | ||
| 885 | if (c->total_time >= threshold) { | 887 | if (c->total_time >= threshold && !power_only) { |
| 886 | c->display = 1; | 888 | c->display = 1; |
| 887 | count++; | 889 | count++; |
| 888 | } | 890 | } |
| @@ -1134,6 +1136,8 @@ static const struct option options[] = { | |||
| 1134 | "output file name"), | 1136 | "output file name"), |
| 1135 | OPT_INTEGER('w', "width", &svg_page_width, | 1137 | OPT_INTEGER('w', "width", &svg_page_width, |
| 1136 | "page width"), | 1138 | "page width"), |
| 1139 | OPT_BOOLEAN('p', "power-only", &power_only, | ||
| 1140 | "output power data only"), | ||
| 1137 | OPT_END() | 1141 | OPT_END() |
| 1138 | }; | 1142 | }; |
| 1139 | 1143 | ||
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 1ca88896eee4..37512e936235 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c | |||
| @@ -782,6 +782,7 @@ static const char *skip_symbols[] = { | |||
| 782 | "exit_idle", | 782 | "exit_idle", |
| 783 | "mwait_idle", | 783 | "mwait_idle", |
| 784 | "mwait_idle_with_hints", | 784 | "mwait_idle_with_hints", |
| 785 | "poll_idle", | ||
| 785 | "ppc64_runlatch_off", | 786 | "ppc64_runlatch_off", |
| 786 | "pseries_dedicated_idle_sleep", | 787 | "pseries_dedicated_idle_sleep", |
| 787 | NULL | 788 | NULL |
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index e9d256e2f47d..0c5e4f72f2ba 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c | |||
| @@ -219,10 +219,6 @@ remap: | |||
| 219 | more: | 219 | more: |
| 220 | event = (event_t *)(buf + head); | 220 | event = (event_t *)(buf + head); |
| 221 | 221 | ||
| 222 | size = event->header.size; | ||
| 223 | if (!size) | ||
| 224 | size = 8; | ||
| 225 | |||
| 226 | if (head + event->header.size >= page_size * mmap_window) { | 222 | if (head + event->header.size >= page_size * mmap_window) { |
| 227 | unsigned long shift = page_size * (head / page_size); | 223 | unsigned long shift = page_size * (head / page_size); |
| 228 | int res; | 224 | int res; |
| @@ -237,7 +233,6 @@ more: | |||
| 237 | 233 | ||
| 238 | size = event->header.size; | 234 | size = event->header.size; |
| 239 | 235 | ||
| 240 | |||
| 241 | if (!size || process_event(event, offset, head) < 0) { | 236 | if (!size || process_event(event, offset, head) < 0) { |
| 242 | 237 | ||
| 243 | /* | 238 | /* |
| @@ -290,7 +285,6 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used) | |||
| 290 | usage_with_options(annotate_usage, options); | 285 | usage_with_options(annotate_usage, options); |
| 291 | } | 286 | } |
| 292 | 287 | ||
| 293 | |||
| 294 | setup_pager(); | 288 | setup_pager(); |
| 295 | 289 | ||
| 296 | return __cmd_trace(); | 290 | return __cmd_trace(); |
diff --git a/tools/perf/design.txt b/tools/perf/design.txt index f1946d107b10..fdd42a824c98 100644 --- a/tools/perf/design.txt +++ b/tools/perf/design.txt | |||
| @@ -455,3 +455,6 @@ will need at least this: | |||
| 455 | 455 | ||
| 456 | If your architecture does have hardware capabilities, you can override the | 456 | If your architecture does have hardware capabilities, you can override the |
| 457 | weak stub hw_perf_event_init() to register hardware counters. | 457 | weak stub hw_perf_event_init() to register hardware counters. |
| 458 | |||
| 459 | Architectures that have d-cache aliasing issues, such as Sparc and ARM, | ||
| 460 | should select PERF_USE_VMALLOC in order to avoid these for perf mmap(). | ||
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c index a778fd0f4ae4..856655d8b0b8 100644 --- a/tools/perf/util/svghelper.c +++ b/tools/perf/util/svghelper.c | |||
| @@ -28,7 +28,7 @@ static u64 turbo_frequency, max_freq; | |||
| 28 | 28 | ||
| 29 | int svg_page_width = 1000; | 29 | int svg_page_width = 1000; |
| 30 | 30 | ||
| 31 | #define MIN_TEXT_SIZE 0.001 | 31 | #define MIN_TEXT_SIZE 0.01 |
| 32 | 32 | ||
| 33 | static u64 total_height; | 33 | static u64 total_height; |
| 34 | static FILE *svgfile; | 34 | static FILE *svgfile; |
| @@ -217,6 +217,18 @@ static char *cpu_model(void) | |||
| 217 | } | 217 | } |
| 218 | fclose(file); | 218 | fclose(file); |
| 219 | } | 219 | } |
| 220 | |||
| 221 | /* max frequency */ | ||
| 222 | file = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies", "r"); | ||
| 223 | if (file) { | ||
| 224 | while (fgets(buf, 255, file)) { | ||
| 225 | unsigned int freq; | ||
| 226 | freq = strtoull(buf, NULL, 10); | ||
| 227 | if (freq > max_freq) | ||
| 228 | max_freq = freq; | ||
| 229 | } | ||
| 230 | fclose(file); | ||
| 231 | } | ||
| 220 | return cpu_m; | 232 | return cpu_m; |
| 221 | } | 233 | } |
| 222 | 234 | ||
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 559fb06210f5..47ea0609a760 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
| @@ -324,8 +324,7 @@ static inline int elf_sym__is_function(const GElf_Sym *sym) | |||
| 324 | { | 324 | { |
| 325 | return elf_sym__type(sym) == STT_FUNC && | 325 | return elf_sym__type(sym) == STT_FUNC && |
| 326 | sym->st_name != 0 && | 326 | sym->st_name != 0 && |
| 327 | sym->st_shndx != SHN_UNDEF && | 327 | sym->st_shndx != SHN_UNDEF; |
| 328 | sym->st_size != 0; | ||
| 329 | } | 328 | } |
| 330 | 329 | ||
| 331 | static inline int elf_sym__is_label(const GElf_Sym *sym) | 330 | static inline int elf_sym__is_label(const GElf_Sym *sym) |
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index f6a8437141c8..55b41b9e3834 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c | |||
| @@ -1968,10 +1968,11 @@ static const struct flag flags[] = { | |||
| 1968 | { "NET_TX_SOFTIRQ", 2 }, | 1968 | { "NET_TX_SOFTIRQ", 2 }, |
| 1969 | { "NET_RX_SOFTIRQ", 3 }, | 1969 | { "NET_RX_SOFTIRQ", 3 }, |
| 1970 | { "BLOCK_SOFTIRQ", 4 }, | 1970 | { "BLOCK_SOFTIRQ", 4 }, |
| 1971 | { "TASKLET_SOFTIRQ", 5 }, | 1971 | { "BLOCK_IOPOLL_SOFTIRQ", 5 }, |
| 1972 | { "SCHED_SOFTIRQ", 6 }, | 1972 | { "TASKLET_SOFTIRQ", 6 }, |
| 1973 | { "HRTIMER_SOFTIRQ", 7 }, | 1973 | { "SCHED_SOFTIRQ", 7 }, |
| 1974 | { "RCU_SOFTIRQ", 8 }, | 1974 | { "HRTIMER_SOFTIRQ", 8 }, |
| 1975 | { "RCU_SOFTIRQ", 9 }, | ||
| 1975 | 1976 | ||
| 1976 | { "HRTIMER_NORESTART", 0 }, | 1977 | { "HRTIMER_NORESTART", 0 }, |
| 1977 | { "HRTIMER_RESTART", 1 }, | 1978 | { "HRTIMER_RESTART", 1 }, |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index e79c54034bcd..b7c78a403dc2 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
| @@ -850,6 +850,19 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn, | |||
| 850 | 850 | ||
| 851 | } | 851 | } |
| 852 | 852 | ||
| 853 | static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, | ||
| 854 | struct mm_struct *mm, | ||
| 855 | unsigned long address, | ||
| 856 | pte_t pte) | ||
| 857 | { | ||
| 858 | struct kvm *kvm = mmu_notifier_to_kvm(mn); | ||
| 859 | |||
| 860 | spin_lock(&kvm->mmu_lock); | ||
| 861 | kvm->mmu_notifier_seq++; | ||
| 862 | kvm_set_spte_hva(kvm, address, pte); | ||
| 863 | spin_unlock(&kvm->mmu_lock); | ||
| 864 | } | ||
| 865 | |||
| 853 | static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, | 866 | static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, |
| 854 | struct mm_struct *mm, | 867 | struct mm_struct *mm, |
| 855 | unsigned long start, | 868 | unsigned long start, |
| @@ -929,6 +942,7 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { | |||
| 929 | .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, | 942 | .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, |
| 930 | .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, | 943 | .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, |
| 931 | .clear_flush_young = kvm_mmu_notifier_clear_flush_young, | 944 | .clear_flush_young = kvm_mmu_notifier_clear_flush_young, |
| 945 | .change_pte = kvm_mmu_notifier_change_pte, | ||
| 932 | .release = kvm_mmu_notifier_release, | 946 | .release = kvm_mmu_notifier_release, |
| 933 | }; | 947 | }; |
| 934 | #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ | 948 | #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ |
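Editorial note: the kvm_main.c change adds a .change_pte callback so KVM learns when a PTE it has shadowed is replaced (for example when identical pages are merged) and can update the corresponding spte under mmu_lock. As an illustration only, not KVM code, a minimal mmu_notifier user wiring up the same hook; mmu_notifier_register() is the generic registration call from the same header:

	#include <linux/mmu_notifier.h>

	static void demo_change_pte(struct mmu_notifier *mn, struct mm_struct *mm,
				    unsigned long address, pte_t pte)
	{
		/* react to the PTE at 'address' being replaced in this mm */
	}

	static const struct mmu_notifier_ops demo_ops = {
		.change_pte = demo_change_pte,
	};

	static struct mmu_notifier demo_mn = { .ops = &demo_ops };

	static int demo_attach(struct mm_struct *mm)
	{
		return mmu_notifier_register(&demo_mn, mm);
	}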
