diff options
181 files changed, 5383 insertions, 2485 deletions
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt index 455d4e6d346d..0b33bfe7dde9 100644 --- a/Documentation/cgroups/cgroups.txt +++ b/Documentation/cgroups/cgroups.txt | |||
| @@ -227,7 +227,14 @@ as the path relative to the root of the cgroup file system. | |||
| 227 | Each cgroup is represented by a directory in the cgroup file system | 227 | Each cgroup is represented by a directory in the cgroup file system |
| 228 | containing the following files describing that cgroup: | 228 | containing the following files describing that cgroup: |
| 229 | 229 | ||
| 230 | - tasks: list of tasks (by pid) attached to that cgroup | 230 | - tasks: list of tasks (by pid) attached to that cgroup. This list |
| 231 | is not guaranteed to be sorted. Writing a thread id into this file | ||
| 232 | moves the thread into this cgroup. | ||
| 233 | - cgroup.procs: list of tgids in the cgroup. This list is not | ||
| 234 | guaranteed to be sorted or free of duplicate tgids, and userspace | ||
| 235 | should sort/uniquify the list if this property is required. | ||
| 236 | Writing a tgid into this file moves all threads with that tgid into | ||
| 237 | this cgroup. | ||
| 231 | - notify_on_release flag: run the release agent on exit? | 238 | - notify_on_release flag: run the release agent on exit? |
| 232 | - release_agent: the path to use for release notifications (this file | 239 | - release_agent: the path to use for release notifications (this file |
| 233 | exists in the top cgroup only) | 240 | exists in the top cgroup only) |
| @@ -374,7 +381,7 @@ Now you want to do something with this cgroup. | |||
| 374 | 381 | ||
| 375 | In this directory you can find several files: | 382 | In this directory you can find several files: |
| 376 | # ls | 383 | # ls |
| 377 | notify_on_release tasks | 384 | cgroup.procs notify_on_release tasks |
| 378 | (plus whatever files added by the attached subsystems) | 385 | (plus whatever files added by the attached subsystems) |
| 379 | 386 | ||
| 380 | Now attach your shell to this cgroup: | 387 | Now attach your shell to this cgroup: |
diff --git a/Documentation/isdn/INTERFACE.CAPI b/Documentation/isdn/INTERFACE.CAPI index 686e107923ec..5fe8de5cc727 100644 --- a/Documentation/isdn/INTERFACE.CAPI +++ b/Documentation/isdn/INTERFACE.CAPI | |||
| @@ -60,10 +60,9 @@ open() operation on regular files or character devices. | |||
| 60 | 60 | ||
| 61 | After a successful return from register_appl(), CAPI messages from the | 61 | After a successful return from register_appl(), CAPI messages from the |
| 62 | application may be passed to the driver for the device via calls to the | 62 | application may be passed to the driver for the device via calls to the |
| 63 | send_message() callback function. The CAPI message to send is stored in the | 63 | send_message() callback function. Conversely, the driver may call Kernel |
| 64 | data portion of an skb. Conversely, the driver may call Kernel CAPI's | 64 | CAPI's capi_ctr_handle_message() function to pass a received CAPI message to |
| 65 | capi_ctr_handle_message() function to pass a received CAPI message to Kernel | 65 | Kernel CAPI for forwarding to an application, specifying its ApplID. |
| 66 | CAPI for forwarding to an application, specifying its ApplID. | ||
| 67 | 66 | ||
| 68 | Deregistration requests (CAPI operation CAPI_RELEASE) from applications are | 67 | Deregistration requests (CAPI operation CAPI_RELEASE) from applications are |
| 69 | forwarded as calls to the release_appl() callback function, passing the same | 68 | forwarded as calls to the release_appl() callback function, passing the same |
| @@ -142,6 +141,7 @@ u16 (*send_message)(struct capi_ctr *ctrlr, struct sk_buff *skb) | |||
| 142 | to accepting or queueing the message. Errors occurring during the | 141 | to accepting or queueing the message. Errors occurring during the |
| 143 | actual processing of the message should be signaled with an | 142 | actual processing of the message should be signaled with an |
| 144 | appropriate reply message. | 143 | appropriate reply message. |
| 144 | May be called in process or interrupt context. | ||
| 145 | Calls to this function are not serialized by Kernel CAPI, ie. it must | 145 | Calls to this function are not serialized by Kernel CAPI, ie. it must |
| 146 | be prepared to be re-entered. | 146 | be prepared to be re-entered. |
| 147 | 147 | ||
| @@ -154,7 +154,8 @@ read_proc_t *ctr_read_proc | |||
| 154 | system entry, /proc/capi/controllers/<n>; will be called with a | 154 | system entry, /proc/capi/controllers/<n>; will be called with a |
| 155 | pointer to the device's capi_ctr structure as the last (data) argument | 155 | pointer to the device's capi_ctr structure as the last (data) argument |
| 156 | 156 | ||
| 157 | Note: Callback functions are never called in interrupt context. | 157 | Note: Callback functions except send_message() are never called in interrupt |
| 158 | context. | ||
| 158 | 159 | ||
| 159 | - to be filled in before calling capi_ctr_ready(): | 160 | - to be filled in before calling capi_ctr_ready(): |
| 160 | 161 | ||
| @@ -171,14 +172,40 @@ u8 serial[CAPI_SERIAL_LEN] | |||
| 171 | value to return for CAPI_GET_SERIAL | 172 | value to return for CAPI_GET_SERIAL |
| 172 | 173 | ||
| 173 | 174 | ||
| 174 | 4.3 The _cmsg Structure | 175 | 4.3 SKBs |
| 176 | |||
| 177 | CAPI messages are passed between Kernel CAPI and the driver via send_message() | ||
| 178 | and capi_ctr_handle_message(), stored in the data portion of a socket buffer | ||
| 179 | (skb). Each skb contains a single CAPI message coded according to the CAPI 2.0 | ||
| 180 | standard. | ||
| 181 | |||
| 182 | For the data transfer messages, DATA_B3_REQ and DATA_B3_IND, the actual | ||
| 183 | payload data immediately follows the CAPI message itself within the same skb. | ||
| 184 | The Data and Data64 parameters are not used for processing. The Data64 | ||
| 185 | parameter may be omitted by setting the length field of the CAPI message to 22 | ||
| 186 | instead of 30. | ||
| 187 | |||
| 188 | |||
| 189 | 4.4 The _cmsg Structure | ||
| 175 | 190 | ||
| 176 | (declared in <linux/isdn/capiutil.h>) | 191 | (declared in <linux/isdn/capiutil.h>) |
| 177 | 192 | ||
| 178 | The _cmsg structure stores the contents of a CAPI 2.0 message in an easily | 193 | The _cmsg structure stores the contents of a CAPI 2.0 message in an easily |
| 179 | accessible form. It contains members for all possible CAPI 2.0 parameters, of | 194 | accessible form. It contains members for all possible CAPI 2.0 parameters, |
| 180 | which only those appearing in the message type currently being processed are | 195 | including subparameters of the Additional Info and B Protocol structured |
| 181 | actually used. Unused members should be set to zero. | 196 | parameters, with the following exceptions: |
| 197 | |||
| 198 | * second Calling party number (CONNECT_IND) | ||
| 199 | |||
| 200 | * Data64 (DATA_B3_REQ and DATA_B3_IND) | ||
| 201 | |||
| 202 | * Sending complete (subparameter of Additional Info, CONNECT_REQ and INFO_REQ) | ||
| 203 | |||
| 204 | * Global Configuration (subparameter of B Protocol, CONNECT_REQ, CONNECT_RESP | ||
| 205 | and SELECT_B_PROTOCOL_REQ) | ||
| 206 | |||
| 207 | Only those parameters appearing in the message type currently being processed | ||
| 208 | are actually used. Unused members should be set to zero. | ||
| 182 | 209 | ||
| 183 | Members are named after the CAPI 2.0 standard names of the parameters they | 210 | Members are named after the CAPI 2.0 standard names of the parameters they |
| 184 | represent. See <linux/isdn/capiutil.h> for the exact spelling. Member data | 211 | represent. See <linux/isdn/capiutil.h> for the exact spelling. Member data |
| @@ -190,18 +217,19 @@ u16 for CAPI parameters of type 'word' | |||
| 190 | 217 | ||
| 191 | u32 for CAPI parameters of type 'dword' | 218 | u32 for CAPI parameters of type 'dword' |
| 192 | 219 | ||
| 193 | _cstruct for CAPI parameters of type 'struct' not containing any | 220 | _cstruct for CAPI parameters of type 'struct' |
| 194 | variably-sized (struct) subparameters (eg. 'Called Party Number') | ||
| 195 | The member is a pointer to a buffer containing the parameter in | 221 | The member is a pointer to a buffer containing the parameter in |
| 196 | CAPI encoding (length + content). It may also be NULL, which will | 222 | CAPI encoding (length + content). It may also be NULL, which will |
| 197 | be taken to represent an empty (zero length) parameter. | 223 | be taken to represent an empty (zero length) parameter. |
| 224 | Subparameters are stored in encoded form within the content part. | ||
| 198 | 225 | ||
| 199 | _cmstruct for CAPI parameters of type 'struct' containing 'struct' | 226 | _cmstruct alternative representation for CAPI parameters of type 'struct' |
| 200 | subparameters ('Additional Info' and 'B Protocol') | 227 | (used only for the 'Additional Info' and 'B Protocol' parameters) |
| 201 | The representation is a single byte containing one of the values: | 228 | The representation is a single byte containing one of the values: |
| 202 | CAPI_DEFAULT: the parameter is empty | 229 | CAPI_DEFAULT: The parameter is empty/absent. |
| 203 | CAPI_COMPOSE: the values of the subparameters are stored | 230 | CAPI_COMPOSE: The parameter is present. |
| 204 | individually in the corresponding _cmsg structure members | 231 | Subparameter values are stored individually in the corresponding |
| 232 | _cmsg structure members. | ||
| 205 | 233 | ||
| 206 | Functions capi_cmsg2message() and capi_message2cmsg() are provided to convert | 234 | Functions capi_cmsg2message() and capi_message2cmsg() are provided to convert |
| 207 | messages between their transport encoding described in the CAPI 2.0 standard | 235 | messages between their transport encoding described in the CAPI 2.0 standard |
| @@ -297,3 +325,26 @@ char *capi_cmd2str(u8 Command, u8 Subcommand) | |||
| 297 | be NULL if the command/subcommand is not one of those defined in the | 325 | be NULL if the command/subcommand is not one of those defined in the |
| 298 | CAPI 2.0 standard. | 326 | CAPI 2.0 standard. |
| 299 | 327 | ||
| 328 | |||
| 329 | 7. Debugging | ||
| 330 | |||
| 331 | The module kernelcapi has a module parameter showcapimsgs controlling some | ||
| 332 | debugging output produced by the module. It can only be set when the module is | ||
| 333 | loaded, via a parameter "showcapimsgs=<n>" to the modprobe command, either on | ||
| 334 | the command line or in the configuration file. | ||
| 335 | |||
| 336 | If the lowest bit of showcapimsgs is set, kernelcapi logs controller and | ||
| 337 | application up and down events. | ||
| 338 | |||
| 339 | In addition, every registered CAPI controller has an associated traceflag | ||
| 340 | parameter controlling how CAPI messages sent from and to the controller are | ||
| 341 | logged. The traceflag parameter is initialized with the value of the | ||
| 342 | showcapimsgs parameter when the controller is registered, but can later be | ||
| 343 | changed via the MANUFACTURER_REQ command KCAPI_CMD_TRACE. | ||
| 344 | |||
| 345 | If the value of traceflag is non-zero, CAPI messages are logged. | ||
| 346 | DATA_B3 messages are only logged if the value of traceflag is > 2. | ||
| 347 | |||
| 348 | If the lowest bit of traceflag is set, only the command/subcommand and message | ||
| 349 | length are logged. Otherwise, kernelcapi logs a readable representation of | ||
| 350 | the entire message. | ||
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 6fa7292947e5..9107b387e91f 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
| @@ -671,6 +671,7 @@ and is between 256 and 4096 characters. It is defined in the file | |||
| 671 | earlyprintk= [X86,SH,BLACKFIN] | 671 | earlyprintk= [X86,SH,BLACKFIN] |
| 672 | earlyprintk=vga | 672 | earlyprintk=vga |
| 673 | earlyprintk=serial[,ttySn[,baudrate]] | 673 | earlyprintk=serial[,ttySn[,baudrate]] |
| 674 | earlyprintk=ttySn[,baudrate] | ||
| 674 | earlyprintk=dbgp[debugController#] | 675 | earlyprintk=dbgp[debugController#] |
| 675 | 676 | ||
| 676 | Append ",keep" to not disable it when the real console | 677 | Append ",keep" to not disable it when the real console |
diff --git a/Documentation/networking/pktgen.txt b/Documentation/networking/pktgen.txt index c6cf4a3c16e0..61bb645d50e0 100644 --- a/Documentation/networking/pktgen.txt +++ b/Documentation/networking/pktgen.txt | |||
| @@ -90,6 +90,11 @@ Examples: | |||
| 90 | pgset "dstmac 00:00:00:00:00:00" sets MAC destination address | 90 | pgset "dstmac 00:00:00:00:00:00" sets MAC destination address |
| 91 | pgset "srcmac 00:00:00:00:00:00" sets MAC source address | 91 | pgset "srcmac 00:00:00:00:00:00" sets MAC source address |
| 92 | 92 | ||
| 93 | pgset "queue_map_min 0" Sets the min value of tx queue interval | ||
| 94 | pgset "queue_map_max 7" Sets the max value of tx queue interval, for multiqueue devices | ||
| 95 | To select queue 1 of a given device, | ||
| 96 | use queue_map_min=1 and queue_map_max=1 | ||
| 97 | |||
| 93 | pgset "src_mac_count 1" Sets the number of MACs we'll range through. | 98 | pgset "src_mac_count 1" Sets the number of MACs we'll range through. |
| 94 | The 'minimum' MAC is what you set with srcmac. | 99 | The 'minimum' MAC is what you set with srcmac. |
| 95 | 100 | ||
| @@ -101,6 +106,9 @@ Examples: | |||
| 101 | IPDST_RND, UDPSRC_RND, | 106 | IPDST_RND, UDPSRC_RND, |
| 102 | UDPDST_RND, MACSRC_RND, MACDST_RND | 107 | UDPDST_RND, MACSRC_RND, MACDST_RND |
| 103 | MPLS_RND, VID_RND, SVID_RND | 108 | MPLS_RND, VID_RND, SVID_RND |
| 109 | QUEUE_MAP_RND # queue map random | ||
| 110 | QUEUE_MAP_CPU # queue map mirrors smp_processor_id() | ||
| 111 | |||
| 104 | 112 | ||
| 105 | pgset "udp_src_min 9" set UDP source port min, If < udp_src_max, then | 113 | pgset "udp_src_min 9" set UDP source port min, If < udp_src_max, then |
| 106 | cycle through the port range. | 114 | cycle through the port range. |
diff --git a/Documentation/vm/ksm.txt b/Documentation/vm/ksm.txt index 72a22f65960e..262d8e6793a3 100644 --- a/Documentation/vm/ksm.txt +++ b/Documentation/vm/ksm.txt | |||
| @@ -52,15 +52,15 @@ The KSM daemon is controlled by sysfs files in /sys/kernel/mm/ksm/, | |||
| 52 | readable by all but writable only by root: | 52 | readable by all but writable only by root: |
| 53 | 53 | ||
| 54 | max_kernel_pages - set to maximum number of kernel pages that KSM may use | 54 | max_kernel_pages - set to maximum number of kernel pages that KSM may use |
| 55 | e.g. "echo 2000 > /sys/kernel/mm/ksm/max_kernel_pages" | 55 | e.g. "echo 100000 > /sys/kernel/mm/ksm/max_kernel_pages" |
| 56 | Value 0 imposes no limit on the kernel pages KSM may use; | 56 | Value 0 imposes no limit on the kernel pages KSM may use; |
| 57 | but note that any process using MADV_MERGEABLE can cause | 57 | but note that any process using MADV_MERGEABLE can cause |
| 58 | KSM to allocate these pages, unswappable until it exits. | 58 | KSM to allocate these pages, unswappable until it exits. |
| 59 | Default: 2000 (chosen for demonstration purposes) | 59 | Default: quarter of memory (chosen to not pin too much) |
| 60 | 60 | ||
| 61 | pages_to_scan - how many present pages to scan before ksmd goes to sleep | 61 | pages_to_scan - how many present pages to scan before ksmd goes to sleep |
| 62 | e.g. "echo 200 > /sys/kernel/mm/ksm/pages_to_scan" | 62 | e.g. "echo 100 > /sys/kernel/mm/ksm/pages_to_scan" |
| 63 | Default: 200 (chosen for demonstration purposes) | 63 | Default: 100 (chosen for demonstration purposes) |
| 64 | 64 | ||
| 65 | sleep_millisecs - how many milliseconds ksmd should sleep before next scan | 65 | sleep_millisecs - how many milliseconds ksmd should sleep before next scan |
| 66 | e.g. "echo 20 > /sys/kernel/mm/ksm/sleep_millisecs" | 66 | e.g. "echo 20 > /sys/kernel/mm/ksm/sleep_millisecs" |
| @@ -70,7 +70,8 @@ run - set 0 to stop ksmd from running but keep merged pages, | |||
| 70 | set 1 to run ksmd e.g. "echo 1 > /sys/kernel/mm/ksm/run", | 70 | set 1 to run ksmd e.g. "echo 1 > /sys/kernel/mm/ksm/run", |
| 71 | set 2 to stop ksmd and unmerge all pages currently merged, | 71 | set 2 to stop ksmd and unmerge all pages currently merged, |
| 72 | but leave mergeable areas registered for next run | 72 | but leave mergeable areas registered for next run |
| 73 | Default: 1 (for immediate use by apps which register) | 73 | Default: 0 (must be changed to 1 to activate KSM, |
| 74 | except if CONFIG_SYSFS is disabled) | ||
| 74 | 75 | ||
| 75 | The effectiveness of KSM and MADV_MERGEABLE is shown in /sys/kernel/mm/ksm/: | 76 | The effectiveness of KSM and MADV_MERGEABLE is shown in /sys/kernel/mm/ksm/: |
| 76 | 77 | ||
| @@ -86,4 +87,4 @@ pages_volatile embraces several different kinds of activity, but a high | |||
| 86 | proportion there would also indicate poor use of madvise MADV_MERGEABLE. | 87 | proportion there would also indicate poor use of madvise MADV_MERGEABLE. |
| 87 | 88 | ||
| 88 | Izik Eidus, | 89 | Izik Eidus, |
| 89 | Hugh Dickins, 30 July 2009 | 90 | Hugh Dickins, 24 Sept 2009 |
diff --git a/Documentation/vm/page-types.c b/Documentation/vm/page-types.c index fa1a30d9e9d5..3ec4f2a22585 100644 --- a/Documentation/vm/page-types.c +++ b/Documentation/vm/page-types.c | |||
| @@ -2,7 +2,10 @@ | |||
| 2 | * page-types: Tool for querying page flags | 2 | * page-types: Tool for querying page flags |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2009 Intel corporation | 4 | * Copyright (C) 2009 Intel corporation |
| 5 | * Copyright (C) 2009 Wu Fengguang <fengguang.wu@intel.com> | 5 | * |
| 6 | * Authors: Wu Fengguang <fengguang.wu@intel.com> | ||
| 7 | * | ||
| 8 | * Released under the General Public License (GPL). | ||
| 6 | */ | 9 | */ |
| 7 | 10 | ||
| 8 | #define _LARGEFILE64_SOURCE | 11 | #define _LARGEFILE64_SOURCE |
| @@ -69,7 +72,9 @@ | |||
| 69 | #define KPF_COMPOUND_TAIL 16 | 72 | #define KPF_COMPOUND_TAIL 16 |
| 70 | #define KPF_HUGE 17 | 73 | #define KPF_HUGE 17 |
| 71 | #define KPF_UNEVICTABLE 18 | 74 | #define KPF_UNEVICTABLE 18 |
| 75 | #define KPF_HWPOISON 19 | ||
| 72 | #define KPF_NOPAGE 20 | 76 | #define KPF_NOPAGE 20 |
| 77 | #define KPF_KSM 21 | ||
| 73 | 78 | ||
| 74 | /* [32-] kernel hacking assistances */ | 79 | /* [32-] kernel hacking assistances */ |
| 75 | #define KPF_RESERVED 32 | 80 | #define KPF_RESERVED 32 |
| @@ -116,7 +121,9 @@ static char *page_flag_names[] = { | |||
| 116 | [KPF_COMPOUND_TAIL] = "T:compound_tail", | 121 | [KPF_COMPOUND_TAIL] = "T:compound_tail", |
| 117 | [KPF_HUGE] = "G:huge", | 122 | [KPF_HUGE] = "G:huge", |
| 118 | [KPF_UNEVICTABLE] = "u:unevictable", | 123 | [KPF_UNEVICTABLE] = "u:unevictable", |
| 124 | [KPF_HWPOISON] = "X:hwpoison", | ||
| 119 | [KPF_NOPAGE] = "n:nopage", | 125 | [KPF_NOPAGE] = "n:nopage", |
| 126 | [KPF_KSM] = "x:ksm", | ||
| 120 | 127 | ||
| 121 | [KPF_RESERVED] = "r:reserved", | 128 | [KPF_RESERVED] = "r:reserved", |
| 122 | [KPF_MLOCKED] = "m:mlocked", | 129 | [KPF_MLOCKED] = "m:mlocked", |
| @@ -152,9 +159,6 @@ static unsigned long opt_size[MAX_ADDR_RANGES]; | |||
| 152 | static int nr_vmas; | 159 | static int nr_vmas; |
| 153 | static unsigned long pg_start[MAX_VMAS]; | 160 | static unsigned long pg_start[MAX_VMAS]; |
| 154 | static unsigned long pg_end[MAX_VMAS]; | 161 | static unsigned long pg_end[MAX_VMAS]; |
| 155 | static unsigned long voffset; | ||
| 156 | |||
| 157 | static int pagemap_fd; | ||
| 158 | 162 | ||
| 159 | #define MAX_BIT_FILTERS 64 | 163 | #define MAX_BIT_FILTERS 64 |
| 160 | static int nr_bit_filters; | 164 | static int nr_bit_filters; |
| @@ -163,9 +167,16 @@ static uint64_t opt_bits[MAX_BIT_FILTERS]; | |||
| 163 | 167 | ||
| 164 | static int page_size; | 168 | static int page_size; |
| 165 | 169 | ||
| 166 | #define PAGES_BATCH (64 << 10) /* 64k pages */ | 170 | static int pagemap_fd; |
| 167 | static int kpageflags_fd; | 171 | static int kpageflags_fd; |
| 168 | 172 | ||
| 173 | static int opt_hwpoison; | ||
| 174 | static int opt_unpoison; | ||
| 175 | |||
| 176 | static char *hwpoison_debug_fs = "/debug/hwpoison"; | ||
| 177 | static int hwpoison_inject_fd; | ||
| 178 | static int hwpoison_forget_fd; | ||
| 179 | |||
| 169 | #define HASH_SHIFT 13 | 180 | #define HASH_SHIFT 13 |
| 170 | #define HASH_SIZE (1 << HASH_SHIFT) | 181 | #define HASH_SIZE (1 << HASH_SHIFT) |
| 171 | #define HASH_MASK (HASH_SIZE - 1) | 182 | #define HASH_MASK (HASH_SIZE - 1) |
| @@ -207,6 +218,74 @@ static void fatal(const char *x, ...) | |||
| 207 | exit(EXIT_FAILURE); | 218 | exit(EXIT_FAILURE); |
| 208 | } | 219 | } |
| 209 | 220 | ||
| 221 | int checked_open(const char *pathname, int flags) | ||
| 222 | { | ||
| 223 | int fd = open(pathname, flags); | ||
| 224 | |||
| 225 | if (fd < 0) { | ||
| 226 | perror(pathname); | ||
| 227 | exit(EXIT_FAILURE); | ||
| 228 | } | ||
| 229 | |||
| 230 | return fd; | ||
| 231 | } | ||
| 232 | |||
| 233 | /* | ||
| 234 | * pagemap/kpageflags routines | ||
| 235 | */ | ||
| 236 | |||
| 237 | static unsigned long do_u64_read(int fd, char *name, | ||
| 238 | uint64_t *buf, | ||
| 239 | unsigned long index, | ||
| 240 | unsigned long count) | ||
| 241 | { | ||
| 242 | long bytes; | ||
| 243 | |||
| 244 | if (index > ULONG_MAX / 8) | ||
| 245 | fatal("index overflow: %lu\n", index); | ||
| 246 | |||
| 247 | if (lseek(fd, index * 8, SEEK_SET) < 0) { | ||
| 248 | perror(name); | ||
| 249 | exit(EXIT_FAILURE); | ||
| 250 | } | ||
| 251 | |||
| 252 | bytes = read(fd, buf, count * 8); | ||
| 253 | if (bytes < 0) { | ||
| 254 | perror(name); | ||
| 255 | exit(EXIT_FAILURE); | ||
| 256 | } | ||
| 257 | if (bytes % 8) | ||
| 258 | fatal("partial read: %lu bytes\n", bytes); | ||
| 259 | |||
| 260 | return bytes / 8; | ||
| 261 | } | ||
| 262 | |||
| 263 | static unsigned long kpageflags_read(uint64_t *buf, | ||
| 264 | unsigned long index, | ||
| 265 | unsigned long pages) | ||
| 266 | { | ||
| 267 | return do_u64_read(kpageflags_fd, PROC_KPAGEFLAGS, buf, index, pages); | ||
| 268 | } | ||
| 269 | |||
| 270 | static unsigned long pagemap_read(uint64_t *buf, | ||
| 271 | unsigned long index, | ||
| 272 | unsigned long pages) | ||
| 273 | { | ||
| 274 | return do_u64_read(pagemap_fd, "/proc/pid/pagemap", buf, index, pages); | ||
| 275 | } | ||
| 276 | |||
| 277 | static unsigned long pagemap_pfn(uint64_t val) | ||
| 278 | { | ||
| 279 | unsigned long pfn; | ||
| 280 | |||
| 281 | if (val & PM_PRESENT) | ||
| 282 | pfn = PM_PFRAME(val); | ||
| 283 | else | ||
| 284 | pfn = 0; | ||
| 285 | |||
| 286 | return pfn; | ||
| 287 | } | ||
| 288 | |||
| 210 | 289 | ||
| 211 | /* | 290 | /* |
| 212 | * page flag names | 291 | * page flag names |
| @@ -255,7 +334,8 @@ static char *page_flag_longname(uint64_t flags) | |||
| 255 | * page list and summary | 334 | * page list and summary |
| 256 | */ | 335 | */ |
| 257 | 336 | ||
| 258 | static void show_page_range(unsigned long offset, uint64_t flags) | 337 | static void show_page_range(unsigned long voffset, |
| 338 | unsigned long offset, uint64_t flags) | ||
| 259 | { | 339 | { |
| 260 | static uint64_t flags0; | 340 | static uint64_t flags0; |
| 261 | static unsigned long voff; | 341 | static unsigned long voff; |
| @@ -281,7 +361,8 @@ static void show_page_range(unsigned long offset, uint64_t flags) | |||
| 281 | count = 1; | 361 | count = 1; |
| 282 | } | 362 | } |
| 283 | 363 | ||
| 284 | static void show_page(unsigned long offset, uint64_t flags) | 364 | static void show_page(unsigned long voffset, |
| 365 | unsigned long offset, uint64_t flags) | ||
| 285 | { | 366 | { |
| 286 | if (opt_pid) | 367 | if (opt_pid) |
| 287 | printf("%lx\t", voffset); | 368 | printf("%lx\t", voffset); |
| @@ -362,6 +443,62 @@ static uint64_t well_known_flags(uint64_t flags) | |||
| 362 | return flags; | 443 | return flags; |
| 363 | } | 444 | } |
| 364 | 445 | ||
| 446 | static uint64_t kpageflags_flags(uint64_t flags) | ||
| 447 | { | ||
| 448 | flags = expand_overloaded_flags(flags); | ||
| 449 | |||
| 450 | if (!opt_raw) | ||
| 451 | flags = well_known_flags(flags); | ||
| 452 | |||
| 453 | return flags; | ||
| 454 | } | ||
| 455 | |||
| 456 | /* | ||
| 457 | * page actions | ||
| 458 | */ | ||
| 459 | |||
| 460 | static void prepare_hwpoison_fd(void) | ||
| 461 | { | ||
| 462 | char buf[100]; | ||
| 463 | |||
| 464 | if (opt_hwpoison && !hwpoison_inject_fd) { | ||
| 465 | sprintf(buf, "%s/corrupt-pfn", hwpoison_debug_fs); | ||
| 466 | hwpoison_inject_fd = checked_open(buf, O_WRONLY); | ||
| 467 | } | ||
| 468 | |||
| 469 | if (opt_unpoison && !hwpoison_forget_fd) { | ||
| 470 | sprintf(buf, "%s/renew-pfn", hwpoison_debug_fs); | ||
| 471 | hwpoison_forget_fd = checked_open(buf, O_WRONLY); | ||
| 472 | } | ||
| 473 | } | ||
| 474 | |||
| 475 | static int hwpoison_page(unsigned long offset) | ||
| 476 | { | ||
| 477 | char buf[100]; | ||
| 478 | int len; | ||
| 479 | |||
| 480 | len = sprintf(buf, "0x%lx\n", offset); | ||
| 481 | len = write(hwpoison_inject_fd, buf, len); | ||
| 482 | if (len < 0) { | ||
| 483 | perror("hwpoison inject"); | ||
| 484 | return len; | ||
| 485 | } | ||
| 486 | return 0; | ||
| 487 | } | ||
| 488 | |||
| 489 | static int unpoison_page(unsigned long offset) | ||
| 490 | { | ||
| 491 | char buf[100]; | ||
| 492 | int len; | ||
| 493 | |||
| 494 | len = sprintf(buf, "0x%lx\n", offset); | ||
| 495 | len = write(hwpoison_forget_fd, buf, len); | ||
| 496 | if (len < 0) { | ||
| 497 | perror("hwpoison forget"); | ||
| 498 | return len; | ||
| 499 | } | ||
| 500 | return 0; | ||
| 501 | } | ||
| 365 | 502 | ||
| 366 | /* | 503 | /* |
| 367 | * page frame walker | 504 | * page frame walker |
| @@ -394,104 +531,83 @@ static int hash_slot(uint64_t flags) | |||
| 394 | exit(EXIT_FAILURE); | 531 | exit(EXIT_FAILURE); |
| 395 | } | 532 | } |
| 396 | 533 | ||
| 397 | static void add_page(unsigned long offset, uint64_t flags) | 534 | static void add_page(unsigned long voffset, |
| 535 | unsigned long offset, uint64_t flags) | ||
| 398 | { | 536 | { |
| 399 | flags = expand_overloaded_flags(flags); | 537 | flags = kpageflags_flags(flags); |
| 400 | |||
| 401 | if (!opt_raw) | ||
| 402 | flags = well_known_flags(flags); | ||
| 403 | 538 | ||
| 404 | if (!bit_mask_ok(flags)) | 539 | if (!bit_mask_ok(flags)) |
| 405 | return; | 540 | return; |
| 406 | 541 | ||
| 542 | if (opt_hwpoison) | ||
| 543 | hwpoison_page(offset); | ||
| 544 | if (opt_unpoison) | ||
| 545 | unpoison_page(offset); | ||
| 546 | |||
| 407 | if (opt_list == 1) | 547 | if (opt_list == 1) |
| 408 | show_page_range(offset, flags); | 548 | show_page_range(voffset, offset, flags); |
| 409 | else if (opt_list == 2) | 549 | else if (opt_list == 2) |
| 410 | show_page(offset, flags); | 550 | show_page(voffset, offset, flags); |
| 411 | 551 | ||
| 412 | nr_pages[hash_slot(flags)]++; | 552 | nr_pages[hash_slot(flags)]++; |
| 413 | total_pages++; | 553 | total_pages++; |
| 414 | } | 554 | } |
| 415 | 555 | ||
| 416 | static void walk_pfn(unsigned long index, unsigned long count) | 556 | #define KPAGEFLAGS_BATCH (64 << 10) /* 64k pages */ |
| 557 | static void walk_pfn(unsigned long voffset, | ||
| 558 | unsigned long index, | ||
| 559 | unsigned long count) | ||
| 417 | { | 560 | { |
| 561 | uint64_t buf[KPAGEFLAGS_BATCH]; | ||
| 418 | unsigned long batch; | 562 | unsigned long batch; |
| 419 | unsigned long n; | 563 | unsigned long pages; |
| 420 | unsigned long i; | 564 | unsigned long i; |
| 421 | 565 | ||
| 422 | if (index > ULONG_MAX / KPF_BYTES) | ||
| 423 | fatal("index overflow: %lu\n", index); | ||
| 424 | |||
| 425 | lseek(kpageflags_fd, index * KPF_BYTES, SEEK_SET); | ||
| 426 | |||
| 427 | while (count) { | 566 | while (count) { |
| 428 | uint64_t kpageflags_buf[KPF_BYTES * PAGES_BATCH]; | 567 | batch = min_t(unsigned long, count, KPAGEFLAGS_BATCH); |
| 429 | 568 | pages = kpageflags_read(buf, index, batch); | |
| 430 | batch = min_t(unsigned long, count, PAGES_BATCH); | 569 | if (pages == 0) |
| 431 | n = read(kpageflags_fd, kpageflags_buf, batch * KPF_BYTES); | ||
| 432 | if (n == 0) | ||
| 433 | break; | 570 | break; |
| 434 | if (n < 0) { | ||
| 435 | perror(PROC_KPAGEFLAGS); | ||
| 436 | exit(EXIT_FAILURE); | ||
| 437 | } | ||
| 438 | 571 | ||
| 439 | if (n % KPF_BYTES != 0) | 572 | for (i = 0; i < pages; i++) |
| 440 | fatal("partial read: %lu bytes\n", n); | 573 | add_page(voffset + i, index + i, buf[i]); |
| 441 | n = n / KPF_BYTES; | ||
| 442 | 574 | ||
| 443 | for (i = 0; i < n; i++) | 575 | index += pages; |
| 444 | add_page(index + i, kpageflags_buf[i]); | 576 | count -= pages; |
| 445 | |||
| 446 | index += batch; | ||
| 447 | count -= batch; | ||
| 448 | } | 577 | } |
| 449 | } | 578 | } |
| 450 | 579 | ||
| 451 | 580 | #define PAGEMAP_BATCH (64 << 10) | |
| 452 | #define PAGEMAP_BATCH 4096 | 581 | static void walk_vma(unsigned long index, unsigned long count) |
| 453 | static unsigned long task_pfn(unsigned long pgoff) | ||
| 454 | { | 582 | { |
| 455 | static uint64_t buf[PAGEMAP_BATCH]; | 583 | uint64_t buf[PAGEMAP_BATCH]; |
| 456 | static unsigned long start; | 584 | unsigned long batch; |
| 457 | static long count; | 585 | unsigned long pages; |
| 458 | uint64_t pfn; | 586 | unsigned long pfn; |
| 587 | unsigned long i; | ||
| 459 | 588 | ||
| 460 | if (pgoff < start || pgoff >= start + count) { | 589 | while (count) { |
| 461 | if (lseek64(pagemap_fd, | 590 | batch = min_t(unsigned long, count, PAGEMAP_BATCH); |
| 462 | (uint64_t)pgoff * PM_ENTRY_BYTES, | 591 | pages = pagemap_read(buf, index, batch); |
| 463 | SEEK_SET) < 0) { | 592 | if (pages == 0) |
| 464 | perror("pagemap seek"); | 593 | break; |
| 465 | exit(EXIT_FAILURE); | ||
| 466 | } | ||
| 467 | count = read(pagemap_fd, buf, sizeof(buf)); | ||
| 468 | if (count == 0) | ||
| 469 | return 0; | ||
| 470 | if (count < 0) { | ||
| 471 | perror("pagemap read"); | ||
| 472 | exit(EXIT_FAILURE); | ||
| 473 | } | ||
| 474 | if (count % PM_ENTRY_BYTES) { | ||
| 475 | fatal("pagemap read not aligned.\n"); | ||
| 476 | exit(EXIT_FAILURE); | ||
| 477 | } | ||
| 478 | count /= PM_ENTRY_BYTES; | ||
| 479 | start = pgoff; | ||
| 480 | } | ||
| 481 | 594 | ||
| 482 | pfn = buf[pgoff - start]; | 595 | for (i = 0; i < pages; i++) { |
| 483 | if (pfn & PM_PRESENT) | 596 | pfn = pagemap_pfn(buf[i]); |
| 484 | pfn = PM_PFRAME(pfn); | 597 | if (pfn) |
| 485 | else | 598 | walk_pfn(index + i, pfn, 1); |
| 486 | pfn = 0; | 599 | } |
| 487 | 600 | ||
| 488 | return pfn; | 601 | index += pages; |
| 602 | count -= pages; | ||
| 603 | } | ||
| 489 | } | 604 | } |
| 490 | 605 | ||
| 491 | static void walk_task(unsigned long index, unsigned long count) | 606 | static void walk_task(unsigned long index, unsigned long count) |
| 492 | { | 607 | { |
| 493 | int i = 0; | ||
| 494 | const unsigned long end = index + count; | 608 | const unsigned long end = index + count; |
| 609 | unsigned long start; | ||
| 610 | int i = 0; | ||
| 495 | 611 | ||
| 496 | while (index < end) { | 612 | while (index < end) { |
| 497 | 613 | ||
| @@ -501,15 +617,11 @@ static void walk_task(unsigned long index, unsigned long count) | |||
| 501 | if (pg_start[i] >= end) | 617 | if (pg_start[i] >= end) |
| 502 | return; | 618 | return; |
| 503 | 619 | ||
| 504 | voffset = max_t(unsigned long, pg_start[i], index); | 620 | start = max_t(unsigned long, pg_start[i], index); |
| 505 | index = min_t(unsigned long, pg_end[i], end); | 621 | index = min_t(unsigned long, pg_end[i], end); |
| 506 | 622 | ||
| 507 | assert(voffset < index); | 623 | assert(start < index); |
| 508 | for (; voffset < index; voffset++) { | 624 | walk_vma(start, index - start); |
| 509 | unsigned long pfn = task_pfn(voffset); | ||
| 510 | if (pfn) | ||
| 511 | walk_pfn(pfn, 1); | ||
| 512 | } | ||
| 513 | } | 625 | } |
| 514 | } | 626 | } |
| 515 | 627 | ||
| @@ -527,18 +639,14 @@ static void walk_addr_ranges(void) | |||
| 527 | { | 639 | { |
| 528 | int i; | 640 | int i; |
| 529 | 641 | ||
| 530 | kpageflags_fd = open(PROC_KPAGEFLAGS, O_RDONLY); | 642 | kpageflags_fd = checked_open(PROC_KPAGEFLAGS, O_RDONLY); |
| 531 | if (kpageflags_fd < 0) { | ||
| 532 | perror(PROC_KPAGEFLAGS); | ||
| 533 | exit(EXIT_FAILURE); | ||
| 534 | } | ||
| 535 | 643 | ||
| 536 | if (!nr_addr_ranges) | 644 | if (!nr_addr_ranges) |
| 537 | add_addr_range(0, ULONG_MAX); | 645 | add_addr_range(0, ULONG_MAX); |
| 538 | 646 | ||
| 539 | for (i = 0; i < nr_addr_ranges; i++) | 647 | for (i = 0; i < nr_addr_ranges; i++) |
| 540 | if (!opt_pid) | 648 | if (!opt_pid) |
| 541 | walk_pfn(opt_offset[i], opt_size[i]); | 649 | walk_pfn(0, opt_offset[i], opt_size[i]); |
| 542 | else | 650 | else |
| 543 | walk_task(opt_offset[i], opt_size[i]); | 651 | walk_task(opt_offset[i], opt_size[i]); |
| 544 | 652 | ||
| @@ -575,6 +683,8 @@ static void usage(void) | |||
| 575 | " -l|--list Show page details in ranges\n" | 683 | " -l|--list Show page details in ranges\n" |
| 576 | " -L|--list-each Show page details one by one\n" | 684 | " -L|--list-each Show page details one by one\n" |
| 577 | " -N|--no-summary Don't show summay info\n" | 685 | " -N|--no-summary Don't show summay info\n" |
| 686 | " -X|--hwpoison hwpoison pages\n" | ||
| 687 | " -x|--unpoison unpoison pages\n" | ||
| 578 | " -h|--help Show this usage message\n" | 688 | " -h|--help Show this usage message\n" |
| 579 | "addr-spec:\n" | 689 | "addr-spec:\n" |
| 580 | " N one page at offset N (unit: pages)\n" | 690 | " N one page at offset N (unit: pages)\n" |
| @@ -624,11 +734,7 @@ static void parse_pid(const char *str) | |||
| 624 | opt_pid = parse_number(str); | 734 | opt_pid = parse_number(str); |
| 625 | 735 | ||
| 626 | sprintf(buf, "/proc/%d/pagemap", opt_pid); | 736 | sprintf(buf, "/proc/%d/pagemap", opt_pid); |
| 627 | pagemap_fd = open(buf, O_RDONLY); | 737 | pagemap_fd = checked_open(buf, O_RDONLY); |
| 628 | if (pagemap_fd < 0) { | ||
| 629 | perror(buf); | ||
| 630 | exit(EXIT_FAILURE); | ||
| 631 | } | ||
| 632 | 738 | ||
| 633 | sprintf(buf, "/proc/%d/maps", opt_pid); | 739 | sprintf(buf, "/proc/%d/maps", opt_pid); |
| 634 | file = fopen(buf, "r"); | 740 | file = fopen(buf, "r"); |
| @@ -788,6 +894,8 @@ static struct option opts[] = { | |||
| 788 | { "list" , 0, NULL, 'l' }, | 894 | { "list" , 0, NULL, 'l' }, |
| 789 | { "list-each" , 0, NULL, 'L' }, | 895 | { "list-each" , 0, NULL, 'L' }, |
| 790 | { "no-summary", 0, NULL, 'N' }, | 896 | { "no-summary", 0, NULL, 'N' }, |
| 897 | { "hwpoison" , 0, NULL, 'X' }, | ||
| 898 | { "unpoison" , 0, NULL, 'x' }, | ||
| 791 | { "help" , 0, NULL, 'h' }, | 899 | { "help" , 0, NULL, 'h' }, |
| 792 | { NULL , 0, NULL, 0 } | 900 | { NULL , 0, NULL, 0 } |
| 793 | }; | 901 | }; |
| @@ -799,7 +907,7 @@ int main(int argc, char *argv[]) | |||
| 799 | page_size = getpagesize(); | 907 | page_size = getpagesize(); |
| 800 | 908 | ||
| 801 | while ((c = getopt_long(argc, argv, | 909 | while ((c = getopt_long(argc, argv, |
| 802 | "rp:f:a:b:lLNh", opts, NULL)) != -1) { | 910 | "rp:f:a:b:lLNXxh", opts, NULL)) != -1) { |
| 803 | switch (c) { | 911 | switch (c) { |
| 804 | case 'r': | 912 | case 'r': |
| 805 | opt_raw = 1; | 913 | opt_raw = 1; |
| @@ -825,6 +933,14 @@ int main(int argc, char *argv[]) | |||
| 825 | case 'N': | 933 | case 'N': |
| 826 | opt_no_summary = 1; | 934 | opt_no_summary = 1; |
| 827 | break; | 935 | break; |
| 936 | case 'X': | ||
| 937 | opt_hwpoison = 1; | ||
| 938 | prepare_hwpoison_fd(); | ||
| 939 | break; | ||
| 940 | case 'x': | ||
| 941 | opt_unpoison = 1; | ||
| 942 | prepare_hwpoison_fd(); | ||
| 943 | break; | ||
| 828 | case 'h': | 944 | case 'h': |
| 829 | usage(); | 945 | usage(); |
| 830 | exit(0); | 946 | exit(0); |
| @@ -844,7 +960,7 @@ int main(int argc, char *argv[]) | |||
| 844 | walk_addr_ranges(); | 960 | walk_addr_ranges(); |
| 845 | 961 | ||
| 846 | if (opt_list == 1) | 962 | if (opt_list == 1) |
| 847 | show_page_range(0, 0); /* drain the buffer */ | 963 | show_page_range(0, 0, 0); /* drain the buffer */ |
| 848 | 964 | ||
| 849 | if (opt_no_summary) | 965 | if (opt_no_summary) |
| 850 | return 0; | 966 | return 0; |
diff --git a/Documentation/vm/pagemap.txt b/Documentation/vm/pagemap.txt index 600a304a828c..df09b9650a81 100644 --- a/Documentation/vm/pagemap.txt +++ b/Documentation/vm/pagemap.txt | |||
| @@ -57,7 +57,9 @@ There are three components to pagemap: | |||
| 57 | 16. COMPOUND_TAIL | 57 | 16. COMPOUND_TAIL |
| 58 | 16. HUGE | 58 | 16. HUGE |
| 59 | 18. UNEVICTABLE | 59 | 18. UNEVICTABLE |
| 60 | 19. HWPOISON | ||
| 60 | 20. NOPAGE | 61 | 20. NOPAGE |
| 62 | 21. KSM | ||
| 61 | 63 | ||
| 62 | Short descriptions to the page flags: | 64 | Short descriptions to the page flags: |
| 63 | 65 | ||
| @@ -86,9 +88,15 @@ Short descriptions to the page flags: | |||
| 86 | 17. HUGE | 88 | 17. HUGE |
| 87 | this is an integral part of a HugeTLB page | 89 | this is an integral part of a HugeTLB page |
| 88 | 90 | ||
| 91 | 19. HWPOISON | ||
| 92 | hardware detected memory corruption on this page: don't touch the data! | ||
| 93 | |||
| 89 | 20. NOPAGE | 94 | 20. NOPAGE |
| 90 | no page frame exists at the requested address | 95 | no page frame exists at the requested address |
| 91 | 96 | ||
| 97 | 21. KSM | ||
| 98 | identical memory pages dynamically shared between one or more processes | ||
| 99 | |||
| 92 | [IO related page flags] | 100 | [IO related page flags] |
| 93 | 1. ERROR IO error occurred | 101 | 1. ERROR IO error occurred |
| 94 | 3. UPTODATE page has up-to-date data | 102 | 3. UPTODATE page has up-to-date data |
diff --git a/MAINTAINERS b/MAINTAINERS index 09a2028bab7f..e1da925b38c8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -3643,6 +3643,13 @@ F: Documentation/blockdev/nbd.txt | |||
| 3643 | F: drivers/block/nbd.c | 3643 | F: drivers/block/nbd.c |
| 3644 | F: include/linux/nbd.h | 3644 | F: include/linux/nbd.h |
| 3645 | 3645 | ||
| 3646 | NETWORK DROP MONITOR | ||
| 3647 | M: Neil Horman <nhorman@tuxdriver.com> | ||
| 3648 | L: netdev@vger.kernel.org | ||
| 3649 | S: Maintained | ||
| 3650 | W: https://fedorahosted.org/dropwatch/ | ||
| 3651 | F: net/core/drop_monitor.c | ||
| 3652 | |||
| 3646 | NETWORKING [GENERAL] | 3653 | NETWORKING [GENERAL] |
| 3647 | M: "David S. Miller" <davem@davemloft.net> | 3654 | M: "David S. Miller" <davem@davemloft.net> |
| 3648 | L: netdev@vger.kernel.org | 3655 | L: netdev@vger.kernel.org |
| @@ -3973,6 +3980,7 @@ F: drivers/block/paride/ | |||
| 3973 | PARISC ARCHITECTURE | 3980 | PARISC ARCHITECTURE |
| 3974 | M: Kyle McMartin <kyle@mcmartin.ca> | 3981 | M: Kyle McMartin <kyle@mcmartin.ca> |
| 3975 | M: Helge Deller <deller@gmx.de> | 3982 | M: Helge Deller <deller@gmx.de> |
| 3983 | M: "James E.J. Bottomley" <jejb@parisc-linux.org> | ||
| 3976 | L: linux-parisc@vger.kernel.org | 3984 | L: linux-parisc@vger.kernel.org |
| 3977 | W: http://www.parisc-linux.org/ | 3985 | W: http://www.parisc-linux.org/ |
| 3978 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6.git | 3986 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6.git |
diff --git a/arch/arm/mach-omap2/clock34xx.c b/arch/arm/mach-omap2/clock34xx.c index fafcd32e6907..489556eecbd1 100644 --- a/arch/arm/mach-omap2/clock34xx.c +++ b/arch/arm/mach-omap2/clock34xx.c | |||
| @@ -338,6 +338,13 @@ static struct omap_clk omap34xx_clks[] = { | |||
| 338 | */ | 338 | */ |
| 339 | #define SDRC_MPURATE_LOOPS 96 | 339 | #define SDRC_MPURATE_LOOPS 96 |
| 340 | 340 | ||
| 341 | /* | ||
| 342 | * DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks | ||
| 343 | * that are sourced by DPLL5, and both of these require this clock | ||
| 344 | * to be at 120 MHz for proper operation. | ||
| 345 | */ | ||
| 346 | #define DPLL5_FREQ_FOR_USBHOST 120000000 | ||
| 347 | |||
| 341 | /** | 348 | /** |
| 342 | * omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI | 349 | * omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI |
| 343 | * @clk: struct clk * being enabled | 350 | * @clk: struct clk * being enabled |
| @@ -1056,6 +1063,28 @@ void omap2_clk_prepare_for_reboot(void) | |||
| 1056 | #endif | 1063 | #endif |
| 1057 | } | 1064 | } |
| 1058 | 1065 | ||
| 1066 | static void omap3_clk_lock_dpll5(void) | ||
| 1067 | { | ||
| 1068 | struct clk *dpll5_clk; | ||
| 1069 | struct clk *dpll5_m2_clk; | ||
| 1070 | |||
| 1071 | dpll5_clk = clk_get(NULL, "dpll5_ck"); | ||
| 1072 | clk_set_rate(dpll5_clk, DPLL5_FREQ_FOR_USBHOST); | ||
| 1073 | clk_enable(dpll5_clk); | ||
| 1074 | |||
| 1075 | /* Enable autoidle to allow it to enter low power bypass */ | ||
| 1076 | omap3_dpll_allow_idle(dpll5_clk); | ||
| 1077 | |||
| 1078 | /* Program dpll5_m2_clk divider for no division */ | ||
| 1079 | dpll5_m2_clk = clk_get(NULL, "dpll5_m2_ck"); | ||
| 1080 | clk_enable(dpll5_m2_clk); | ||
| 1081 | clk_set_rate(dpll5_m2_clk, DPLL5_FREQ_FOR_USBHOST); | ||
| 1082 | |||
| 1083 | clk_disable(dpll5_m2_clk); | ||
| 1084 | clk_disable(dpll5_clk); | ||
| 1085 | return; | ||
| 1086 | } | ||
| 1087 | |||
| 1059 | /* REVISIT: Move this init stuff out into clock.c */ | 1088 | /* REVISIT: Move this init stuff out into clock.c */ |
| 1060 | 1089 | ||
| 1061 | /* | 1090 | /* |
| @@ -1148,6 +1177,12 @@ int __init omap2_clk_init(void) | |||
| 1148 | */ | 1177 | */ |
| 1149 | clk_enable_init_clocks(); | 1178 | clk_enable_init_clocks(); |
| 1150 | 1179 | ||
| 1180 | /* | ||
| 1181 | * Lock DPLL5 and put it in autoidle. | ||
| 1182 | */ | ||
| 1183 | if (omap_rev() >= OMAP3430_REV_ES2_0) | ||
| 1184 | omap3_clk_lock_dpll5(); | ||
| 1185 | |||
| 1151 | /* Avoid sleeping during omap2_clk_prepare_for_reboot() */ | 1186 | /* Avoid sleeping during omap2_clk_prepare_for_reboot() */ |
| 1152 | /* REVISIT: not yet ready for 343x */ | 1187 | /* REVISIT: not yet ready for 343x */ |
| 1153 | #if 0 | 1188 | #if 0 |
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c index 1b4c1600f8d8..2fc4d6abbd0a 100644 --- a/arch/arm/mach-omap2/pm-debug.c +++ b/arch/arm/mach-omap2/pm-debug.c | |||
| @@ -541,7 +541,7 @@ static int __init pm_dbg_init(void) | |||
| 541 | printk(KERN_ERR "%s: only OMAP3 supported\n", __func__); | 541 | printk(KERN_ERR "%s: only OMAP3 supported\n", __func__); |
| 542 | return -ENODEV; | 542 | return -ENODEV; |
| 543 | } | 543 | } |
| 544 | 544 | ||
| 545 | d = debugfs_create_dir("pm_debug", NULL); | 545 | d = debugfs_create_dir("pm_debug", NULL); |
| 546 | if (IS_ERR(d)) | 546 | if (IS_ERR(d)) |
| 547 | return PTR_ERR(d); | 547 | return PTR_ERR(d); |
| @@ -551,7 +551,7 @@ static int __init pm_dbg_init(void) | |||
| 551 | (void) debugfs_create_file("time", S_IRUGO, | 551 | (void) debugfs_create_file("time", S_IRUGO, |
| 552 | d, (void *)DEBUG_FILE_TIMERS, &debug_fops); | 552 | d, (void *)DEBUG_FILE_TIMERS, &debug_fops); |
| 553 | 553 | ||
| 554 | pwrdm_for_each(pwrdms_setup, (void *)d); | 554 | pwrdm_for_each_nolock(pwrdms_setup, (void *)d); |
| 555 | 555 | ||
| 556 | pm_dbg_dir = debugfs_create_dir("registers", d); | 556 | pm_dbg_dir = debugfs_create_dir("registers", d); |
| 557 | if (IS_ERR(pm_dbg_dir)) | 557 | if (IS_ERR(pm_dbg_dir)) |
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index 0ff5a6c53aa0..378c2f618358 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c | |||
| @@ -51,97 +51,112 @@ static void (*_omap_sram_idle)(u32 *addr, int save_state); | |||
| 51 | 51 | ||
| 52 | static struct powerdomain *mpu_pwrdm; | 52 | static struct powerdomain *mpu_pwrdm; |
| 53 | 53 | ||
| 54 | /* PRCM Interrupt Handler for wakeups */ | 54 | /* |
| 55 | static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id) | 55 | * PRCM Interrupt Handler Helper Function |
| 56 | * | ||
| 57 | * The purpose of this function is to clear any wake-up events latched | ||
| 58 | * in the PRCM PM_WKST_x registers. It is possible that a wake-up event | ||
| 59 | * may occur whilst attempting to clear a PM_WKST_x register and thus | ||
| 60 | * set another bit in this register. A while loop is used to ensure | ||
| 61 | * that any peripheral wake-up events occurring while attempting to | ||
| 62 | * clear the PM_WKST_x are detected and cleared. | ||
| 63 | */ | ||
| 64 | static int prcm_clear_mod_irqs(s16 module, u8 regs) | ||
| 56 | { | 65 | { |
| 57 | u32 wkst, irqstatus_mpu; | 66 | u32 wkst, fclk, iclk, clken; |
| 58 | u32 fclk, iclk; | 67 | u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1; |
| 59 | 68 | u16 fclk_off = (regs == 3) ? OMAP3430ES2_CM_FCLKEN3 : CM_FCLKEN1; | |
| 60 | /* WKUP */ | 69 | u16 iclk_off = (regs == 3) ? CM_ICLKEN3 : CM_ICLKEN1; |
| 61 | wkst = prm_read_mod_reg(WKUP_MOD, PM_WKST); | 70 | u16 grpsel_off = (regs == 3) ? |
| 71 | OMAP3430ES2_PM_MPUGRPSEL3 : OMAP3430_PM_MPUGRPSEL; | ||
| 72 | int c = 0; | ||
| 73 | |||
| 74 | wkst = prm_read_mod_reg(module, wkst_off); | ||
| 75 | wkst &= prm_read_mod_reg(module, grpsel_off); | ||
| 62 | if (wkst) { | 76 | if (wkst) { |
| 63 | iclk = cm_read_mod_reg(WKUP_MOD, CM_ICLKEN); | 77 | iclk = cm_read_mod_reg(module, iclk_off); |
| 64 | fclk = cm_read_mod_reg(WKUP_MOD, CM_FCLKEN); | 78 | fclk = cm_read_mod_reg(module, fclk_off); |
| 65 | cm_set_mod_reg_bits(wkst, WKUP_MOD, CM_ICLKEN); | 79 | while (wkst) { |
| 66 | cm_set_mod_reg_bits(wkst, WKUP_MOD, CM_FCLKEN); | 80 | clken = wkst; |
| 67 | prm_write_mod_reg(wkst, WKUP_MOD, PM_WKST); | 81 | cm_set_mod_reg_bits(clken, module, iclk_off); |
| 68 | while (prm_read_mod_reg(WKUP_MOD, PM_WKST)) | 82 | /* |
| 69 | cpu_relax(); | 83 | * For USBHOST, we don't know whether HOST1 or |
| 70 | cm_write_mod_reg(iclk, WKUP_MOD, CM_ICLKEN); | 84 | * HOST2 woke us up, so enable both f-clocks |
| 71 | cm_write_mod_reg(fclk, WKUP_MOD, CM_FCLKEN); | 85 | */ |
| 86 | if (module == OMAP3430ES2_USBHOST_MOD) | ||
| 87 | clken |= 1 << OMAP3430ES2_EN_USBHOST2_SHIFT; | ||
| 88 | cm_set_mod_reg_bits(clken, module, fclk_off); | ||
| 89 | prm_write_mod_reg(wkst, module, wkst_off); | ||
| 90 | wkst = prm_read_mod_reg(module, wkst_off); | ||
| 91 | c++; | ||
| 92 | } | ||
| 93 | cm_write_mod_reg(iclk, module, iclk_off); | ||
| 94 | cm_write_mod_reg(fclk, module, fclk_off); | ||
| 72 | } | 95 | } |
| 73 | 96 | ||
| 74 | /* CORE */ | 97 | return c; |
| 75 | wkst = prm_read_mod_reg(CORE_MOD, PM_WKST1); | 98 | } |
| 76 | if (wkst) { | ||
| 77 | iclk = cm_read_mod_reg(CORE_MOD, CM_ICLKEN1); | ||
| 78 | fclk = cm_read_mod_reg(CORE_MOD, CM_FCLKEN1); | ||
| 79 | cm_set_mod_reg_bits(wkst, CORE_MOD, CM_ICLKEN1); | ||
| 80 | cm_set_mod_reg_bits(wkst, CORE_MOD, CM_FCLKEN1); | ||
| 81 | prm_write_mod_reg(wkst, CORE_MOD, PM_WKST1); | ||
| 82 | while (prm_read_mod_reg(CORE_MOD, PM_WKST1)) | ||
| 83 | cpu_relax(); | ||
| 84 | cm_write_mod_reg(iclk, CORE_MOD, CM_ICLKEN1); | ||
| 85 | cm_write_mod_reg(fclk, CORE_MOD, CM_FCLKEN1); | ||
| 86 | } | ||
| 87 | wkst = prm_read_mod_reg(CORE_MOD, OMAP3430ES2_PM_WKST3); | ||
| 88 | if (wkst) { | ||
| 89 | iclk = cm_read_mod_reg(CORE_MOD, CM_ICLKEN3); | ||
| 90 | fclk = cm_read_mod_reg(CORE_MOD, OMAP3430ES2_CM_FCLKEN3); | ||
| 91 | cm_set_mod_reg_bits(wkst, CORE_MOD, CM_ICLKEN3); | ||
| 92 | cm_set_mod_reg_bits(wkst, CORE_MOD, OMAP3430ES2_CM_FCLKEN3); | ||
| 93 | prm_write_mod_reg(wkst, CORE_MOD, OMAP3430ES2_PM_WKST3); | ||
| 94 | while (prm_read_mod_reg(CORE_MOD, OMAP3430ES2_PM_WKST3)) | ||
| 95 | cpu_relax(); | ||
| 96 | cm_write_mod_reg(iclk, CORE_MOD, CM_ICLKEN3); | ||
| 97 | cm_write_mod_reg(fclk, CORE_MOD, OMAP3430ES2_CM_FCLKEN3); | ||
| 98 | } | ||
| 99 | 99 | ||
| 100 | /* PER */ | 100 | static int _prcm_int_handle_wakeup(void) |
| 101 | wkst = prm_read_mod_reg(OMAP3430_PER_MOD, PM_WKST); | 101 | { |
| 102 | if (wkst) { | 102 | int c; |
| 103 | iclk = cm_read_mod_reg(OMAP3430_PER_MOD, CM_ICLKEN); | ||
| 104 | fclk = cm_read_mod_reg(OMAP3430_PER_MOD, CM_FCLKEN); | ||
| 105 | cm_set_mod_reg_bits(wkst, OMAP3430_PER_MOD, CM_ICLKEN); | ||
| 106 | cm_set_mod_reg_bits(wkst, OMAP3430_PER_MOD, CM_FCLKEN); | ||
| 107 | prm_write_mod_reg(wkst, OMAP3430_PER_MOD, PM_WKST); | ||
| 108 | while (prm_read_mod_reg(OMAP3430_PER_MOD, PM_WKST)) | ||
| 109 | cpu_relax(); | ||
| 110 | cm_write_mod_reg(iclk, OMAP3430_PER_MOD, CM_ICLKEN); | ||
| 111 | cm_write_mod_reg(fclk, OMAP3430_PER_MOD, CM_FCLKEN); | ||
| 112 | } | ||
| 113 | 103 | ||
| 104 | c = prcm_clear_mod_irqs(WKUP_MOD, 1); | ||
| 105 | c += prcm_clear_mod_irqs(CORE_MOD, 1); | ||
| 106 | c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1); | ||
| 114 | if (omap_rev() > OMAP3430_REV_ES1_0) { | 107 | if (omap_rev() > OMAP3430_REV_ES1_0) { |
| 115 | /* USBHOST */ | 108 | c += prcm_clear_mod_irqs(CORE_MOD, 3); |
| 116 | wkst = prm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, PM_WKST); | 109 | c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1); |
| 117 | if (wkst) { | ||
| 118 | iclk = cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, | ||
| 119 | CM_ICLKEN); | ||
| 120 | fclk = cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, | ||
| 121 | CM_FCLKEN); | ||
| 122 | cm_set_mod_reg_bits(wkst, OMAP3430ES2_USBHOST_MOD, | ||
| 123 | CM_ICLKEN); | ||
| 124 | cm_set_mod_reg_bits(wkst, OMAP3430ES2_USBHOST_MOD, | ||
| 125 | CM_FCLKEN); | ||
| 126 | prm_write_mod_reg(wkst, OMAP3430ES2_USBHOST_MOD, | ||
| 127 | PM_WKST); | ||
| 128 | while (prm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, | ||
| 129 | PM_WKST)) | ||
| 130 | cpu_relax(); | ||
| 131 | cm_write_mod_reg(iclk, OMAP3430ES2_USBHOST_MOD, | ||
| 132 | CM_ICLKEN); | ||
| 133 | cm_write_mod_reg(fclk, OMAP3430ES2_USBHOST_MOD, | ||
| 134 | CM_FCLKEN); | ||
| 135 | } | ||
| 136 | } | 110 | } |
| 137 | 111 | ||
| 138 | irqstatus_mpu = prm_read_mod_reg(OCP_MOD, | 112 | return c; |
| 139 | OMAP3_PRM_IRQSTATUS_MPU_OFFSET); | 113 | } |
| 140 | prm_write_mod_reg(irqstatus_mpu, OCP_MOD, | 114 | |
| 141 | OMAP3_PRM_IRQSTATUS_MPU_OFFSET); | 115 | /* |
| 116 | * PRCM Interrupt Handler | ||
| 117 | * | ||
| 118 | * The PRM_IRQSTATUS_MPU register indicates if there are any pending | ||
| 119 | * interrupts from the PRCM for the MPU. These bits must be cleared in | ||
| 120 | * order to clear the PRCM interrupt. The PRCM interrupt handler is | ||
| 121 | * implemented to simply clear the PRM_IRQSTATUS_MPU in order to clear | ||
| 122 | * the PRCM interrupt. Please note that bit 0 of the PRM_IRQSTATUS_MPU | ||
| 123 | * register indicates that a wake-up event is pending for the MPU and | ||
| 124 | * this bit can only be cleared if the all the wake-up events latched | ||
| 125 | * in the various PM_WKST_x registers have been cleared. The interrupt | ||
| 126 | * handler is implemented using a do-while loop so that if a wake-up | ||
| 127 | * event occurred during the processing of the prcm interrupt handler | ||
| 128 | * (setting a bit in the corresponding PM_WKST_x register and thus | ||
| 129 | * preventing us from clearing bit 0 of the PRM_IRQSTATUS_MPU register) | ||
| 130 | * this would be handled. | ||
| 131 | */ | ||
| 132 | static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id) | ||
| 133 | { | ||
| 134 | u32 irqstatus_mpu; | ||
| 135 | int c = 0; | ||
| 136 | |||
| 137 | do { | ||
| 138 | irqstatus_mpu = prm_read_mod_reg(OCP_MOD, | ||
| 139 | OMAP3_PRM_IRQSTATUS_MPU_OFFSET); | ||
| 140 | |||
| 141 | if (irqstatus_mpu & (OMAP3430_WKUP_ST | OMAP3430_IO_ST)) { | ||
| 142 | c = _prcm_int_handle_wakeup(); | ||
| 143 | |||
| 144 | /* | ||
| 145 | * Is the MPU PRCM interrupt handler racing with the | ||
| 146 | * IVA2 PRCM interrupt handler ? | ||
| 147 | */ | ||
| 148 | WARN(c == 0, "prcm: WARNING: PRCM indicated MPU wakeup " | ||
| 149 | "but no wakeup sources are marked\n"); | ||
| 150 | } else { | ||
| 151 | /* XXX we need to expand our PRCM interrupt handler */ | ||
| 152 | WARN(1, "prcm: WARNING: PRCM interrupt received, but " | ||
| 153 | "no code to handle it (%08x)\n", irqstatus_mpu); | ||
| 154 | } | ||
| 155 | |||
| 156 | prm_write_mod_reg(irqstatus_mpu, OCP_MOD, | ||
| 157 | OMAP3_PRM_IRQSTATUS_MPU_OFFSET); | ||
| 142 | 158 | ||
| 143 | while (prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET)) | 159 | } while (prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET)); |
| 144 | cpu_relax(); | ||
| 145 | 160 | ||
| 146 | return IRQ_HANDLED; | 161 | return IRQ_HANDLED; |
| 147 | } | 162 | } |
| @@ -624,6 +639,16 @@ static void __init prcm_setup_regs(void) | |||
| 624 | prm_write_mod_reg(OMAP3430_IO_EN | OMAP3430_WKUP_EN, | 639 | prm_write_mod_reg(OMAP3430_IO_EN | OMAP3430_WKUP_EN, |
| 625 | OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET); | 640 | OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET); |
| 626 | 641 | ||
| 642 | /* Enable GPIO wakeups in PER */ | ||
| 643 | prm_write_mod_reg(OMAP3430_EN_GPIO2 | OMAP3430_EN_GPIO3 | | ||
| 644 | OMAP3430_EN_GPIO4 | OMAP3430_EN_GPIO5 | | ||
| 645 | OMAP3430_EN_GPIO6, OMAP3430_PER_MOD, PM_WKEN); | ||
| 646 | /* and allow them to wake up MPU */ | ||
| 647 | prm_write_mod_reg(OMAP3430_GRPSEL_GPIO2 | OMAP3430_EN_GPIO3 | | ||
| 648 | OMAP3430_GRPSEL_GPIO4 | OMAP3430_EN_GPIO5 | | ||
| 649 | OMAP3430_GRPSEL_GPIO6, | ||
| 650 | OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL); | ||
| 651 | |||
| 627 | /* Don't attach IVA interrupts */ | 652 | /* Don't attach IVA interrupts */ |
| 628 | prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL); | 653 | prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL); |
| 629 | prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1); | 654 | prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1); |
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c index 2594cbff3947..f00289abd30f 100644 --- a/arch/arm/mach-omap2/powerdomain.c +++ b/arch/arm/mach-omap2/powerdomain.c | |||
| @@ -273,35 +273,50 @@ struct powerdomain *pwrdm_lookup(const char *name) | |||
| 273 | } | 273 | } |
| 274 | 274 | ||
| 275 | /** | 275 | /** |
| 276 | * pwrdm_for_each - call function on each registered clockdomain | 276 | * pwrdm_for_each_nolock - call function on each registered clockdomain |
| 277 | * @fn: callback function * | 277 | * @fn: callback function * |
| 278 | * | 278 | * |
| 279 | * Call the supplied function for each registered powerdomain. The | 279 | * Call the supplied function for each registered powerdomain. The |
| 280 | * callback function can return anything but 0 to bail out early from | 280 | * callback function can return anything but 0 to bail out early from |
| 281 | * the iterator. The callback function is called with the pwrdm_rwlock | 281 | * the iterator. Returns the last return value of the callback function, which |
| 282 | * held for reading, so no powerdomain structure manipulation | 282 | * should be 0 for success or anything else to indicate failure; or -EINVAL if |
| 283 | * functions should be called from the callback, although hardware | 283 | * the function pointer is null. |
| 284 | * powerdomain control functions are fine. Returns the last return | ||
| 285 | * value of the callback function, which should be 0 for success or | ||
| 286 | * anything else to indicate failure; or -EINVAL if the function | ||
| 287 | * pointer is null. | ||
| 288 | */ | 284 | */ |
| 289 | int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user), | 285 | int pwrdm_for_each_nolock(int (*fn)(struct powerdomain *pwrdm, void *user), |
| 290 | void *user) | 286 | void *user) |
| 291 | { | 287 | { |
| 292 | struct powerdomain *temp_pwrdm; | 288 | struct powerdomain *temp_pwrdm; |
| 293 | unsigned long flags; | ||
| 294 | int ret = 0; | 289 | int ret = 0; |
| 295 | 290 | ||
| 296 | if (!fn) | 291 | if (!fn) |
| 297 | return -EINVAL; | 292 | return -EINVAL; |
| 298 | 293 | ||
| 299 | read_lock_irqsave(&pwrdm_rwlock, flags); | ||
| 300 | list_for_each_entry(temp_pwrdm, &pwrdm_list, node) { | 294 | list_for_each_entry(temp_pwrdm, &pwrdm_list, node) { |
| 301 | ret = (*fn)(temp_pwrdm, user); | 295 | ret = (*fn)(temp_pwrdm, user); |
| 302 | if (ret) | 296 | if (ret) |
| 303 | break; | 297 | break; |
| 304 | } | 298 | } |
| 299 | |||
| 300 | return ret; | ||
| 301 | } | ||
| 302 | |||
| 303 | /** | ||
| 304 | * pwrdm_for_each - call function on each registered clockdomain | ||
| 305 | * @fn: callback function * | ||
| 306 | * | ||
| 307 | * This function is the same as 'pwrdm_for_each_nolock()', but keeps the | ||
| 308 | * &pwrdm_rwlock locked for reading, so no powerdomain structure manipulation | ||
| 309 | * functions should be called from the callback, although hardware powerdomain | ||
| 310 | * control functions are fine. | ||
| 311 | */ | ||
| 312 | int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user), | ||
| 313 | void *user) | ||
| 314 | { | ||
| 315 | unsigned long flags; | ||
| 316 | int ret; | ||
| 317 | |||
| 318 | read_lock_irqsave(&pwrdm_rwlock, flags); | ||
| 319 | ret = pwrdm_for_each_nolock(fn, user); | ||
| 305 | read_unlock_irqrestore(&pwrdm_rwlock, flags); | 320 | read_unlock_irqrestore(&pwrdm_rwlock, flags); |
| 306 | 321 | ||
| 307 | return ret; | 322 | return ret; |
diff --git a/arch/arm/plat-omap/include/mach/cpu.h b/arch/arm/plat-omap/include/mach/cpu.h index 11e73d9e8928..f129efb3075e 100644 --- a/arch/arm/plat-omap/include/mach/cpu.h +++ b/arch/arm/plat-omap/include/mach/cpu.h | |||
| @@ -303,32 +303,21 @@ IS_OMAP_TYPE(3430, 0x3430) | |||
| 303 | #define cpu_is_omap2430() 0 | 303 | #define cpu_is_omap2430() 0 |
| 304 | #define cpu_is_omap3430() 0 | 304 | #define cpu_is_omap3430() 0 |
| 305 | 305 | ||
| 306 | #if defined(MULTI_OMAP1) | ||
| 307 | # if defined(CONFIG_ARCH_OMAP730) | ||
| 308 | # undef cpu_is_omap730 | ||
| 309 | # define cpu_is_omap730() is_omap730() | ||
| 310 | # endif | ||
| 311 | # if defined(CONFIG_ARCH_OMAP850) | ||
| 312 | # undef cpu_is_omap850 | ||
| 313 | # define cpu_is_omap850() is_omap850() | ||
| 314 | # endif | ||
| 315 | #else | ||
| 316 | # if defined(CONFIG_ARCH_OMAP730) | ||
| 317 | # undef cpu_is_omap730 | ||
| 318 | # define cpu_is_omap730() 1 | ||
| 319 | # endif | ||
| 320 | #endif | ||
| 321 | #else | ||
| 322 | # if defined(CONFIG_ARCH_OMAP850) | ||
| 323 | # undef cpu_is_omap850 | ||
| 324 | # define cpu_is_omap850() 1 | ||
| 325 | # endif | ||
| 326 | #endif | ||
| 327 | |||
| 328 | /* | 306 | /* |
| 329 | * Whether we have MULTI_OMAP1 or not, we still need to distinguish | 307 | * Whether we have MULTI_OMAP1 or not, we still need to distinguish |
| 330 | * between 330 vs. 1510 and 1611B/5912 vs. 1710. | 308 | * between 730 vs 850, 330 vs. 1510 and 1611B/5912 vs. 1710. |
| 331 | */ | 309 | */ |
| 310 | |||
| 311 | #if defined(CONFIG_ARCH_OMAP730) | ||
| 312 | # undef cpu_is_omap730 | ||
| 313 | # define cpu_is_omap730() is_omap730() | ||
| 314 | #endif | ||
| 315 | |||
| 316 | #if defined(CONFIG_ARCH_OMAP850) | ||
| 317 | # undef cpu_is_omap850 | ||
| 318 | # define cpu_is_omap850() is_omap850() | ||
| 319 | #endif | ||
| 320 | |||
| 332 | #if defined(CONFIG_ARCH_OMAP15XX) | 321 | #if defined(CONFIG_ARCH_OMAP15XX) |
| 333 | # undef cpu_is_omap310 | 322 | # undef cpu_is_omap310 |
| 334 | # undef cpu_is_omap1510 | 323 | # undef cpu_is_omap1510 |
| @@ -433,3 +422,5 @@ IS_OMAP_TYPE(3430, 0x3430) | |||
| 433 | 422 | ||
| 434 | int omap_chip_is(struct omap_chip_id oci); | 423 | int omap_chip_is(struct omap_chip_id oci); |
| 435 | void omap2_check_revision(void); | 424 | void omap2_check_revision(void); |
| 425 | |||
| 426 | #endif | ||
diff --git a/arch/arm/plat-omap/include/mach/powerdomain.h b/arch/arm/plat-omap/include/mach/powerdomain.h index 6271d8556a40..fa6461423bd0 100644 --- a/arch/arm/plat-omap/include/mach/powerdomain.h +++ b/arch/arm/plat-omap/include/mach/powerdomain.h | |||
| @@ -135,6 +135,8 @@ struct powerdomain *pwrdm_lookup(const char *name); | |||
| 135 | 135 | ||
| 136 | int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user), | 136 | int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user), |
| 137 | void *user); | 137 | void *user); |
| 138 | int pwrdm_for_each_nolock(int (*fn)(struct powerdomain *pwrdm, void *user), | ||
| 139 | void *user); | ||
| 138 | 140 | ||
| 139 | int pwrdm_add_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm); | 141 | int pwrdm_add_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm); |
| 140 | int pwrdm_del_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm); | 142 | int pwrdm_del_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm); |
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c index 57f7122a0919..dc3fac3dd0ea 100644 --- a/arch/arm/plat-omap/iovmm.c +++ b/arch/arm/plat-omap/iovmm.c | |||
| @@ -47,7 +47,7 @@ | |||
| 47 | * 'va': mpu virtual address | 47 | * 'va': mpu virtual address |
| 48 | * | 48 | * |
| 49 | * 'c': contiguous memory area | 49 | * 'c': contiguous memory area |
| 50 | * 'd': dicontiguous memory area | 50 | * 'd': discontiguous memory area |
| 51 | * 'a': anonymous memory allocation | 51 | * 'a': anonymous memory allocation |
| 52 | * '()': optional feature | 52 | * '()': optional feature |
| 53 | * | 53 | * |
| @@ -363,8 +363,9 @@ void *da_to_va(struct iommu *obj, u32 da) | |||
| 363 | goto out; | 363 | goto out; |
| 364 | } | 364 | } |
| 365 | va = area->va; | 365 | va = area->va; |
| 366 | mutex_unlock(&obj->mmap_lock); | ||
| 367 | out: | 366 | out: |
| 367 | mutex_unlock(&obj->mmap_lock); | ||
| 368 | |||
| 368 | return va; | 369 | return va; |
| 369 | } | 370 | } |
| 370 | EXPORT_SYMBOL_GPL(da_to_va); | 371 | EXPORT_SYMBOL_GPL(da_to_va); |
| @@ -398,7 +399,7 @@ static inline void sgtable_drain_vmalloc(struct sg_table *sgt) | |||
| 398 | { | 399 | { |
| 399 | /* | 400 | /* |
| 400 | * Actually this is not necessary at all, just exists for | 401 | * Actually this is not necessary at all, just exists for |
| 401 | * consistency of the code readibility. | 402 | * consistency of the code readability. |
| 402 | */ | 403 | */ |
| 403 | BUG_ON(!sgt); | 404 | BUG_ON(!sgt); |
| 404 | } | 405 | } |
| @@ -434,7 +435,7 @@ static inline void sgtable_drain_kmalloc(struct sg_table *sgt) | |||
| 434 | { | 435 | { |
| 435 | /* | 436 | /* |
| 436 | * Actually this is not necessary at all, just exists for | 437 | * Actually this is not necessary at all, just exists for |
| 437 | * consistency of the code readibility | 438 | * consistency of the code readability |
| 438 | */ | 439 | */ |
| 439 | BUG_ON(!sgt); | 440 | BUG_ON(!sgt); |
| 440 | } | 441 | } |
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c index 925f64711c37..75d1f26e5b17 100644 --- a/arch/arm/plat-omap/sram.c +++ b/arch/arm/plat-omap/sram.c | |||
| @@ -270,7 +270,8 @@ void * omap_sram_push(void * start, unsigned long size) | |||
| 270 | omap_sram_ceil -= size; | 270 | omap_sram_ceil -= size; |
| 271 | omap_sram_ceil = ROUND_DOWN(omap_sram_ceil, sizeof(void *)); | 271 | omap_sram_ceil = ROUND_DOWN(omap_sram_ceil, sizeof(void *)); |
| 272 | memcpy((void *)omap_sram_ceil, start, size); | 272 | memcpy((void *)omap_sram_ceil, start, size); |
| 273 | flush_icache_range((unsigned long)start, (unsigned long)(start + size)); | 273 | flush_icache_range((unsigned long)omap_sram_ceil, |
| 274 | (unsigned long)(omap_sram_ceil + size)); | ||
| 274 | 275 | ||
| 275 | return (void *)omap_sram_ceil; | 276 | return (void *)omap_sram_ceil; |
| 276 | } | 277 | } |
diff --git a/arch/m68knommu/kernel/asm-offsets.c b/arch/m68knommu/kernel/asm-offsets.c index 594ee0e657fe..9a8876f715d8 100644 --- a/arch/m68knommu/kernel/asm-offsets.c +++ b/arch/m68knommu/kernel/asm-offsets.c | |||
| @@ -45,25 +45,25 @@ int main(void) | |||
| 45 | DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate)); | 45 | DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate)); |
| 46 | 46 | ||
| 47 | /* offsets into the pt_regs */ | 47 | /* offsets into the pt_regs */ |
| 48 | DEFINE(PT_D0, offsetof(struct pt_regs, d0)); | 48 | DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0)); |
| 49 | DEFINE(PT_ORIG_D0, offsetof(struct pt_regs, orig_d0)); | 49 | DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0)); |
| 50 | DEFINE(PT_D1, offsetof(struct pt_regs, d1)); | 50 | DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1)); |
| 51 | DEFINE(PT_D2, offsetof(struct pt_regs, d2)); | 51 | DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2)); |
| 52 | DEFINE(PT_D3, offsetof(struct pt_regs, d3)); | 52 | DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3)); |
| 53 | DEFINE(PT_D4, offsetof(struct pt_regs, d4)); | 53 | DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4)); |
| 54 | DEFINE(PT_D5, offsetof(struct pt_regs, d5)); | 54 | DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5)); |
| 55 | DEFINE(PT_A0, offsetof(struct pt_regs, a0)); | 55 | DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0)); |
| 56 | DEFINE(PT_A1, offsetof(struct pt_regs, a1)); | 56 | DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1)); |
| 57 | DEFINE(PT_A2, offsetof(struct pt_regs, a2)); | 57 | DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2)); |
| 58 | DEFINE(PT_PC, offsetof(struct pt_regs, pc)); | 58 | DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc)); |
| 59 | DEFINE(PT_SR, offsetof(struct pt_regs, sr)); | 59 | DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr)); |
| 60 | 60 | ||
| 61 | #ifdef CONFIG_COLDFIRE | 61 | #ifdef CONFIG_COLDFIRE |
| 62 | /* bitfields are a bit difficult */ | 62 | /* bitfields are a bit difficult */ |
| 63 | DEFINE(PT_FORMATVEC, offsetof(struct pt_regs, sr) - 2); | 63 | DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, sr) - 2); |
| 64 | #else | 64 | #else |
| 65 | /* bitfields are a bit difficult */ | 65 | /* bitfields are a bit difficult */ |
| 66 | DEFINE(PT_VECTOR, offsetof(struct pt_regs, pc) + 4); | 66 | DEFINE(PT_OFF_VECTOR, offsetof(struct pt_regs, pc) + 4); |
| 67 | #endif | 67 | #endif |
| 68 | 68 | ||
| 69 | /* signal defines */ | 69 | /* signal defines */ |
diff --git a/arch/m68knommu/kernel/entry.S b/arch/m68knommu/kernel/entry.S index f56faa5c9cd9..56043ade3941 100644 --- a/arch/m68knommu/kernel/entry.S +++ b/arch/m68knommu/kernel/entry.S | |||
| @@ -46,7 +46,7 @@ | |||
| 46 | ENTRY(buserr) | 46 | ENTRY(buserr) |
| 47 | SAVE_ALL | 47 | SAVE_ALL |
| 48 | moveq #-1,%d0 | 48 | moveq #-1,%d0 |
| 49 | movel %d0,%sp@(PT_ORIG_D0) | 49 | movel %d0,%sp@(PT_OFF_ORIG_D0) |
| 50 | movel %sp,%sp@- /* stack frame pointer argument */ | 50 | movel %sp,%sp@- /* stack frame pointer argument */ |
| 51 | jsr buserr_c | 51 | jsr buserr_c |
| 52 | addql #4,%sp | 52 | addql #4,%sp |
| @@ -55,7 +55,7 @@ ENTRY(buserr) | |||
| 55 | ENTRY(trap) | 55 | ENTRY(trap) |
| 56 | SAVE_ALL | 56 | SAVE_ALL |
| 57 | moveq #-1,%d0 | 57 | moveq #-1,%d0 |
| 58 | movel %d0,%sp@(PT_ORIG_D0) | 58 | movel %d0,%sp@(PT_OFF_ORIG_D0) |
| 59 | movel %sp,%sp@- /* stack frame pointer argument */ | 59 | movel %sp,%sp@- /* stack frame pointer argument */ |
| 60 | jsr trap_c | 60 | jsr trap_c |
| 61 | addql #4,%sp | 61 | addql #4,%sp |
| @@ -67,7 +67,7 @@ ENTRY(trap) | |||
| 67 | ENTRY(dbginterrupt) | 67 | ENTRY(dbginterrupt) |
| 68 | SAVE_ALL | 68 | SAVE_ALL |
| 69 | moveq #-1,%d0 | 69 | moveq #-1,%d0 |
| 70 | movel %d0,%sp@(PT_ORIG_D0) | 70 | movel %d0,%sp@(PT_OFF_ORIG_D0) |
| 71 | movel %sp,%sp@- /* stack frame pointer argument */ | 71 | movel %sp,%sp@- /* stack frame pointer argument */ |
| 72 | jsr dbginterrupt_c | 72 | jsr dbginterrupt_c |
| 73 | addql #4,%sp | 73 | addql #4,%sp |
diff --git a/arch/m68knommu/mm/init.c b/arch/m68knommu/mm/init.c index b1703c67a4f1..f3236d0b522d 100644 --- a/arch/m68knommu/mm/init.c +++ b/arch/m68knommu/mm/init.c | |||
| @@ -162,7 +162,7 @@ void free_initrd_mem(unsigned long start, unsigned long end) | |||
| 162 | totalram_pages++; | 162 | totalram_pages++; |
| 163 | pages++; | 163 | pages++; |
| 164 | } | 164 | } |
| 165 | printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages); | 165 | printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages * (PAGE_SIZE / 1024)); |
| 166 | } | 166 | } |
| 167 | #endif | 167 | #endif |
| 168 | 168 | ||
diff --git a/arch/m68knommu/platform/5206e/config.c b/arch/m68knommu/platform/5206e/config.c index 0f41ba82a3b5..942397984c66 100644 --- a/arch/m68knommu/platform/5206e/config.c +++ b/arch/m68knommu/platform/5206e/config.c | |||
| @@ -17,7 +17,6 @@ | |||
| 17 | #include <asm/mcfsim.h> | 17 | #include <asm/mcfsim.h> |
| 18 | #include <asm/mcfuart.h> | 18 | #include <asm/mcfuart.h> |
| 19 | #include <asm/mcfdma.h> | 19 | #include <asm/mcfdma.h> |
| 20 | #include <asm/mcfuart.h> | ||
| 21 | 20 | ||
| 22 | /***************************************************************************/ | 21 | /***************************************************************************/ |
| 23 | 22 | ||
diff --git a/arch/m68knommu/platform/68328/entry.S b/arch/m68knommu/platform/68328/entry.S index b1aef72f3baf..9d80d2c42866 100644 --- a/arch/m68knommu/platform/68328/entry.S +++ b/arch/m68knommu/platform/68328/entry.S | |||
| @@ -39,17 +39,17 @@ | |||
| 39 | .globl inthandler7 | 39 | .globl inthandler7 |
| 40 | 40 | ||
| 41 | badsys: | 41 | badsys: |
| 42 | movel #-ENOSYS,%sp@(PT_D0) | 42 | movel #-ENOSYS,%sp@(PT_OFF_D0) |
| 43 | jra ret_from_exception | 43 | jra ret_from_exception |
| 44 | 44 | ||
| 45 | do_trace: | 45 | do_trace: |
| 46 | movel #-ENOSYS,%sp@(PT_D0) /* needed for strace*/ | 46 | movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/ |
| 47 | subql #4,%sp | 47 | subql #4,%sp |
| 48 | SAVE_SWITCH_STACK | 48 | SAVE_SWITCH_STACK |
| 49 | jbsr syscall_trace | 49 | jbsr syscall_trace |
| 50 | RESTORE_SWITCH_STACK | 50 | RESTORE_SWITCH_STACK |
| 51 | addql #4,%sp | 51 | addql #4,%sp |
| 52 | movel %sp@(PT_ORIG_D0),%d1 | 52 | movel %sp@(PT_OFF_ORIG_D0),%d1 |
| 53 | movel #-ENOSYS,%d0 | 53 | movel #-ENOSYS,%d0 |
| 54 | cmpl #NR_syscalls,%d1 | 54 | cmpl #NR_syscalls,%d1 |
| 55 | jcc 1f | 55 | jcc 1f |
| @@ -57,7 +57,7 @@ do_trace: | |||
| 57 | lea sys_call_table, %a0 | 57 | lea sys_call_table, %a0 |
| 58 | jbsr %a0@(%d1) | 58 | jbsr %a0@(%d1) |
| 59 | 59 | ||
| 60 | 1: movel %d0,%sp@(PT_D0) /* save the return value */ | 60 | 1: movel %d0,%sp@(PT_OFF_D0) /* save the return value */ |
| 61 | subql #4,%sp /* dummy return address */ | 61 | subql #4,%sp /* dummy return address */ |
| 62 | SAVE_SWITCH_STACK | 62 | SAVE_SWITCH_STACK |
| 63 | jbsr syscall_trace | 63 | jbsr syscall_trace |
| @@ -75,7 +75,7 @@ ENTRY(system_call) | |||
| 75 | jbsr set_esp0 | 75 | jbsr set_esp0 |
| 76 | addql #4,%sp | 76 | addql #4,%sp |
| 77 | 77 | ||
| 78 | movel %sp@(PT_ORIG_D0),%d0 | 78 | movel %sp@(PT_OFF_ORIG_D0),%d0 |
| 79 | 79 | ||
| 80 | movel %sp,%d1 /* get thread_info pointer */ | 80 | movel %sp,%d1 /* get thread_info pointer */ |
| 81 | andl #-THREAD_SIZE,%d1 | 81 | andl #-THREAD_SIZE,%d1 |
| @@ -88,10 +88,10 @@ ENTRY(system_call) | |||
| 88 | lea sys_call_table,%a0 | 88 | lea sys_call_table,%a0 |
| 89 | movel %a0@(%d0), %a0 | 89 | movel %a0@(%d0), %a0 |
| 90 | jbsr %a0@ | 90 | jbsr %a0@ |
| 91 | movel %d0,%sp@(PT_D0) /* save the return value*/ | 91 | movel %d0,%sp@(PT_OFF_D0) /* save the return value*/ |
| 92 | 92 | ||
| 93 | ret_from_exception: | 93 | ret_from_exception: |
| 94 | btst #5,%sp@(PT_SR) /* check if returning to kernel*/ | 94 | btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel*/ |
| 95 | jeq Luser_return /* if so, skip resched, signals*/ | 95 | jeq Luser_return /* if so, skip resched, signals*/ |
| 96 | 96 | ||
| 97 | Lkernel_return: | 97 | Lkernel_return: |
| @@ -133,7 +133,7 @@ Lreturn: | |||
| 133 | */ | 133 | */ |
| 134 | inthandler1: | 134 | inthandler1: |
| 135 | SAVE_ALL | 135 | SAVE_ALL |
| 136 | movew %sp@(PT_VECTOR), %d0 | 136 | movew %sp@(PT_OFF_VECTOR), %d0 |
| 137 | and #0x3ff, %d0 | 137 | and #0x3ff, %d0 |
| 138 | 138 | ||
| 139 | movel %sp,%sp@- | 139 | movel %sp,%sp@- |
| @@ -144,7 +144,7 @@ inthandler1: | |||
| 144 | 144 | ||
| 145 | inthandler2: | 145 | inthandler2: |
| 146 | SAVE_ALL | 146 | SAVE_ALL |
| 147 | movew %sp@(PT_VECTOR), %d0 | 147 | movew %sp@(PT_OFF_VECTOR), %d0 |
| 148 | and #0x3ff, %d0 | 148 | and #0x3ff, %d0 |
| 149 | 149 | ||
| 150 | movel %sp,%sp@- | 150 | movel %sp,%sp@- |
| @@ -155,7 +155,7 @@ inthandler2: | |||
| 155 | 155 | ||
| 156 | inthandler3: | 156 | inthandler3: |
| 157 | SAVE_ALL | 157 | SAVE_ALL |
| 158 | movew %sp@(PT_VECTOR), %d0 | 158 | movew %sp@(PT_OFF_VECTOR), %d0 |
| 159 | and #0x3ff, %d0 | 159 | and #0x3ff, %d0 |
| 160 | 160 | ||
| 161 | movel %sp,%sp@- | 161 | movel %sp,%sp@- |
| @@ -166,7 +166,7 @@ inthandler3: | |||
| 166 | 166 | ||
| 167 | inthandler4: | 167 | inthandler4: |
| 168 | SAVE_ALL | 168 | SAVE_ALL |
| 169 | movew %sp@(PT_VECTOR), %d0 | 169 | movew %sp@(PT_OFF_VECTOR), %d0 |
| 170 | and #0x3ff, %d0 | 170 | and #0x3ff, %d0 |
| 171 | 171 | ||
| 172 | movel %sp,%sp@- | 172 | movel %sp,%sp@- |
| @@ -177,7 +177,7 @@ inthandler4: | |||
| 177 | 177 | ||
| 178 | inthandler5: | 178 | inthandler5: |
| 179 | SAVE_ALL | 179 | SAVE_ALL |
| 180 | movew %sp@(PT_VECTOR), %d0 | 180 | movew %sp@(PT_OFF_VECTOR), %d0 |
| 181 | and #0x3ff, %d0 | 181 | and #0x3ff, %d0 |
| 182 | 182 | ||
| 183 | movel %sp,%sp@- | 183 | movel %sp,%sp@- |
| @@ -188,7 +188,7 @@ inthandler5: | |||
| 188 | 188 | ||
| 189 | inthandler6: | 189 | inthandler6: |
| 190 | SAVE_ALL | 190 | SAVE_ALL |
| 191 | movew %sp@(PT_VECTOR), %d0 | 191 | movew %sp@(PT_OFF_VECTOR), %d0 |
| 192 | and #0x3ff, %d0 | 192 | and #0x3ff, %d0 |
| 193 | 193 | ||
| 194 | movel %sp,%sp@- | 194 | movel %sp,%sp@- |
| @@ -199,7 +199,7 @@ inthandler6: | |||
| 199 | 199 | ||
| 200 | inthandler7: | 200 | inthandler7: |
| 201 | SAVE_ALL | 201 | SAVE_ALL |
| 202 | movew %sp@(PT_VECTOR), %d0 | 202 | movew %sp@(PT_OFF_VECTOR), %d0 |
| 203 | and #0x3ff, %d0 | 203 | and #0x3ff, %d0 |
| 204 | 204 | ||
| 205 | movel %sp,%sp@- | 205 | movel %sp,%sp@- |
| @@ -210,7 +210,7 @@ inthandler7: | |||
| 210 | 210 | ||
| 211 | inthandler: | 211 | inthandler: |
| 212 | SAVE_ALL | 212 | SAVE_ALL |
| 213 | movew %sp@(PT_VECTOR), %d0 | 213 | movew %sp@(PT_OFF_VECTOR), %d0 |
| 214 | and #0x3ff, %d0 | 214 | and #0x3ff, %d0 |
| 215 | 215 | ||
| 216 | movel %sp,%sp@- | 216 | movel %sp,%sp@- |
| @@ -224,7 +224,7 @@ ret_from_interrupt: | |||
| 224 | 2: | 224 | 2: |
| 225 | RESTORE_ALL | 225 | RESTORE_ALL |
| 226 | 1: | 226 | 1: |
| 227 | moveb %sp@(PT_SR), %d0 | 227 | moveb %sp@(PT_OFF_SR), %d0 |
| 228 | and #7, %d0 | 228 | and #7, %d0 |
| 229 | jhi 2b | 229 | jhi 2b |
| 230 | 230 | ||
diff --git a/arch/m68knommu/platform/68360/entry.S b/arch/m68knommu/platform/68360/entry.S index 55dfefe38642..6d3460a39cac 100644 --- a/arch/m68knommu/platform/68360/entry.S +++ b/arch/m68knommu/platform/68360/entry.S | |||
| @@ -35,17 +35,17 @@ | |||
| 35 | .globl inthandler | 35 | .globl inthandler |
| 36 | 36 | ||
| 37 | badsys: | 37 | badsys: |
| 38 | movel #-ENOSYS,%sp@(PT_D0) | 38 | movel #-ENOSYS,%sp@(PT_OFF_D0) |
| 39 | jra ret_from_exception | 39 | jra ret_from_exception |
| 40 | 40 | ||
| 41 | do_trace: | 41 | do_trace: |
| 42 | movel #-ENOSYS,%sp@(PT_D0) /* needed for strace*/ | 42 | movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/ |
| 43 | subql #4,%sp | 43 | subql #4,%sp |
| 44 | SAVE_SWITCH_STACK | 44 | SAVE_SWITCH_STACK |
| 45 | jbsr syscall_trace | 45 | jbsr syscall_trace |
| 46 | RESTORE_SWITCH_STACK | 46 | RESTORE_SWITCH_STACK |
| 47 | addql #4,%sp | 47 | addql #4,%sp |
| 48 | movel %sp@(PT_ORIG_D0),%d1 | 48 | movel %sp@(PT_OFF_ORIG_D0),%d1 |
| 49 | movel #-ENOSYS,%d0 | 49 | movel #-ENOSYS,%d0 |
| 50 | cmpl #NR_syscalls,%d1 | 50 | cmpl #NR_syscalls,%d1 |
| 51 | jcc 1f | 51 | jcc 1f |
| @@ -53,7 +53,7 @@ do_trace: | |||
| 53 | lea sys_call_table, %a0 | 53 | lea sys_call_table, %a0 |
| 54 | jbsr %a0@(%d1) | 54 | jbsr %a0@(%d1) |
| 55 | 55 | ||
| 56 | 1: movel %d0,%sp@(PT_D0) /* save the return value */ | 56 | 1: movel %d0,%sp@(PT_OFF_D0) /* save the return value */ |
| 57 | subql #4,%sp /* dummy return address */ | 57 | subql #4,%sp /* dummy return address */ |
| 58 | SAVE_SWITCH_STACK | 58 | SAVE_SWITCH_STACK |
| 59 | jbsr syscall_trace | 59 | jbsr syscall_trace |
| @@ -79,10 +79,10 @@ ENTRY(system_call) | |||
| 79 | lea sys_call_table,%a0 | 79 | lea sys_call_table,%a0 |
| 80 | movel %a0@(%d0), %a0 | 80 | movel %a0@(%d0), %a0 |
| 81 | jbsr %a0@ | 81 | jbsr %a0@ |
| 82 | movel %d0,%sp@(PT_D0) /* save the return value*/ | 82 | movel %d0,%sp@(PT_OFF_D0) /* save the return value*/ |
| 83 | 83 | ||
| 84 | ret_from_exception: | 84 | ret_from_exception: |
| 85 | btst #5,%sp@(PT_SR) /* check if returning to kernel*/ | 85 | btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel*/ |
| 86 | jeq Luser_return /* if so, skip resched, signals*/ | 86 | jeq Luser_return /* if so, skip resched, signals*/ |
| 87 | 87 | ||
| 88 | Lkernel_return: | 88 | Lkernel_return: |
| @@ -124,7 +124,7 @@ Lreturn: | |||
| 124 | */ | 124 | */ |
| 125 | inthandler: | 125 | inthandler: |
| 126 | SAVE_ALL | 126 | SAVE_ALL |
| 127 | movew %sp@(PT_VECTOR), %d0 | 127 | movew %sp@(PT_OFF_VECTOR), %d0 |
| 128 | and.l #0x3ff, %d0 | 128 | and.l #0x3ff, %d0 |
| 129 | lsr.l #0x02, %d0 | 129 | lsr.l #0x02, %d0 |
| 130 | 130 | ||
| @@ -139,7 +139,7 @@ ret_from_interrupt: | |||
| 139 | 2: | 139 | 2: |
| 140 | RESTORE_ALL | 140 | RESTORE_ALL |
| 141 | 1: | 141 | 1: |
| 142 | moveb %sp@(PT_SR), %d0 | 142 | moveb %sp@(PT_OFF_SR), %d0 |
| 143 | and #7, %d0 | 143 | and #7, %d0 |
| 144 | jhi 2b | 144 | jhi 2b |
| 145 | /* check if we need to do software interrupts */ | 145 | /* check if we need to do software interrupts */ |
diff --git a/arch/m68knommu/platform/coldfire/entry.S b/arch/m68knommu/platform/coldfire/entry.S index 3b471c0da24a..dd7d591f70ea 100644 --- a/arch/m68knommu/platform/coldfire/entry.S +++ b/arch/m68knommu/platform/coldfire/entry.S | |||
| @@ -81,11 +81,11 @@ ENTRY(system_call) | |||
| 81 | 81 | ||
| 82 | movel %d3,%a0 | 82 | movel %d3,%a0 |
| 83 | jbsr %a0@ | 83 | jbsr %a0@ |
| 84 | movel %d0,%sp@(PT_D0) /* save the return value */ | 84 | movel %d0,%sp@(PT_OFF_D0) /* save the return value */ |
| 85 | jra ret_from_exception | 85 | jra ret_from_exception |
| 86 | 1: | 86 | 1: |
| 87 | movel #-ENOSYS,%d2 /* strace needs -ENOSYS in PT_D0 */ | 87 | movel #-ENOSYS,%d2 /* strace needs -ENOSYS in PT_OFF_D0 */ |
| 88 | movel %d2,PT_D0(%sp) /* on syscall entry */ | 88 | movel %d2,PT_OFF_D0(%sp) /* on syscall entry */ |
| 89 | subql #4,%sp | 89 | subql #4,%sp |
| 90 | SAVE_SWITCH_STACK | 90 | SAVE_SWITCH_STACK |
| 91 | jbsr syscall_trace | 91 | jbsr syscall_trace |
| @@ -93,7 +93,7 @@ ENTRY(system_call) | |||
| 93 | addql #4,%sp | 93 | addql #4,%sp |
| 94 | movel %d3,%a0 | 94 | movel %d3,%a0 |
| 95 | jbsr %a0@ | 95 | jbsr %a0@ |
| 96 | movel %d0,%sp@(PT_D0) /* save the return value */ | 96 | movel %d0,%sp@(PT_OFF_D0) /* save the return value */ |
| 97 | subql #4,%sp /* dummy return address */ | 97 | subql #4,%sp /* dummy return address */ |
| 98 | SAVE_SWITCH_STACK | 98 | SAVE_SWITCH_STACK |
| 99 | jbsr syscall_trace | 99 | jbsr syscall_trace |
| @@ -104,7 +104,7 @@ ret_from_signal: | |||
| 104 | 104 | ||
| 105 | ret_from_exception: | 105 | ret_from_exception: |
| 106 | move #0x2700,%sr /* disable intrs */ | 106 | move #0x2700,%sr /* disable intrs */ |
| 107 | btst #5,%sp@(PT_SR) /* check if returning to kernel */ | 107 | btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel */ |
| 108 | jeq Luser_return /* if so, skip resched, signals */ | 108 | jeq Luser_return /* if so, skip resched, signals */ |
| 109 | 109 | ||
| 110 | #ifdef CONFIG_PREEMPT | 110 | #ifdef CONFIG_PREEMPT |
| @@ -142,8 +142,8 @@ Luser_return: | |||
| 142 | Lreturn: | 142 | Lreturn: |
| 143 | move #0x2700,%sr /* disable intrs */ | 143 | move #0x2700,%sr /* disable intrs */ |
| 144 | movel sw_usp,%a0 /* get usp */ | 144 | movel sw_usp,%a0 /* get usp */ |
| 145 | movel %sp@(PT_PC),%a0@- /* copy exception program counter */ | 145 | movel %sp@(PT_OFF_PC),%a0@- /* copy exception program counter */ |
| 146 | movel %sp@(PT_FORMATVEC),%a0@-/* copy exception format/vector/sr */ | 146 | movel %sp@(PT_OFF_FORMATVEC),%a0@- /* copy exception format/vector/sr */ |
| 147 | moveml %sp@,%d1-%d5/%a0-%a2 | 147 | moveml %sp@,%d1-%d5/%a0-%a2 |
| 148 | lea %sp@(32),%sp /* space for 8 regs */ | 148 | lea %sp@(32),%sp /* space for 8 regs */ |
| 149 | movel %sp@+,%d0 | 149 | movel %sp@+,%d0 |
| @@ -181,9 +181,9 @@ Lsignal_return: | |||
| 181 | ENTRY(inthandler) | 181 | ENTRY(inthandler) |
| 182 | SAVE_ALL | 182 | SAVE_ALL |
| 183 | moveq #-1,%d0 | 183 | moveq #-1,%d0 |
| 184 | movel %d0,%sp@(PT_ORIG_D0) | 184 | movel %d0,%sp@(PT_OFF_ORIG_D0) |
| 185 | 185 | ||
| 186 | movew %sp@(PT_FORMATVEC),%d0 /* put exception # in d0 */ | 186 | movew %sp@(PT_OFF_FORMATVEC),%d0 /* put exception # in d0 */ |
| 187 | andl #0x03fc,%d0 /* mask out vector only */ | 187 | andl #0x03fc,%d0 /* mask out vector only */ |
| 188 | 188 | ||
| 189 | movel %sp,%sp@- /* push regs arg */ | 189 | movel %sp,%sp@- /* push regs arg */ |
| @@ -203,7 +203,7 @@ ENTRY(inthandler) | |||
| 203 | ENTRY(fasthandler) | 203 | ENTRY(fasthandler) |
| 204 | SAVE_LOCAL | 204 | SAVE_LOCAL |
| 205 | 205 | ||
| 206 | movew %sp@(PT_FORMATVEC),%d0 | 206 | movew %sp@(PT_OFF_FORMATVEC),%d0 |
| 207 | andl #0x03fc,%d0 /* mask out vector only */ | 207 | andl #0x03fc,%d0 /* mask out vector only */ |
| 208 | 208 | ||
| 209 | movel %sp,%sp@- /* push regs arg */ | 209 | movel %sp,%sp@- /* push regs arg */ |
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S index acc1f05d1e2c..e3ecb36dd554 100644 --- a/arch/microblaze/kernel/entry.S +++ b/arch/microblaze/kernel/entry.S | |||
| @@ -592,6 +592,8 @@ C_ENTRY(full_exception_trap): | |||
| 592 | nop | 592 | nop |
| 593 | mfs r7, rfsr; /* save FSR */ | 593 | mfs r7, rfsr; /* save FSR */ |
| 594 | nop | 594 | nop |
| 595 | mts rfsr, r0; /* Clear sticky fsr */ | ||
| 596 | nop | ||
| 595 | la r12, r0, full_exception | 597 | la r12, r0, full_exception |
| 596 | set_vms; | 598 | set_vms; |
| 597 | rtbd r12, 0; | 599 | rtbd r12, 0; |
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S index 6b0288ebccd6..2b86c03aa841 100644 --- a/arch/microblaze/kernel/hw_exception_handler.S +++ b/arch/microblaze/kernel/hw_exception_handler.S | |||
| @@ -384,7 +384,7 @@ handle_other_ex: /* Handle Other exceptions here */ | |||
| 384 | addk r8, r17, r0; /* Load exception address */ | 384 | addk r8, r17, r0; /* Load exception address */ |
| 385 | bralid r15, full_exception; /* Branch to the handler */ | 385 | bralid r15, full_exception; /* Branch to the handler */ |
| 386 | nop; | 386 | nop; |
| 387 | mts r0, rfsr; /* Clear sticky fsr */ | 387 | mts rfsr, r0; /* Clear sticky fsr */ |
| 388 | nop | 388 | nop |
| 389 | 389 | ||
| 390 | /* | 390 | /* |
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index 4201c743cc9f..c592d475b3d8 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c | |||
| @@ -235,7 +235,9 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp) | |||
| 235 | regs->pc = pc; | 235 | regs->pc = pc; |
| 236 | regs->r1 = usp; | 236 | regs->r1 = usp; |
| 237 | regs->pt_mode = 0; | 237 | regs->pt_mode = 0; |
| 238 | #ifdef CONFIG_MMU | ||
| 238 | regs->msr |= MSR_UMS; | 239 | regs->msr |= MSR_UMS; |
| 240 | #endif | ||
| 239 | } | 241 | } |
| 240 | 242 | ||
| 241 | #ifdef CONFIG_MMU | 243 | #ifdef CONFIG_MMU |
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index f388dc68f605..524d9352f17e 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
| @@ -18,6 +18,7 @@ config PARISC | |||
| 18 | select BUG | 18 | select BUG |
| 19 | select HAVE_PERF_EVENTS | 19 | select HAVE_PERF_EVENTS |
| 20 | select GENERIC_ATOMIC64 if !64BIT | 20 | select GENERIC_ATOMIC64 if !64BIT |
| 21 | select HAVE_ARCH_TRACEHOOK | ||
| 21 | help | 22 | help |
| 22 | The PA-RISC microprocessor is designed by Hewlett-Packard and used | 23 | The PA-RISC microprocessor is designed by Hewlett-Packard and used |
| 23 | in many of their workstations & servers (HP9000 700 and 800 series, | 24 | in many of their workstations & servers (HP9000 700 and 800 series, |
diff --git a/arch/parisc/include/asm/fixmap.h b/arch/parisc/include/asm/fixmap.h index de3fe3a18229..6fec4d4a1a18 100644 --- a/arch/parisc/include/asm/fixmap.h +++ b/arch/parisc/include/asm/fixmap.h | |||
| @@ -21,9 +21,9 @@ | |||
| 21 | #define KERNEL_MAP_END (TMPALIAS_MAP_START) | 21 | #define KERNEL_MAP_END (TMPALIAS_MAP_START) |
| 22 | 22 | ||
| 23 | #ifndef __ASSEMBLY__ | 23 | #ifndef __ASSEMBLY__ |
| 24 | extern void *vmalloc_start; | 24 | extern void *parisc_vmalloc_start; |
| 25 | #define PCXL_DMA_MAP_SIZE (8*1024*1024) | 25 | #define PCXL_DMA_MAP_SIZE (8*1024*1024) |
| 26 | #define VMALLOC_START ((unsigned long)vmalloc_start) | 26 | #define VMALLOC_START ((unsigned long)parisc_vmalloc_start) |
| 27 | #define VMALLOC_END (KERNEL_MAP_END) | 27 | #define VMALLOC_END (KERNEL_MAP_END) |
| 28 | #endif /*__ASSEMBLY__*/ | 28 | #endif /*__ASSEMBLY__*/ |
| 29 | 29 | ||
diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h index ce93133d5112..0d68184a76cb 100644 --- a/arch/parisc/include/asm/hardirq.h +++ b/arch/parisc/include/asm/hardirq.h | |||
| @@ -1,29 +1,11 @@ | |||
| 1 | /* hardirq.h: PA-RISC hard IRQ support. | 1 | /* hardirq.h: PA-RISC hard IRQ support. |
| 2 | * | 2 | * |
| 3 | * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx> | 3 | * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx> |
| 4 | * | ||
| 5 | * The locking is really quite interesting. There's a cpu-local | ||
| 6 | * count of how many interrupts are being handled, and a global | ||
| 7 | * lock. An interrupt can only be serviced if the global lock | ||
| 8 | * is free. You can't be sure no more interrupts are being | ||
| 9 | * serviced until you've acquired the lock and then checked | ||
| 10 | * all the per-cpu interrupt counts are all zero. It's a specialised | ||
| 11 | * br_lock, and that's exactly how Sparc does it. We don't because | ||
| 12 | * it's more locking for us. This way is lock-free in the interrupt path. | ||
| 13 | */ | 4 | */ |
| 14 | 5 | ||
| 15 | #ifndef _PARISC_HARDIRQ_H | 6 | #ifndef _PARISC_HARDIRQ_H |
| 16 | #define _PARISC_HARDIRQ_H | 7 | #define _PARISC_HARDIRQ_H |
| 17 | 8 | ||
| 18 | #include <linux/threads.h> | 9 | #include <asm-generic/hardirq.h> |
| 19 | #include <linux/irq.h> | ||
| 20 | |||
| 21 | typedef struct { | ||
| 22 | unsigned long __softirq_pending; /* set_bit is used on this */ | ||
| 23 | } ____cacheline_aligned irq_cpustat_t; | ||
| 24 | |||
| 25 | #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ | ||
| 26 | |||
| 27 | void ack_bad_irq(unsigned int irq); | ||
| 28 | 10 | ||
| 29 | #endif /* _PARISC_HARDIRQ_H */ | 11 | #endif /* _PARISC_HARDIRQ_H */ |
diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h index 302f68dc889c..aead40b16dd8 100644 --- a/arch/parisc/include/asm/ptrace.h +++ b/arch/parisc/include/asm/ptrace.h | |||
| @@ -59,8 +59,11 @@ void user_enable_block_step(struct task_struct *task); | |||
| 59 | #define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0) | 59 | #define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0) |
| 60 | #define user_space(regs) (((regs)->iasq[1] != 0) ? 1 : 0) | 60 | #define user_space(regs) (((regs)->iasq[1] != 0) ? 1 : 0) |
| 61 | #define instruction_pointer(regs) ((regs)->iaoq[0] & ~3) | 61 | #define instruction_pointer(regs) ((regs)->iaoq[0] & ~3) |
| 62 | #define user_stack_pointer(regs) ((regs)->gr[30]) | ||
| 62 | unsigned long profile_pc(struct pt_regs *); | 63 | unsigned long profile_pc(struct pt_regs *); |
| 63 | extern void show_regs(struct pt_regs *); | 64 | extern void show_regs(struct pt_regs *); |
| 64 | #endif | 65 | |
| 66 | |||
| 67 | #endif /* __KERNEL__ */ | ||
| 65 | 68 | ||
| 66 | #endif | 69 | #endif |
diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h new file mode 100644 index 000000000000..8bdfd2c8c39f --- /dev/null +++ b/arch/parisc/include/asm/syscall.h | |||
| @@ -0,0 +1,40 @@ | |||
| 1 | /* syscall.h */ | ||
| 2 | |||
| 3 | #ifndef _ASM_PARISC_SYSCALL_H_ | ||
| 4 | #define _ASM_PARISC_SYSCALL_H_ | ||
| 5 | |||
| 6 | #include <linux/err.h> | ||
| 7 | #include <asm/ptrace.h> | ||
| 8 | |||
| 9 | static inline long syscall_get_nr(struct task_struct *tsk, | ||
| 10 | struct pt_regs *regs) | ||
| 11 | { | ||
| 12 | return regs->gr[20]; | ||
| 13 | } | ||
| 14 | |||
| 15 | static inline void syscall_get_arguments(struct task_struct *tsk, | ||
| 16 | struct pt_regs *regs, unsigned int i, | ||
| 17 | unsigned int n, unsigned long *args) | ||
| 18 | { | ||
| 19 | BUG_ON(i); | ||
| 20 | |||
| 21 | switch (n) { | ||
| 22 | case 6: | ||
| 23 | args[5] = regs->gr[21]; | ||
| 24 | case 5: | ||
| 25 | args[4] = regs->gr[22]; | ||
| 26 | case 4: | ||
| 27 | args[3] = regs->gr[23]; | ||
| 28 | case 3: | ||
| 29 | args[2] = regs->gr[24]; | ||
| 30 | case 2: | ||
| 31 | args[1] = regs->gr[25]; | ||
| 32 | case 1: | ||
| 33 | args[0] = regs->gr[26]; | ||
| 34 | break; | ||
| 35 | default: | ||
| 36 | BUG(); | ||
| 37 | } | ||
| 38 | } | ||
| 39 | |||
| 40 | #endif /*_ASM_PARISC_SYSCALL_H_*/ | ||
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index ac775a76bff7..7ecc1039cfed 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h | |||
| @@ -32,6 +32,11 @@ struct thread_info { | |||
| 32 | #define init_thread_info (init_thread_union.thread_info) | 32 | #define init_thread_info (init_thread_union.thread_info) |
| 33 | #define init_stack (init_thread_union.stack) | 33 | #define init_stack (init_thread_union.stack) |
| 34 | 34 | ||
| 35 | /* how to get the thread information struct from C */ | ||
| 36 | #define current_thread_info() ((struct thread_info *)mfctl(30)) | ||
| 37 | |||
| 38 | #endif /* !__ASSEMBLY */ | ||
| 39 | |||
| 35 | /* thread information allocation */ | 40 | /* thread information allocation */ |
| 36 | 41 | ||
| 37 | #define THREAD_SIZE_ORDER 2 | 42 | #define THREAD_SIZE_ORDER 2 |
| @@ -40,11 +45,6 @@ struct thread_info { | |||
| 40 | #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) | 45 | #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) |
| 41 | #define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER) | 46 | #define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER) |
| 42 | 47 | ||
| 43 | /* how to get the thread information struct from C */ | ||
| 44 | #define current_thread_info() ((struct thread_info *)mfctl(30)) | ||
| 45 | |||
| 46 | #endif /* !__ASSEMBLY */ | ||
| 47 | |||
| 48 | #define PREEMPT_ACTIVE_BIT 28 | 48 | #define PREEMPT_ACTIVE_BIT 28 |
| 49 | #define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT) | 49 | #define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT) |
| 50 | 50 | ||
| @@ -60,6 +60,8 @@ struct thread_info { | |||
| 60 | #define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */ | 60 | #define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */ |
| 61 | #define TIF_FREEZE 7 /* is freezing for suspend */ | 61 | #define TIF_FREEZE 7 /* is freezing for suspend */ |
| 62 | #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ | 62 | #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ |
| 63 | #define TIF_SINGLESTEP 9 /* single stepping? */ | ||
| 64 | #define TIF_BLOCKSTEP 10 /* branch stepping? */ | ||
| 63 | 65 | ||
| 64 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) | 66 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) |
| 65 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) | 67 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) |
| @@ -69,6 +71,8 @@ struct thread_info { | |||
| 69 | #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) | 71 | #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) |
| 70 | #define _TIF_FREEZE (1 << TIF_FREEZE) | 72 | #define _TIF_FREEZE (1 << TIF_FREEZE) |
| 71 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) | 73 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) |
| 74 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) | ||
| 75 | #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) | ||
| 72 | 76 | ||
| 73 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ | 77 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ |
| 74 | _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) | 78 | _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) |
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index 699cf8ef2118..fcd3c707bf12 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c | |||
| @@ -270,8 +270,8 @@ int main(void) | |||
| 270 | DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count)); | 270 | DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count)); |
| 271 | DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop)); | 271 | DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop)); |
| 272 | BLANK(); | 272 | BLANK(); |
| 273 | DEFINE(PA_BLOCKSTEP_BIT, 31-PT_BLOCKSTEP_BIT); | 273 | DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP); |
| 274 | DEFINE(PA_SINGLESTEP_BIT, 31-PT_SINGLESTEP_BIT); | 274 | DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP); |
| 275 | BLANK(); | 275 | BLANK(); |
| 276 | DEFINE(ASM_PMD_SHIFT, PMD_SHIFT); | 276 | DEFINE(ASM_PMD_SHIFT, PMD_SHIFT); |
| 277 | DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT); | 277 | DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT); |
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 8c4712b74dc1..3a44f7f704fa 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S | |||
| @@ -2047,12 +2047,13 @@ syscall_do_signal: | |||
| 2047 | b,n syscall_check_sig | 2047 | b,n syscall_check_sig |
| 2048 | 2048 | ||
| 2049 | syscall_restore: | 2049 | syscall_restore: |
| 2050 | /* Are we being ptraced? */ | ||
| 2051 | LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 | 2050 | LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 |
| 2052 | 2051 | ||
| 2053 | ldw TASK_PTRACE(%r1), %r19 | 2052 | /* Are we being ptraced? */ |
| 2054 | bb,< %r19,31,syscall_restore_rfi | 2053 | ldw TASK_FLAGS(%r1),%r19 |
| 2055 | nop | 2054 | ldi (_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2 |
| 2055 | and,COND(=) %r19,%r2,%r0 | ||
| 2056 | b,n syscall_restore_rfi | ||
| 2056 | 2057 | ||
| 2057 | ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */ | 2058 | ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */ |
| 2058 | rest_fp %r19 | 2059 | rest_fp %r19 |
| @@ -2113,16 +2114,16 @@ syscall_restore_rfi: | |||
| 2113 | ldi 0x0b,%r20 /* Create new PSW */ | 2114 | ldi 0x0b,%r20 /* Create new PSW */ |
| 2114 | depi -1,13,1,%r20 /* C, Q, D, and I bits */ | 2115 | depi -1,13,1,%r20 /* C, Q, D, and I bits */ |
| 2115 | 2116 | ||
| 2116 | /* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are | 2117 | /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are |
| 2117 | * set in include/linux/ptrace.h and converted to PA bitmap | 2118 | * set in thread_info.h and converted to PA bitmap |
| 2118 | * numbers in asm-offsets.c */ | 2119 | * numbers in asm-offsets.c */ |
| 2119 | 2120 | ||
| 2120 | /* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */ | 2121 | /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */ |
| 2121 | extru,= %r19,PA_SINGLESTEP_BIT,1,%r0 | 2122 | extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0 |
| 2122 | depi -1,27,1,%r20 /* R bit */ | 2123 | depi -1,27,1,%r20 /* R bit */ |
| 2123 | 2124 | ||
| 2124 | /* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */ | 2125 | /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */ |
| 2125 | extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0 | 2126 | extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0 |
| 2126 | depi -1,7,1,%r20 /* T bit */ | 2127 | depi -1,7,1,%r20 /* T bit */ |
| 2127 | 2128 | ||
| 2128 | STREG %r20,TASK_PT_PSW(%r1) | 2129 | STREG %r20,TASK_PT_PSW(%r1) |
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 330f536a9324..2e7610cb33d5 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c | |||
| @@ -423,8 +423,3 @@ void __init init_IRQ(void) | |||
| 423 | set_eiem(cpu_eiem); /* EIEM : enable all external intr */ | 423 | set_eiem(cpu_eiem); /* EIEM : enable all external intr */ |
| 424 | 424 | ||
| 425 | } | 425 | } |
| 426 | |||
| 427 | void ack_bad_irq(unsigned int irq) | ||
| 428 | { | ||
| 429 | printk(KERN_WARNING "unexpected IRQ %d\n", irq); | ||
| 430 | } | ||
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index 61ee0eec4e69..212074653df7 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c | |||
| @@ -893,7 +893,7 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
| 893 | * ourselves */ | 893 | * ourselves */ |
| 894 | for (i = 1; i < hdr->e_shnum; i++) { | 894 | for (i = 1; i < hdr->e_shnum; i++) { |
| 895 | if(sechdrs[i].sh_type == SHT_SYMTAB | 895 | if(sechdrs[i].sh_type == SHT_SYMTAB |
| 896 | && (sechdrs[i].sh_type & SHF_ALLOC)) { | 896 | && (sechdrs[i].sh_flags & SHF_ALLOC)) { |
| 897 | int strindex = sechdrs[i].sh_link; | 897 | int strindex = sechdrs[i].sh_link; |
| 898 | /* FIXME: AWFUL HACK | 898 | /* FIXME: AWFUL HACK |
| 899 | * The cast is to drop the const from | 899 | * The cast is to drop the const from |
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 927db3668b6f..c4f49e45129d 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/smp.h> | 13 | #include <linux/smp.h> |
| 14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
| 15 | #include <linux/ptrace.h> | 15 | #include <linux/ptrace.h> |
| 16 | #include <linux/tracehook.h> | ||
| 16 | #include <linux/user.h> | 17 | #include <linux/user.h> |
| 17 | #include <linux/personality.h> | 18 | #include <linux/personality.h> |
| 18 | #include <linux/security.h> | 19 | #include <linux/security.h> |
| @@ -35,7 +36,8 @@ | |||
| 35 | */ | 36 | */ |
| 36 | void ptrace_disable(struct task_struct *task) | 37 | void ptrace_disable(struct task_struct *task) |
| 37 | { | 38 | { |
| 38 | task->ptrace &= ~(PT_SINGLESTEP|PT_BLOCKSTEP); | 39 | clear_tsk_thread_flag(task, TIF_SINGLESTEP); |
| 40 | clear_tsk_thread_flag(task, TIF_BLOCKSTEP); | ||
| 39 | 41 | ||
| 40 | /* make sure the trap bits are not set */ | 42 | /* make sure the trap bits are not set */ |
| 41 | pa_psw(task)->r = 0; | 43 | pa_psw(task)->r = 0; |
| @@ -55,8 +57,8 @@ void user_disable_single_step(struct task_struct *task) | |||
| 55 | 57 | ||
| 56 | void user_enable_single_step(struct task_struct *task) | 58 | void user_enable_single_step(struct task_struct *task) |
| 57 | { | 59 | { |
| 58 | task->ptrace &= ~PT_BLOCKSTEP; | 60 | clear_tsk_thread_flag(task, TIF_BLOCKSTEP); |
| 59 | task->ptrace |= PT_SINGLESTEP; | 61 | set_tsk_thread_flag(task, TIF_SINGLESTEP); |
| 60 | 62 | ||
| 61 | if (pa_psw(task)->n) { | 63 | if (pa_psw(task)->n) { |
| 62 | struct siginfo si; | 64 | struct siginfo si; |
| @@ -98,8 +100,8 @@ void user_enable_single_step(struct task_struct *task) | |||
| 98 | 100 | ||
| 99 | void user_enable_block_step(struct task_struct *task) | 101 | void user_enable_block_step(struct task_struct *task) |
| 100 | { | 102 | { |
| 101 | task->ptrace &= ~PT_SINGLESTEP; | 103 | clear_tsk_thread_flag(task, TIF_SINGLESTEP); |
| 102 | task->ptrace |= PT_BLOCKSTEP; | 104 | set_tsk_thread_flag(task, TIF_BLOCKSTEP); |
| 103 | 105 | ||
| 104 | /* Enable taken branch trap. */ | 106 | /* Enable taken branch trap. */ |
| 105 | pa_psw(task)->r = 0; | 107 | pa_psw(task)->r = 0; |
| @@ -263,22 +265,20 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | |||
| 263 | } | 265 | } |
| 264 | #endif | 266 | #endif |
| 265 | 267 | ||
| 268 | long do_syscall_trace_enter(struct pt_regs *regs) | ||
| 269 | { | ||
| 270 | if (test_thread_flag(TIF_SYSCALL_TRACE) && | ||
| 271 | tracehook_report_syscall_entry(regs)) | ||
| 272 | return -1L; | ||
| 273 | |||
| 274 | return regs->gr[20]; | ||
| 275 | } | ||
| 266 | 276 | ||
| 267 | void syscall_trace(void) | 277 | void do_syscall_trace_exit(struct pt_regs *regs) |
| 268 | { | 278 | { |
| 269 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) | 279 | int stepping = test_thread_flag(TIF_SINGLESTEP) || |
| 270 | return; | 280 | test_thread_flag(TIF_BLOCKSTEP); |
| 271 | if (!(current->ptrace & PT_PTRACED)) | 281 | |
| 272 | return; | 282 | if (stepping || test_thread_flag(TIF_SYSCALL_TRACE)) |
| 273 | ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) | 283 | tracehook_report_syscall_exit(regs, stepping); |
| 274 | ? 0x80 : 0)); | ||
| 275 | /* | ||
| 276 | * this isn't the same as continuing with a signal, but it will do | ||
| 277 | * for normal use. strace only continues with a signal if the | ||
| 278 | * stopping signal is not SIGTRAP. -brl | ||
| 279 | */ | ||
| 280 | if (current->exit_code) { | ||
| 281 | send_sig(current->exit_code, current, 1); | ||
| 282 | current->exit_code = 0; | ||
| 283 | } | ||
| 284 | } | 284 | } |
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 8eb3c63c407a..e8467e4aa8d1 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/errno.h> | 21 | #include <linux/errno.h> |
| 22 | #include <linux/wait.h> | 22 | #include <linux/wait.h> |
| 23 | #include <linux/ptrace.h> | 23 | #include <linux/ptrace.h> |
| 24 | #include <linux/tracehook.h> | ||
| 24 | #include <linux/unistd.h> | 25 | #include <linux/unistd.h> |
| 25 | #include <linux/stddef.h> | 26 | #include <linux/stddef.h> |
| 26 | #include <linux/compat.h> | 27 | #include <linux/compat.h> |
| @@ -34,7 +35,6 @@ | |||
| 34 | #include <asm/asm-offsets.h> | 35 | #include <asm/asm-offsets.h> |
| 35 | 36 | ||
| 36 | #ifdef CONFIG_COMPAT | 37 | #ifdef CONFIG_COMPAT |
| 37 | #include <linux/compat.h> | ||
| 38 | #include "signal32.h" | 38 | #include "signal32.h" |
| 39 | #endif | 39 | #endif |
| 40 | 40 | ||
| @@ -468,6 +468,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | |||
| 468 | sigaddset(¤t->blocked,sig); | 468 | sigaddset(¤t->blocked,sig); |
| 469 | recalc_sigpending(); | 469 | recalc_sigpending(); |
| 470 | spin_unlock_irq(¤t->sighand->siglock); | 470 | spin_unlock_irq(¤t->sighand->siglock); |
| 471 | |||
| 472 | tracehook_signal_handler(sig, info, ka, regs, 0); | ||
| 473 | |||
| 471 | return 1; | 474 | return 1; |
| 472 | } | 475 | } |
| 473 | 476 | ||
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 59fc1a43ec3e..f5f96021caa0 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S | |||
| @@ -288,18 +288,23 @@ tracesys: | |||
| 288 | STREG %r18,PT_GR18(%r2) | 288 | STREG %r18,PT_GR18(%r2) |
| 289 | /* Finished saving things for the debugger */ | 289 | /* Finished saving things for the debugger */ |
| 290 | 290 | ||
| 291 | ldil L%syscall_trace,%r1 | 291 | copy %r2,%r26 |
| 292 | ldil L%do_syscall_trace_enter,%r1 | ||
| 292 | ldil L%tracesys_next,%r2 | 293 | ldil L%tracesys_next,%r2 |
| 293 | be R%syscall_trace(%sr7,%r1) | 294 | be R%do_syscall_trace_enter(%sr7,%r1) |
| 294 | ldo R%tracesys_next(%r2),%r2 | 295 | ldo R%tracesys_next(%r2),%r2 |
| 295 | 296 | ||
| 296 | tracesys_next: | 297 | tracesys_next: |
| 298 | /* do_syscall_trace_enter either returned the syscallno, or -1L, | ||
| 299 | * so we skip restoring the PT_GR20 below, since we pulled it from | ||
| 300 | * task->thread.regs.gr[20] above. | ||
| 301 | */ | ||
| 302 | copy %ret0,%r20 | ||
| 297 | ldil L%sys_call_table,%r1 | 303 | ldil L%sys_call_table,%r1 |
| 298 | ldo R%sys_call_table(%r1), %r19 | 304 | ldo R%sys_call_table(%r1), %r19 |
| 299 | 305 | ||
| 300 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ | 306 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ |
| 301 | LDREG TI_TASK(%r1), %r1 | 307 | LDREG TI_TASK(%r1), %r1 |
| 302 | LDREG TASK_PT_GR20(%r1), %r20 | ||
| 303 | LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */ | 308 | LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */ |
| 304 | LDREG TASK_PT_GR25(%r1), %r25 | 309 | LDREG TASK_PT_GR25(%r1), %r25 |
| 305 | LDREG TASK_PT_GR24(%r1), %r24 | 310 | LDREG TASK_PT_GR24(%r1), %r24 |
| @@ -336,7 +341,8 @@ tracesys_exit: | |||
| 336 | #ifdef CONFIG_64BIT | 341 | #ifdef CONFIG_64BIT |
| 337 | ldo -16(%r30),%r29 /* Reference param save area */ | 342 | ldo -16(%r30),%r29 /* Reference param save area */ |
| 338 | #endif | 343 | #endif |
| 339 | bl syscall_trace, %r2 | 344 | ldo TASK_REGS(%r1),%r26 |
| 345 | bl do_syscall_trace_exit,%r2 | ||
| 340 | STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ | 346 | STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ |
| 341 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ | 347 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ |
| 342 | LDREG TI_TASK(%r1), %r1 | 348 | LDREG TI_TASK(%r1), %r1 |
| @@ -353,12 +359,12 @@ tracesys_exit: | |||
| 353 | 359 | ||
| 354 | tracesys_sigexit: | 360 | tracesys_sigexit: |
| 355 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ | 361 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ |
| 356 | LDREG 0(%r1), %r1 | 362 | LDREG TI_TASK(%r1), %r1 |
| 357 | #ifdef CONFIG_64BIT | 363 | #ifdef CONFIG_64BIT |
| 358 | ldo -16(%r30),%r29 /* Reference param save area */ | 364 | ldo -16(%r30),%r29 /* Reference param save area */ |
| 359 | #endif | 365 | #endif |
| 360 | bl syscall_trace, %r2 | 366 | bl do_syscall_trace_exit,%r2 |
| 361 | nop | 367 | ldo TASK_REGS(%r1),%r26 |
| 362 | 368 | ||
| 363 | ldil L%syscall_exit_rfi,%r1 | 369 | ldil L%syscall_exit_rfi,%r1 |
| 364 | be,n R%syscall_exit_rfi(%sr7,%r1) | 370 | be,n R%syscall_exit_rfi(%sr7,%r1) |
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index 775be2791bc2..fda4baa059b5 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <asm/cache.h> | 28 | #include <asm/cache.h> |
| 29 | #include <asm/page.h> | 29 | #include <asm/page.h> |
| 30 | #include <asm/asm-offsets.h> | 30 | #include <asm/asm-offsets.h> |
| 31 | #include <asm/thread_info.h> | ||
| 31 | 32 | ||
| 32 | /* ld script to make hppa Linux kernel */ | 33 | /* ld script to make hppa Linux kernel */ |
| 33 | #ifndef CONFIG_64BIT | 34 | #ifndef CONFIG_64BIT |
| @@ -134,6 +135,15 @@ SECTIONS | |||
| 134 | __init_begin = .; | 135 | __init_begin = .; |
| 135 | INIT_TEXT_SECTION(16384) | 136 | INIT_TEXT_SECTION(16384) |
| 136 | INIT_DATA_SECTION(16) | 137 | INIT_DATA_SECTION(16) |
| 138 | /* we have to discard exit text and such at runtime, not link time */ | ||
| 139 | .exit.text : | ||
| 140 | { | ||
| 141 | EXIT_TEXT | ||
| 142 | } | ||
| 143 | .exit.data : | ||
| 144 | { | ||
| 145 | EXIT_DATA | ||
| 146 | } | ||
| 137 | 147 | ||
| 138 | PERCPU(PAGE_SIZE) | 148 | PERCPU(PAGE_SIZE) |
| 139 | . = ALIGN(PAGE_SIZE); | 149 | . = ALIGN(PAGE_SIZE); |
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index d5aca31fddbb..13b6e3e59b99 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c | |||
| @@ -434,8 +434,8 @@ void mark_rodata_ro(void) | |||
| 434 | #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \ | 434 | #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \ |
| 435 | & ~(VM_MAP_OFFSET-1))) | 435 | & ~(VM_MAP_OFFSET-1))) |
| 436 | 436 | ||
| 437 | void *vmalloc_start __read_mostly; | 437 | void *parisc_vmalloc_start __read_mostly; |
| 438 | EXPORT_SYMBOL(vmalloc_start); | 438 | EXPORT_SYMBOL(parisc_vmalloc_start); |
| 439 | 439 | ||
| 440 | #ifdef CONFIG_PA11 | 440 | #ifdef CONFIG_PA11 |
| 441 | unsigned long pcxl_dma_start __read_mostly; | 441 | unsigned long pcxl_dma_start __read_mostly; |
| @@ -496,13 +496,14 @@ void __init mem_init(void) | |||
| 496 | #ifdef CONFIG_PA11 | 496 | #ifdef CONFIG_PA11 |
| 497 | if (hppa_dma_ops == &pcxl_dma_ops) { | 497 | if (hppa_dma_ops == &pcxl_dma_ops) { |
| 498 | pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START); | 498 | pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START); |
| 499 | vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE); | 499 | parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start |
| 500 | + PCXL_DMA_MAP_SIZE); | ||
| 500 | } else { | 501 | } else { |
| 501 | pcxl_dma_start = 0; | 502 | pcxl_dma_start = 0; |
| 502 | vmalloc_start = SET_MAP_OFFSET(MAP_START); | 503 | parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START); |
| 503 | } | 504 | } |
| 504 | #else | 505 | #else |
| 505 | vmalloc_start = SET_MAP_OFFSET(MAP_START); | 506 | parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START); |
| 506 | #endif | 507 | #endif |
| 507 | 508 | ||
| 508 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n", | 509 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n", |
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index ec5eee7c25d8..06cce8285ba0 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h | |||
| @@ -58,7 +58,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, | |||
| 58 | int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); | 58 | int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); |
| 59 | int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action); | 59 | int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action); |
| 60 | 60 | ||
| 61 | static inline int kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu) | 61 | static inline long kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu) |
| 62 | { | 62 | { |
| 63 | return vcpu->arch.sie_block->gmslm | 63 | return vcpu->arch.sie_block->gmslm |
| 64 | - vcpu->arch.sie_block->gmsor | 64 | - vcpu->arch.sie_block->gmsor |
diff --git a/arch/sparc/include/asm/hardirq_32.h b/arch/sparc/include/asm/hardirq_32.h index 4f63ed8df551..162007643cdc 100644 --- a/arch/sparc/include/asm/hardirq_32.h +++ b/arch/sparc/include/asm/hardirq_32.h | |||
| @@ -7,17 +7,7 @@ | |||
| 7 | #ifndef __SPARC_HARDIRQ_H | 7 | #ifndef __SPARC_HARDIRQ_H |
| 8 | #define __SPARC_HARDIRQ_H | 8 | #define __SPARC_HARDIRQ_H |
| 9 | 9 | ||
| 10 | #include <linux/threads.h> | ||
| 11 | #include <linux/spinlock.h> | ||
| 12 | #include <linux/cache.h> | ||
| 13 | |||
| 14 | /* entry.S is sensitive to the offsets of these fields */ /* XXX P3 Is it? */ | ||
| 15 | typedef struct { | ||
| 16 | unsigned int __softirq_pending; | ||
| 17 | } ____cacheline_aligned irq_cpustat_t; | ||
| 18 | |||
| 19 | #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ | ||
| 20 | |||
| 21 | #define HARDIRQ_BITS 8 | 10 | #define HARDIRQ_BITS 8 |
| 11 | #include <asm-generic/hardirq.h> | ||
| 22 | 12 | ||
| 23 | #endif /* __SPARC_HARDIRQ_H */ | 13 | #endif /* __SPARC_HARDIRQ_H */ |
diff --git a/arch/sparc/include/asm/irq_32.h b/arch/sparc/include/asm/irq_32.h index ea43057d4763..cbf4801deaaf 100644 --- a/arch/sparc/include/asm/irq_32.h +++ b/arch/sparc/include/asm/irq_32.h | |||
| @@ -6,10 +6,10 @@ | |||
| 6 | #ifndef _SPARC_IRQ_H | 6 | #ifndef _SPARC_IRQ_H |
| 7 | #define _SPARC_IRQ_H | 7 | #define _SPARC_IRQ_H |
| 8 | 8 | ||
| 9 | #include <linux/interrupt.h> | ||
| 10 | |||
| 11 | #define NR_IRQS 16 | 9 | #define NR_IRQS 16 |
| 12 | 10 | ||
| 11 | #include <linux/interrupt.h> | ||
| 12 | |||
| 13 | #define irq_canonicalize(irq) (irq) | 13 | #define irq_canonicalize(irq) (irq) |
| 14 | 14 | ||
| 15 | extern void __init init_IRQ(void); | 15 | extern void __init init_IRQ(void); |
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 0ff92fa22064..f3cb790fa2ae 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h | |||
| @@ -41,8 +41,8 @@ | |||
| 41 | #define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL) | 41 | #define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL) |
| 42 | #define HI_OBP_ADDRESS _AC(0x0000000100000000,UL) | 42 | #define HI_OBP_ADDRESS _AC(0x0000000100000000,UL) |
| 43 | #define VMALLOC_START _AC(0x0000000100000000,UL) | 43 | #define VMALLOC_START _AC(0x0000000100000000,UL) |
| 44 | #define VMALLOC_END _AC(0x0000000200000000,UL) | 44 | #define VMALLOC_END _AC(0x0000010000000000,UL) |
| 45 | #define VMEMMAP_BASE _AC(0x0000000200000000,UL) | 45 | #define VMEMMAP_BASE _AC(0x0000010000000000,UL) |
| 46 | 46 | ||
| 47 | #define vmemmap ((struct page *)VMEMMAP_BASE) | 47 | #define vmemmap ((struct page *)VMEMMAP_BASE) |
| 48 | 48 | ||
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S index 3ea6e8cde8c5..1d361477d7d6 100644 --- a/arch/sparc/kernel/ktlb.S +++ b/arch/sparc/kernel/ktlb.S | |||
| @@ -280,8 +280,8 @@ kvmap_dtlb_nonlinear: | |||
| 280 | 280 | ||
| 281 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | 281 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
| 282 | /* Do not use the TSB for vmemmap. */ | 282 | /* Do not use the TSB for vmemmap. */ |
| 283 | mov (VMEMMAP_BASE >> 24), %g5 | 283 | mov (VMEMMAP_BASE >> 40), %g5 |
| 284 | sllx %g5, 24, %g5 | 284 | sllx %g5, 40, %g5 |
| 285 | cmp %g4,%g5 | 285 | cmp %g4,%g5 |
| 286 | bgeu,pn %xcc, kvmap_vmemmap | 286 | bgeu,pn %xcc, kvmap_vmemmap |
| 287 | nop | 287 | nop |
| @@ -293,8 +293,8 @@ kvmap_dtlb_tsbmiss: | |||
| 293 | sethi %hi(MODULES_VADDR), %g5 | 293 | sethi %hi(MODULES_VADDR), %g5 |
| 294 | cmp %g4, %g5 | 294 | cmp %g4, %g5 |
| 295 | blu,pn %xcc, kvmap_dtlb_longpath | 295 | blu,pn %xcc, kvmap_dtlb_longpath |
| 296 | mov (VMALLOC_END >> 24), %g5 | 296 | mov (VMALLOC_END >> 40), %g5 |
| 297 | sllx %g5, 24, %g5 | 297 | sllx %g5, 40, %g5 |
| 298 | cmp %g4, %g5 | 298 | cmp %g4, %g5 |
| 299 | bgeu,pn %xcc, kvmap_dtlb_longpath | 299 | bgeu,pn %xcc, kvmap_dtlb_longpath |
| 300 | nop | 300 | nop |
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 2d6a1b10c81d..04db92743896 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c | |||
| @@ -56,7 +56,8 @@ struct cpu_hw_events { | |||
| 56 | struct perf_event *events[MAX_HWEVENTS]; | 56 | struct perf_event *events[MAX_HWEVENTS]; |
| 57 | unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; | 57 | unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; |
| 58 | unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; | 58 | unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; |
| 59 | int enabled; | 59 | u64 pcr; |
| 60 | int enabled; | ||
| 60 | }; | 61 | }; |
| 61 | DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; | 62 | DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; |
| 62 | 63 | ||
| @@ -68,8 +69,30 @@ struct perf_event_map { | |||
| 68 | #define PIC_LOWER 0x02 | 69 | #define PIC_LOWER 0x02 |
| 69 | }; | 70 | }; |
| 70 | 71 | ||
| 72 | static unsigned long perf_event_encode(const struct perf_event_map *pmap) | ||
| 73 | { | ||
| 74 | return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask; | ||
| 75 | } | ||
| 76 | |||
| 77 | static void perf_event_decode(unsigned long val, u16 *enc, u8 *msk) | ||
| 78 | { | ||
| 79 | *msk = val & 0xff; | ||
| 80 | *enc = val >> 16; | ||
| 81 | } | ||
| 82 | |||
| 83 | #define C(x) PERF_COUNT_HW_CACHE_##x | ||
| 84 | |||
| 85 | #define CACHE_OP_UNSUPPORTED 0xfffe | ||
| 86 | #define CACHE_OP_NONSENSE 0xffff | ||
| 87 | |||
| 88 | typedef struct perf_event_map cache_map_t | ||
| 89 | [PERF_COUNT_HW_CACHE_MAX] | ||
| 90 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
| 91 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | ||
| 92 | |||
| 71 | struct sparc_pmu { | 93 | struct sparc_pmu { |
| 72 | const struct perf_event_map *(*event_map)(int); | 94 | const struct perf_event_map *(*event_map)(int); |
| 95 | const cache_map_t *cache_map; | ||
| 73 | int max_events; | 96 | int max_events; |
| 74 | int upper_shift; | 97 | int upper_shift; |
| 75 | int lower_shift; | 98 | int lower_shift; |
| @@ -80,21 +103,109 @@ struct sparc_pmu { | |||
| 80 | int lower_nop; | 103 | int lower_nop; |
| 81 | }; | 104 | }; |
| 82 | 105 | ||
| 83 | static const struct perf_event_map ultra3i_perfmon_event_map[] = { | 106 | static const struct perf_event_map ultra3_perfmon_event_map[] = { |
| 84 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER }, | 107 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER }, |
| 85 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER }, | 108 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER }, |
| 86 | [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER }, | 109 | [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER }, |
| 87 | [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER }, | 110 | [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER }, |
| 88 | }; | 111 | }; |
| 89 | 112 | ||
| 90 | static const struct perf_event_map *ultra3i_event_map(int event_id) | 113 | static const struct perf_event_map *ultra3_event_map(int event_id) |
| 91 | { | 114 | { |
| 92 | return &ultra3i_perfmon_event_map[event_id]; | 115 | return &ultra3_perfmon_event_map[event_id]; |
| 93 | } | 116 | } |
| 94 | 117 | ||
| 95 | static const struct sparc_pmu ultra3i_pmu = { | 118 | static const cache_map_t ultra3_cache_map = { |
| 96 | .event_map = ultra3i_event_map, | 119 | [C(L1D)] = { |
| 97 | .max_events = ARRAY_SIZE(ultra3i_perfmon_event_map), | 120 | [C(OP_READ)] = { |
| 121 | [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, }, | ||
| 122 | [C(RESULT_MISS)] = { 0x09, PIC_UPPER, }, | ||
| 123 | }, | ||
| 124 | [C(OP_WRITE)] = { | ||
| 125 | [C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER }, | ||
| 126 | [C(RESULT_MISS)] = { 0x0a, PIC_UPPER }, | ||
| 127 | }, | ||
| 128 | [C(OP_PREFETCH)] = { | ||
| 129 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 130 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 131 | }, | ||
| 132 | }, | ||
| 133 | [C(L1I)] = { | ||
| 134 | [C(OP_READ)] = { | ||
| 135 | [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, }, | ||
| 136 | [C(RESULT_MISS)] = { 0x09, PIC_UPPER, }, | ||
| 137 | }, | ||
| 138 | [ C(OP_WRITE) ] = { | ||
| 139 | [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, | ||
| 140 | [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, | ||
| 141 | }, | ||
| 142 | [ C(OP_PREFETCH) ] = { | ||
| 143 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 144 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 145 | }, | ||
| 146 | }, | ||
| 147 | [C(LL)] = { | ||
| 148 | [C(OP_READ)] = { | ||
| 149 | [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, }, | ||
| 150 | [C(RESULT_MISS)] = { 0x0c, PIC_UPPER, }, | ||
| 151 | }, | ||
| 152 | [C(OP_WRITE)] = { | ||
| 153 | [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER }, | ||
| 154 | [C(RESULT_MISS)] = { 0x0c, PIC_UPPER }, | ||
| 155 | }, | ||
| 156 | [C(OP_PREFETCH)] = { | ||
| 157 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 158 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 159 | }, | ||
| 160 | }, | ||
| 161 | [C(DTLB)] = { | ||
| 162 | [C(OP_READ)] = { | ||
| 163 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 164 | [C(RESULT_MISS)] = { 0x12, PIC_UPPER, }, | ||
| 165 | }, | ||
| 166 | [ C(OP_WRITE) ] = { | ||
| 167 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 168 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 169 | }, | ||
| 170 | [ C(OP_PREFETCH) ] = { | ||
| 171 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 172 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 173 | }, | ||
| 174 | }, | ||
| 175 | [C(ITLB)] = { | ||
| 176 | [C(OP_READ)] = { | ||
| 177 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 178 | [C(RESULT_MISS)] = { 0x11, PIC_UPPER, }, | ||
| 179 | }, | ||
| 180 | [ C(OP_WRITE) ] = { | ||
| 181 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 182 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 183 | }, | ||
| 184 | [ C(OP_PREFETCH) ] = { | ||
| 185 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 186 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 187 | }, | ||
| 188 | }, | ||
| 189 | [C(BPU)] = { | ||
| 190 | [C(OP_READ)] = { | ||
| 191 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 192 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 193 | }, | ||
| 194 | [ C(OP_WRITE) ] = { | ||
| 195 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 196 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 197 | }, | ||
| 198 | [ C(OP_PREFETCH) ] = { | ||
| 199 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 200 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 201 | }, | ||
| 202 | }, | ||
| 203 | }; | ||
| 204 | |||
| 205 | static const struct sparc_pmu ultra3_pmu = { | ||
| 206 | .event_map = ultra3_event_map, | ||
| 207 | .cache_map = &ultra3_cache_map, | ||
| 208 | .max_events = ARRAY_SIZE(ultra3_perfmon_event_map), | ||
| 98 | .upper_shift = 11, | 209 | .upper_shift = 11, |
| 99 | .lower_shift = 4, | 210 | .lower_shift = 4, |
| 100 | .event_mask = 0x3f, | 211 | .event_mask = 0x3f, |
| @@ -102,6 +213,121 @@ static const struct sparc_pmu ultra3i_pmu = { | |||
| 102 | .lower_nop = 0x14, | 213 | .lower_nop = 0x14, |
| 103 | }; | 214 | }; |
| 104 | 215 | ||
| 216 | /* Niagara1 is very limited. The upper PIC is hard-locked to count | ||
| 217 | * only instructions, so it is free running which creates all kinds of | ||
| 218 | * problems. Some hardware designs make one wonder if the creator | ||
| 219 | * even looked at how this stuff gets used by software. | ||
| 220 | */ | ||
| 221 | static const struct perf_event_map niagara1_perfmon_event_map[] = { | ||
| 222 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER }, | ||
| 223 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER }, | ||
| 224 | [PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE }, | ||
| 225 | [PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER }, | ||
| 226 | }; | ||
| 227 | |||
| 228 | static const struct perf_event_map *niagara1_event_map(int event_id) | ||
| 229 | { | ||
| 230 | return &niagara1_perfmon_event_map[event_id]; | ||
| 231 | } | ||
| 232 | |||
| 233 | static const cache_map_t niagara1_cache_map = { | ||
| 234 | [C(L1D)] = { | ||
| 235 | [C(OP_READ)] = { | ||
| 236 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 237 | [C(RESULT_MISS)] = { 0x03, PIC_LOWER, }, | ||
| 238 | }, | ||
| 239 | [C(OP_WRITE)] = { | ||
| 240 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 241 | [C(RESULT_MISS)] = { 0x03, PIC_LOWER, }, | ||
| 242 | }, | ||
| 243 | [C(OP_PREFETCH)] = { | ||
| 244 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 245 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 246 | }, | ||
| 247 | }, | ||
| 248 | [C(L1I)] = { | ||
| 249 | [C(OP_READ)] = { | ||
| 250 | [C(RESULT_ACCESS)] = { 0x00, PIC_UPPER }, | ||
| 251 | [C(RESULT_MISS)] = { 0x02, PIC_LOWER, }, | ||
| 252 | }, | ||
| 253 | [ C(OP_WRITE) ] = { | ||
| 254 | [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, | ||
| 255 | [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, | ||
| 256 | }, | ||
| 257 | [ C(OP_PREFETCH) ] = { | ||
| 258 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 259 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 260 | }, | ||
| 261 | }, | ||
| 262 | [C(LL)] = { | ||
| 263 | [C(OP_READ)] = { | ||
| 264 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 265 | [C(RESULT_MISS)] = { 0x07, PIC_LOWER, }, | ||
| 266 | }, | ||
| 267 | [C(OP_WRITE)] = { | ||
| 268 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 269 | [C(RESULT_MISS)] = { 0x07, PIC_LOWER, }, | ||
| 270 | }, | ||
| 271 | [C(OP_PREFETCH)] = { | ||
| 272 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 273 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 274 | }, | ||
| 275 | }, | ||
| 276 | [C(DTLB)] = { | ||
| 277 | [C(OP_READ)] = { | ||
| 278 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 279 | [C(RESULT_MISS)] = { 0x05, PIC_LOWER, }, | ||
| 280 | }, | ||
| 281 | [ C(OP_WRITE) ] = { | ||
| 282 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 283 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 284 | }, | ||
| 285 | [ C(OP_PREFETCH) ] = { | ||
| 286 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 287 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 288 | }, | ||
| 289 | }, | ||
| 290 | [C(ITLB)] = { | ||
| 291 | [C(OP_READ)] = { | ||
| 292 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 293 | [C(RESULT_MISS)] = { 0x04, PIC_LOWER, }, | ||
| 294 | }, | ||
| 295 | [ C(OP_WRITE) ] = { | ||
| 296 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 297 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 298 | }, | ||
| 299 | [ C(OP_PREFETCH) ] = { | ||
| 300 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 301 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 302 | }, | ||
| 303 | }, | ||
| 304 | [C(BPU)] = { | ||
| 305 | [C(OP_READ)] = { | ||
| 306 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 307 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 308 | }, | ||
| 309 | [ C(OP_WRITE) ] = { | ||
| 310 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 311 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 312 | }, | ||
| 313 | [ C(OP_PREFETCH) ] = { | ||
| 314 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 315 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 316 | }, | ||
| 317 | }, | ||
| 318 | }; | ||
| 319 | |||
| 320 | static const struct sparc_pmu niagara1_pmu = { | ||
| 321 | .event_map = niagara1_event_map, | ||
| 322 | .cache_map = &niagara1_cache_map, | ||
| 323 | .max_events = ARRAY_SIZE(niagara1_perfmon_event_map), | ||
| 324 | .upper_shift = 0, | ||
| 325 | .lower_shift = 4, | ||
| 326 | .event_mask = 0x7, | ||
| 327 | .upper_nop = 0x0, | ||
| 328 | .lower_nop = 0x0, | ||
| 329 | }; | ||
| 330 | |||
| 105 | static const struct perf_event_map niagara2_perfmon_event_map[] = { | 331 | static const struct perf_event_map niagara2_perfmon_event_map[] = { |
| 106 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER }, | 332 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER }, |
| 107 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER }, | 333 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER }, |
| @@ -116,8 +342,96 @@ static const struct perf_event_map *niagara2_event_map(int event_id) | |||
| 116 | return &niagara2_perfmon_event_map[event_id]; | 342 | return &niagara2_perfmon_event_map[event_id]; |
| 117 | } | 343 | } |
| 118 | 344 | ||
| 345 | static const cache_map_t niagara2_cache_map = { | ||
| 346 | [C(L1D)] = { | ||
| 347 | [C(OP_READ)] = { | ||
| 348 | [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, }, | ||
| 349 | [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, }, | ||
| 350 | }, | ||
| 351 | [C(OP_WRITE)] = { | ||
| 352 | [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, }, | ||
| 353 | [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, }, | ||
| 354 | }, | ||
| 355 | [C(OP_PREFETCH)] = { | ||
| 356 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 357 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 358 | }, | ||
| 359 | }, | ||
| 360 | [C(L1I)] = { | ||
| 361 | [C(OP_READ)] = { | ||
| 362 | [C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, }, | ||
| 363 | [C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, }, | ||
| 364 | }, | ||
| 365 | [ C(OP_WRITE) ] = { | ||
| 366 | [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, | ||
| 367 | [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, | ||
| 368 | }, | ||
| 369 | [ C(OP_PREFETCH) ] = { | ||
| 370 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 371 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 372 | }, | ||
| 373 | }, | ||
| 374 | [C(LL)] = { | ||
| 375 | [C(OP_READ)] = { | ||
| 376 | [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, }, | ||
| 377 | [C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, }, | ||
| 378 | }, | ||
| 379 | [C(OP_WRITE)] = { | ||
| 380 | [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, }, | ||
| 381 | [C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, }, | ||
| 382 | }, | ||
| 383 | [C(OP_PREFETCH)] = { | ||
| 384 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 385 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 386 | }, | ||
| 387 | }, | ||
| 388 | [C(DTLB)] = { | ||
| 389 | [C(OP_READ)] = { | ||
| 390 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 391 | [C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, }, | ||
| 392 | }, | ||
| 393 | [ C(OP_WRITE) ] = { | ||
| 394 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 395 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 396 | }, | ||
| 397 | [ C(OP_PREFETCH) ] = { | ||
| 398 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 399 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 400 | }, | ||
| 401 | }, | ||
| 402 | [C(ITLB)] = { | ||
| 403 | [C(OP_READ)] = { | ||
| 404 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 405 | [C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, }, | ||
| 406 | }, | ||
| 407 | [ C(OP_WRITE) ] = { | ||
| 408 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 409 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 410 | }, | ||
| 411 | [ C(OP_PREFETCH) ] = { | ||
| 412 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 413 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 414 | }, | ||
| 415 | }, | ||
| 416 | [C(BPU)] = { | ||
| 417 | [C(OP_READ)] = { | ||
| 418 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 419 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | ||
| 420 | }, | ||
| 421 | [ C(OP_WRITE) ] = { | ||
| 422 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 423 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 424 | }, | ||
| 425 | [ C(OP_PREFETCH) ] = { | ||
| 426 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 427 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
| 428 | }, | ||
| 429 | }, | ||
| 430 | }; | ||
| 431 | |||
| 119 | static const struct sparc_pmu niagara2_pmu = { | 432 | static const struct sparc_pmu niagara2_pmu = { |
| 120 | .event_map = niagara2_event_map, | 433 | .event_map = niagara2_event_map, |
| 434 | .cache_map = &niagara2_cache_map, | ||
| 121 | .max_events = ARRAY_SIZE(niagara2_perfmon_event_map), | 435 | .max_events = ARRAY_SIZE(niagara2_perfmon_event_map), |
| 122 | .upper_shift = 19, | 436 | .upper_shift = 19, |
| 123 | .lower_shift = 6, | 437 | .lower_shift = 6, |
| @@ -151,23 +465,30 @@ static u64 nop_for_index(int idx) | |||
| 151 | sparc_pmu->lower_nop, idx); | 465 | sparc_pmu->lower_nop, idx); |
| 152 | } | 466 | } |
| 153 | 467 | ||
| 154 | static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc, | 468 | static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx) |
| 155 | int idx) | ||
| 156 | { | 469 | { |
| 157 | u64 val, mask = mask_for_index(idx); | 470 | u64 val, mask = mask_for_index(idx); |
| 158 | 471 | ||
| 159 | val = pcr_ops->read(); | 472 | val = cpuc->pcr; |
| 160 | pcr_ops->write((val & ~mask) | hwc->config); | 473 | val &= ~mask; |
| 474 | val |= hwc->config; | ||
| 475 | cpuc->pcr = val; | ||
| 476 | |||
| 477 | pcr_ops->write(cpuc->pcr); | ||
| 161 | } | 478 | } |
| 162 | 479 | ||
| 163 | static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc, | 480 | static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx) |
| 164 | int idx) | ||
| 165 | { | 481 | { |
| 166 | u64 mask = mask_for_index(idx); | 482 | u64 mask = mask_for_index(idx); |
| 167 | u64 nop = nop_for_index(idx); | 483 | u64 nop = nop_for_index(idx); |
| 168 | u64 val = pcr_ops->read(); | 484 | u64 val; |
| 169 | 485 | ||
| 170 | pcr_ops->write((val & ~mask) | nop); | 486 | val = cpuc->pcr; |
| 487 | val &= ~mask; | ||
| 488 | val |= nop; | ||
| 489 | cpuc->pcr = val; | ||
| 490 | |||
| 491 | pcr_ops->write(cpuc->pcr); | ||
| 171 | } | 492 | } |
| 172 | 493 | ||
| 173 | void hw_perf_enable(void) | 494 | void hw_perf_enable(void) |
| @@ -182,7 +503,7 @@ void hw_perf_enable(void) | |||
| 182 | cpuc->enabled = 1; | 503 | cpuc->enabled = 1; |
| 183 | barrier(); | 504 | barrier(); |
| 184 | 505 | ||
| 185 | val = pcr_ops->read(); | 506 | val = cpuc->pcr; |
| 186 | 507 | ||
| 187 | for (i = 0; i < MAX_HWEVENTS; i++) { | 508 | for (i = 0; i < MAX_HWEVENTS; i++) { |
| 188 | struct perf_event *cp = cpuc->events[i]; | 509 | struct perf_event *cp = cpuc->events[i]; |
| @@ -194,7 +515,9 @@ void hw_perf_enable(void) | |||
| 194 | val |= hwc->config_base; | 515 | val |= hwc->config_base; |
| 195 | } | 516 | } |
| 196 | 517 | ||
| 197 | pcr_ops->write(val); | 518 | cpuc->pcr = val; |
| 519 | |||
| 520 | pcr_ops->write(cpuc->pcr); | ||
| 198 | } | 521 | } |
| 199 | 522 | ||
| 200 | void hw_perf_disable(void) | 523 | void hw_perf_disable(void) |
| @@ -207,10 +530,12 @@ void hw_perf_disable(void) | |||
| 207 | 530 | ||
| 208 | cpuc->enabled = 0; | 531 | cpuc->enabled = 0; |
| 209 | 532 | ||
| 210 | val = pcr_ops->read(); | 533 | val = cpuc->pcr; |
| 211 | val &= ~(PCR_UTRACE | PCR_STRACE | | 534 | val &= ~(PCR_UTRACE | PCR_STRACE | |
| 212 | sparc_pmu->hv_bit | sparc_pmu->irq_bit); | 535 | sparc_pmu->hv_bit | sparc_pmu->irq_bit); |
| 213 | pcr_ops->write(val); | 536 | cpuc->pcr = val; |
| 537 | |||
| 538 | pcr_ops->write(cpuc->pcr); | ||
| 214 | } | 539 | } |
| 215 | 540 | ||
| 216 | static u32 read_pmc(int idx) | 541 | static u32 read_pmc(int idx) |
| @@ -242,7 +567,7 @@ static void write_pmc(int idx, u64 val) | |||
| 242 | } | 567 | } |
| 243 | 568 | ||
| 244 | static int sparc_perf_event_set_period(struct perf_event *event, | 569 | static int sparc_perf_event_set_period(struct perf_event *event, |
| 245 | struct hw_perf_event *hwc, int idx) | 570 | struct hw_perf_event *hwc, int idx) |
| 246 | { | 571 | { |
| 247 | s64 left = atomic64_read(&hwc->period_left); | 572 | s64 left = atomic64_read(&hwc->period_left); |
| 248 | s64 period = hwc->sample_period; | 573 | s64 period = hwc->sample_period; |
| @@ -282,19 +607,19 @@ static int sparc_pmu_enable(struct perf_event *event) | |||
| 282 | if (test_and_set_bit(idx, cpuc->used_mask)) | 607 | if (test_and_set_bit(idx, cpuc->used_mask)) |
| 283 | return -EAGAIN; | 608 | return -EAGAIN; |
| 284 | 609 | ||
| 285 | sparc_pmu_disable_event(hwc, idx); | 610 | sparc_pmu_disable_event(cpuc, hwc, idx); |
| 286 | 611 | ||
| 287 | cpuc->events[idx] = event; | 612 | cpuc->events[idx] = event; |
| 288 | set_bit(idx, cpuc->active_mask); | 613 | set_bit(idx, cpuc->active_mask); |
| 289 | 614 | ||
| 290 | sparc_perf_event_set_period(event, hwc, idx); | 615 | sparc_perf_event_set_period(event, hwc, idx); |
| 291 | sparc_pmu_enable_event(hwc, idx); | 616 | sparc_pmu_enable_event(cpuc, hwc, idx); |
| 292 | perf_event_update_userpage(event); | 617 | perf_event_update_userpage(event); |
| 293 | return 0; | 618 | return 0; |
| 294 | } | 619 | } |
| 295 | 620 | ||
| 296 | static u64 sparc_perf_event_update(struct perf_event *event, | 621 | static u64 sparc_perf_event_update(struct perf_event *event, |
| 297 | struct hw_perf_event *hwc, int idx) | 622 | struct hw_perf_event *hwc, int idx) |
| 298 | { | 623 | { |
| 299 | int shift = 64 - 32; | 624 | int shift = 64 - 32; |
| 300 | u64 prev_raw_count, new_raw_count; | 625 | u64 prev_raw_count, new_raw_count; |
| @@ -324,7 +649,7 @@ static void sparc_pmu_disable(struct perf_event *event) | |||
| 324 | int idx = hwc->idx; | 649 | int idx = hwc->idx; |
| 325 | 650 | ||
| 326 | clear_bit(idx, cpuc->active_mask); | 651 | clear_bit(idx, cpuc->active_mask); |
| 327 | sparc_pmu_disable_event(hwc, idx); | 652 | sparc_pmu_disable_event(cpuc, hwc, idx); |
| 328 | 653 | ||
| 329 | barrier(); | 654 | barrier(); |
| 330 | 655 | ||
| @@ -338,18 +663,29 @@ static void sparc_pmu_disable(struct perf_event *event) | |||
| 338 | static void sparc_pmu_read(struct perf_event *event) | 663 | static void sparc_pmu_read(struct perf_event *event) |
| 339 | { | 664 | { |
| 340 | struct hw_perf_event *hwc = &event->hw; | 665 | struct hw_perf_event *hwc = &event->hw; |
| 666 | |||
| 341 | sparc_perf_event_update(event, hwc, hwc->idx); | 667 | sparc_perf_event_update(event, hwc, hwc->idx); |
| 342 | } | 668 | } |
| 343 | 669 | ||
| 344 | static void sparc_pmu_unthrottle(struct perf_event *event) | 670 | static void sparc_pmu_unthrottle(struct perf_event *event) |
| 345 | { | 671 | { |
| 672 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
| 346 | struct hw_perf_event *hwc = &event->hw; | 673 | struct hw_perf_event *hwc = &event->hw; |
| 347 | sparc_pmu_enable_event(hwc, hwc->idx); | 674 | |
| 675 | sparc_pmu_enable_event(cpuc, hwc, hwc->idx); | ||
| 348 | } | 676 | } |
| 349 | 677 | ||
| 350 | static atomic_t active_events = ATOMIC_INIT(0); | 678 | static atomic_t active_events = ATOMIC_INIT(0); |
| 351 | static DEFINE_MUTEX(pmc_grab_mutex); | 679 | static DEFINE_MUTEX(pmc_grab_mutex); |
| 352 | 680 | ||
| 681 | static void perf_stop_nmi_watchdog(void *unused) | ||
| 682 | { | ||
| 683 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
| 684 | |||
| 685 | stop_nmi_watchdog(NULL); | ||
| 686 | cpuc->pcr = pcr_ops->read(); | ||
| 687 | } | ||
| 688 | |||
| 353 | void perf_event_grab_pmc(void) | 689 | void perf_event_grab_pmc(void) |
| 354 | { | 690 | { |
| 355 | if (atomic_inc_not_zero(&active_events)) | 691 | if (atomic_inc_not_zero(&active_events)) |
| @@ -358,7 +694,7 @@ void perf_event_grab_pmc(void) | |||
| 358 | mutex_lock(&pmc_grab_mutex); | 694 | mutex_lock(&pmc_grab_mutex); |
| 359 | if (atomic_read(&active_events) == 0) { | 695 | if (atomic_read(&active_events) == 0) { |
| 360 | if (atomic_read(&nmi_active) > 0) { | 696 | if (atomic_read(&nmi_active) > 0) { |
| 361 | on_each_cpu(stop_nmi_watchdog, NULL, 1); | 697 | on_each_cpu(perf_stop_nmi_watchdog, NULL, 1); |
| 362 | BUG_ON(atomic_read(&nmi_active) != 0); | 698 | BUG_ON(atomic_read(&nmi_active) != 0); |
| 363 | } | 699 | } |
| 364 | atomic_inc(&active_events); | 700 | atomic_inc(&active_events); |
| @@ -375,30 +711,160 @@ void perf_event_release_pmc(void) | |||
| 375 | } | 711 | } |
| 376 | } | 712 | } |
| 377 | 713 | ||
| 714 | static const struct perf_event_map *sparc_map_cache_event(u64 config) | ||
| 715 | { | ||
| 716 | unsigned int cache_type, cache_op, cache_result; | ||
| 717 | const struct perf_event_map *pmap; | ||
| 718 | |||
| 719 | if (!sparc_pmu->cache_map) | ||
| 720 | return ERR_PTR(-ENOENT); | ||
| 721 | |||
| 722 | cache_type = (config >> 0) & 0xff; | ||
| 723 | if (cache_type >= PERF_COUNT_HW_CACHE_MAX) | ||
| 724 | return ERR_PTR(-EINVAL); | ||
| 725 | |||
| 726 | cache_op = (config >> 8) & 0xff; | ||
| 727 | if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) | ||
| 728 | return ERR_PTR(-EINVAL); | ||
| 729 | |||
| 730 | cache_result = (config >> 16) & 0xff; | ||
| 731 | if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) | ||
| 732 | return ERR_PTR(-EINVAL); | ||
| 733 | |||
| 734 | pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]); | ||
| 735 | |||
| 736 | if (pmap->encoding == CACHE_OP_UNSUPPORTED) | ||
| 737 | return ERR_PTR(-ENOENT); | ||
| 738 | |||
| 739 | if (pmap->encoding == CACHE_OP_NONSENSE) | ||
| 740 | return ERR_PTR(-EINVAL); | ||
| 741 | |||
| 742 | return pmap; | ||
| 743 | } | ||
| 744 | |||
| 378 | static void hw_perf_event_destroy(struct perf_event *event) | 745 | static void hw_perf_event_destroy(struct perf_event *event) |
| 379 | { | 746 | { |
| 380 | perf_event_release_pmc(); | 747 | perf_event_release_pmc(); |
| 381 | } | 748 | } |
| 382 | 749 | ||
| 750 | /* Make sure all events can be scheduled into the hardware at | ||
| 751 | * the same time. This is simplified by the fact that we only | ||
| 752 | * need to support 2 simultaneous HW events. | ||
| 753 | */ | ||
| 754 | static int sparc_check_constraints(unsigned long *events, int n_ev) | ||
| 755 | { | ||
| 756 | if (n_ev <= perf_max_events) { | ||
| 757 | u8 msk1, msk2; | ||
| 758 | u16 dummy; | ||
| 759 | |||
| 760 | if (n_ev == 1) | ||
| 761 | return 0; | ||
| 762 | BUG_ON(n_ev != 2); | ||
| 763 | perf_event_decode(events[0], &dummy, &msk1); | ||
| 764 | perf_event_decode(events[1], &dummy, &msk2); | ||
| 765 | |||
| 766 | /* If both events can go on any counter, OK. */ | ||
| 767 | if (msk1 == (PIC_UPPER | PIC_LOWER) && | ||
| 768 | msk2 == (PIC_UPPER | PIC_LOWER)) | ||
| 769 | return 0; | ||
| 770 | |||
| 771 | /* If one event is limited to a specific counter, | ||
| 772 | * and the other can go on both, OK. | ||
| 773 | */ | ||
| 774 | if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) && | ||
| 775 | msk2 == (PIC_UPPER | PIC_LOWER)) | ||
| 776 | return 0; | ||
| 777 | if ((msk2 == PIC_UPPER || msk2 == PIC_LOWER) && | ||
| 778 | msk1 == (PIC_UPPER | PIC_LOWER)) | ||
| 779 | return 0; | ||
| 780 | |||
| 781 | /* If the events are fixed to different counters, OK. */ | ||
| 782 | if ((msk1 == PIC_UPPER && msk2 == PIC_LOWER) || | ||
| 783 | (msk1 == PIC_LOWER && msk2 == PIC_UPPER)) | ||
| 784 | return 0; | ||
| 785 | |||
| 786 | /* Otherwise, there is a conflict. */ | ||
| 787 | } | ||
| 788 | |||
| 789 | return -1; | ||
| 790 | } | ||
| 791 | |||
| 792 | static int check_excludes(struct perf_event **evts, int n_prev, int n_new) | ||
| 793 | { | ||
| 794 | int eu = 0, ek = 0, eh = 0; | ||
| 795 | struct perf_event *event; | ||
| 796 | int i, n, first; | ||
| 797 | |||
| 798 | n = n_prev + n_new; | ||
| 799 | if (n <= 1) | ||
| 800 | return 0; | ||
| 801 | |||
| 802 | first = 1; | ||
| 803 | for (i = 0; i < n; i++) { | ||
| 804 | event = evts[i]; | ||
| 805 | if (first) { | ||
| 806 | eu = event->attr.exclude_user; | ||
| 807 | ek = event->attr.exclude_kernel; | ||
| 808 | eh = event->attr.exclude_hv; | ||
| 809 | first = 0; | ||
| 810 | } else if (event->attr.exclude_user != eu || | ||
| 811 | event->attr.exclude_kernel != ek || | ||
| 812 | event->attr.exclude_hv != eh) { | ||
| 813 | return -EAGAIN; | ||
| 814 | } | ||
| 815 | } | ||
| 816 | |||
| 817 | return 0; | ||
| 818 | } | ||
| 819 | |||
| 820 | static int collect_events(struct perf_event *group, int max_count, | ||
| 821 | struct perf_event *evts[], unsigned long *events) | ||
| 822 | { | ||
| 823 | struct perf_event *event; | ||
| 824 | int n = 0; | ||
| 825 | |||
| 826 | if (!is_software_event(group)) { | ||
| 827 | if (n >= max_count) | ||
| 828 | return -1; | ||
| 829 | evts[n] = group; | ||
| 830 | events[n++] = group->hw.event_base; | ||
| 831 | } | ||
| 832 | list_for_each_entry(event, &group->sibling_list, group_entry) { | ||
| 833 | if (!is_software_event(event) && | ||
| 834 | event->state != PERF_EVENT_STATE_OFF) { | ||
| 835 | if (n >= max_count) | ||
| 836 | return -1; | ||
| 837 | evts[n] = event; | ||
| 838 | events[n++] = event->hw.event_base; | ||
| 839 | } | ||
| 840 | } | ||
| 841 | return n; | ||
| 842 | } | ||
| 843 | |||
| 383 | static int __hw_perf_event_init(struct perf_event *event) | 844 | static int __hw_perf_event_init(struct perf_event *event) |
| 384 | { | 845 | { |
| 385 | struct perf_event_attr *attr = &event->attr; | 846 | struct perf_event_attr *attr = &event->attr; |
| 847 | struct perf_event *evts[MAX_HWEVENTS]; | ||
| 386 | struct hw_perf_event *hwc = &event->hw; | 848 | struct hw_perf_event *hwc = &event->hw; |
| 849 | unsigned long events[MAX_HWEVENTS]; | ||
| 387 | const struct perf_event_map *pmap; | 850 | const struct perf_event_map *pmap; |
| 388 | u64 enc; | 851 | u64 enc; |
| 852 | int n; | ||
| 389 | 853 | ||
| 390 | if (atomic_read(&nmi_active) < 0) | 854 | if (atomic_read(&nmi_active) < 0) |
| 391 | return -ENODEV; | 855 | return -ENODEV; |
| 392 | 856 | ||
| 393 | if (attr->type != PERF_TYPE_HARDWARE) | 857 | if (attr->type == PERF_TYPE_HARDWARE) { |
| 858 | if (attr->config >= sparc_pmu->max_events) | ||
| 859 | return -EINVAL; | ||
| 860 | pmap = sparc_pmu->event_map(attr->config); | ||
| 861 | } else if (attr->type == PERF_TYPE_HW_CACHE) { | ||
| 862 | pmap = sparc_map_cache_event(attr->config); | ||
| 863 | if (IS_ERR(pmap)) | ||
| 864 | return PTR_ERR(pmap); | ||
| 865 | } else | ||
| 394 | return -EOPNOTSUPP; | 866 | return -EOPNOTSUPP; |
| 395 | 867 | ||
| 396 | if (attr->config >= sparc_pmu->max_events) | ||
| 397 | return -EINVAL; | ||
| 398 | |||
| 399 | perf_event_grab_pmc(); | ||
| 400 | event->destroy = hw_perf_event_destroy; | ||
| 401 | |||
| 402 | /* We save the enable bits in the config_base. So to | 868 | /* We save the enable bits in the config_base. So to |
| 403 | * turn off sampling just write 'config', and to enable | 869 | * turn off sampling just write 'config', and to enable |
| 404 | * things write 'config | config_base'. | 870 | * things write 'config | config_base'. |
| @@ -411,15 +877,39 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
| 411 | if (!attr->exclude_hv) | 877 | if (!attr->exclude_hv) |
| 412 | hwc->config_base |= sparc_pmu->hv_bit; | 878 | hwc->config_base |= sparc_pmu->hv_bit; |
| 413 | 879 | ||
| 880 | hwc->event_base = perf_event_encode(pmap); | ||
| 881 | |||
| 882 | enc = pmap->encoding; | ||
| 883 | |||
| 884 | n = 0; | ||
| 885 | if (event->group_leader != event) { | ||
| 886 | n = collect_events(event->group_leader, | ||
| 887 | perf_max_events - 1, | ||
| 888 | evts, events); | ||
| 889 | if (n < 0) | ||
| 890 | return -EINVAL; | ||
| 891 | } | ||
| 892 | events[n] = hwc->event_base; | ||
| 893 | evts[n] = event; | ||
| 894 | |||
| 895 | if (check_excludes(evts, n, 1)) | ||
| 896 | return -EINVAL; | ||
| 897 | |||
| 898 | if (sparc_check_constraints(events, n + 1)) | ||
| 899 | return -EINVAL; | ||
| 900 | |||
| 901 | /* Try to do all error checking before this point, as unwinding | ||
| 902 | * state after grabbing the PMC is difficult. | ||
| 903 | */ | ||
| 904 | perf_event_grab_pmc(); | ||
| 905 | event->destroy = hw_perf_event_destroy; | ||
| 906 | |||
| 414 | if (!hwc->sample_period) { | 907 | if (!hwc->sample_period) { |
| 415 | hwc->sample_period = MAX_PERIOD; | 908 | hwc->sample_period = MAX_PERIOD; |
| 416 | hwc->last_period = hwc->sample_period; | 909 | hwc->last_period = hwc->sample_period; |
| 417 | atomic64_set(&hwc->period_left, hwc->sample_period); | 910 | atomic64_set(&hwc->period_left, hwc->sample_period); |
| 418 | } | 911 | } |
| 419 | 912 | ||
| 420 | pmap = sparc_pmu->event_map(attr->config); | ||
| 421 | |||
| 422 | enc = pmap->encoding; | ||
| 423 | if (pmap->pic_mask & PIC_UPPER) { | 913 | if (pmap->pic_mask & PIC_UPPER) { |
| 424 | hwc->idx = PIC_UPPER_INDEX; | 914 | hwc->idx = PIC_UPPER_INDEX; |
| 425 | enc <<= sparc_pmu->upper_shift; | 915 | enc <<= sparc_pmu->upper_shift; |
| @@ -472,7 +962,7 @@ void perf_event_print_debug(void) | |||
| 472 | } | 962 | } |
| 473 | 963 | ||
| 474 | static int __kprobes perf_event_nmi_handler(struct notifier_block *self, | 964 | static int __kprobes perf_event_nmi_handler(struct notifier_block *self, |
| 475 | unsigned long cmd, void *__args) | 965 | unsigned long cmd, void *__args) |
| 476 | { | 966 | { |
| 477 | struct die_args *args = __args; | 967 | struct die_args *args = __args; |
| 478 | struct perf_sample_data data; | 968 | struct perf_sample_data data; |
| @@ -513,7 +1003,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, | |||
| 513 | continue; | 1003 | continue; |
| 514 | 1004 | ||
| 515 | if (perf_event_overflow(event, 1, &data, regs)) | 1005 | if (perf_event_overflow(event, 1, &data, regs)) |
| 516 | sparc_pmu_disable_event(hwc, idx); | 1006 | sparc_pmu_disable_event(cpuc, hwc, idx); |
| 517 | } | 1007 | } |
| 518 | 1008 | ||
| 519 | return NOTIFY_STOP; | 1009 | return NOTIFY_STOP; |
| @@ -525,8 +1015,15 @@ static __read_mostly struct notifier_block perf_event_nmi_notifier = { | |||
| 525 | 1015 | ||
| 526 | static bool __init supported_pmu(void) | 1016 | static bool __init supported_pmu(void) |
| 527 | { | 1017 | { |
| 528 | if (!strcmp(sparc_pmu_type, "ultra3i")) { | 1018 | if (!strcmp(sparc_pmu_type, "ultra3") || |
| 529 | sparc_pmu = &ultra3i_pmu; | 1019 | !strcmp(sparc_pmu_type, "ultra3+") || |
| 1020 | !strcmp(sparc_pmu_type, "ultra3i") || | ||
| 1021 | !strcmp(sparc_pmu_type, "ultra4+")) { | ||
| 1022 | sparc_pmu = &ultra3_pmu; | ||
| 1023 | return true; | ||
| 1024 | } | ||
| 1025 | if (!strcmp(sparc_pmu_type, "niagara")) { | ||
| 1026 | sparc_pmu = &niagara1_pmu; | ||
| 530 | return true; | 1027 | return true; |
| 531 | } | 1028 | } |
| 532 | if (!strcmp(sparc_pmu_type, "niagara2")) { | 1029 | if (!strcmp(sparc_pmu_type, "niagara2")) { |
diff --git a/arch/sparc/oprofile/init.c b/arch/sparc/oprofile/init.c index f97cb8b6ee5f..f9024bccff16 100644 --- a/arch/sparc/oprofile/init.c +++ b/arch/sparc/oprofile/init.c | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include <linux/oprofile.h> | 11 | #include <linux/oprofile.h> |
| 12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
| 13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
| 14 | #include <linux/param.h> /* for HZ */ | ||
| 14 | 15 | ||
| 15 | #ifdef CONFIG_SPARC64 | 16 | #ifdef CONFIG_SPARC64 |
| 16 | #include <linux/notifier.h> | 17 | #include <linux/notifier.h> |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 8da93745c087..c876bace8fdc 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
| @@ -86,10 +86,6 @@ config STACKTRACE_SUPPORT | |||
| 86 | config HAVE_LATENCYTOP_SUPPORT | 86 | config HAVE_LATENCYTOP_SUPPORT |
| 87 | def_bool y | 87 | def_bool y |
| 88 | 88 | ||
| 89 | config FAST_CMPXCHG_LOCAL | ||
| 90 | bool | ||
| 91 | default y | ||
| 92 | |||
| 93 | config MMU | 89 | config MMU |
| 94 | def_bool y | 90 | def_bool y |
| 95 | 91 | ||
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 527519b8a9f9..f2824fb8c79c 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu | |||
| @@ -400,7 +400,7 @@ config X86_TSC | |||
| 400 | 400 | ||
| 401 | config X86_CMPXCHG64 | 401 | config X86_CMPXCHG64 |
| 402 | def_bool y | 402 | def_bool y |
| 403 | depends on X86_PAE || X86_64 | 403 | depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM |
| 404 | 404 | ||
| 405 | # this should be set for all -march=.. options where the compiler | 405 | # this should be set for all -march=.. options where the compiler |
| 406 | # generates cmov. | 406 | # generates cmov. |
| @@ -412,6 +412,7 @@ config X86_MINIMUM_CPU_FAMILY | |||
| 412 | int | 412 | int |
| 413 | default "64" if X86_64 | 413 | default "64" if X86_64 |
| 414 | default "6" if X86_32 && X86_P6_NOP | 414 | default "6" if X86_32 && X86_P6_NOP |
| 415 | default "5" if X86_32 && X86_CMPXCHG64 | ||
| 415 | default "4" if X86_32 && (X86_XADD || X86_CMPXCHG || X86_BSWAP || X86_WP_WORKS_OK) | 416 | default "4" if X86_32 && (X86_XADD || X86_CMPXCHG || X86_BSWAP || X86_WP_WORKS_OK) |
| 416 | default "3" | 417 | default "3" |
| 417 | 418 | ||
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 74619c4f9fda..1733f9f65e82 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S | |||
| @@ -21,8 +21,8 @@ | |||
| 21 | #define __AUDIT_ARCH_LE 0x40000000 | 21 | #define __AUDIT_ARCH_LE 0x40000000 |
| 22 | 22 | ||
| 23 | #ifndef CONFIG_AUDITSYSCALL | 23 | #ifndef CONFIG_AUDITSYSCALL |
| 24 | #define sysexit_audit int_ret_from_sys_call | 24 | #define sysexit_audit ia32_ret_from_sys_call |
| 25 | #define sysretl_audit int_ret_from_sys_call | 25 | #define sysretl_audit ia32_ret_from_sys_call |
| 26 | #endif | 26 | #endif |
| 27 | 27 | ||
| 28 | #define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8) | 28 | #define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8) |
| @@ -39,12 +39,12 @@ | |||
| 39 | .endm | 39 | .endm |
| 40 | 40 | ||
| 41 | /* clobbers %eax */ | 41 | /* clobbers %eax */ |
| 42 | .macro CLEAR_RREGS _r9=rax | 42 | .macro CLEAR_RREGS offset=0, _r9=rax |
| 43 | xorl %eax,%eax | 43 | xorl %eax,%eax |
| 44 | movq %rax,R11(%rsp) | 44 | movq %rax,\offset+R11(%rsp) |
| 45 | movq %rax,R10(%rsp) | 45 | movq %rax,\offset+R10(%rsp) |
| 46 | movq %\_r9,R9(%rsp) | 46 | movq %\_r9,\offset+R9(%rsp) |
| 47 | movq %rax,R8(%rsp) | 47 | movq %rax,\offset+R8(%rsp) |
| 48 | .endm | 48 | .endm |
| 49 | 49 | ||
| 50 | /* | 50 | /* |
| @@ -172,6 +172,10 @@ sysexit_from_sys_call: | |||
| 172 | movl RIP-R11(%rsp),%edx /* User %eip */ | 172 | movl RIP-R11(%rsp),%edx /* User %eip */ |
| 173 | CFI_REGISTER rip,rdx | 173 | CFI_REGISTER rip,rdx |
| 174 | RESTORE_ARGS 1,24,1,1,1,1 | 174 | RESTORE_ARGS 1,24,1,1,1,1 |
| 175 | xorq %r8,%r8 | ||
| 176 | xorq %r9,%r9 | ||
| 177 | xorq %r10,%r10 | ||
| 178 | xorq %r11,%r11 | ||
| 175 | popfq | 179 | popfq |
| 176 | CFI_ADJUST_CFA_OFFSET -8 | 180 | CFI_ADJUST_CFA_OFFSET -8 |
| 177 | /*CFI_RESTORE rflags*/ | 181 | /*CFI_RESTORE rflags*/ |
| @@ -202,7 +206,7 @@ sysexit_from_sys_call: | |||
| 202 | 206 | ||
| 203 | .macro auditsys_exit exit,ebpsave=RBP | 207 | .macro auditsys_exit exit,ebpsave=RBP |
| 204 | testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) | 208 | testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) |
| 205 | jnz int_ret_from_sys_call | 209 | jnz ia32_ret_from_sys_call |
| 206 | TRACE_IRQS_ON | 210 | TRACE_IRQS_ON |
| 207 | sti | 211 | sti |
| 208 | movl %eax,%esi /* second arg, syscall return value */ | 212 | movl %eax,%esi /* second arg, syscall return value */ |
| @@ -218,8 +222,9 @@ sysexit_from_sys_call: | |||
| 218 | cli | 222 | cli |
| 219 | TRACE_IRQS_OFF | 223 | TRACE_IRQS_OFF |
| 220 | testl %edi,TI_flags(%r10) | 224 | testl %edi,TI_flags(%r10) |
| 221 | jnz int_with_check | 225 | jz \exit |
| 222 | jmp \exit | 226 | CLEAR_RREGS -ARGOFFSET |
| 227 | jmp int_with_check | ||
| 223 | .endm | 228 | .endm |
| 224 | 229 | ||
| 225 | sysenter_auditsys: | 230 | sysenter_auditsys: |
| @@ -329,6 +334,9 @@ sysretl_from_sys_call: | |||
| 329 | CFI_REGISTER rip,rcx | 334 | CFI_REGISTER rip,rcx |
| 330 | movl EFLAGS-ARGOFFSET(%rsp),%r11d | 335 | movl EFLAGS-ARGOFFSET(%rsp),%r11d |
| 331 | /*CFI_REGISTER rflags,r11*/ | 336 | /*CFI_REGISTER rflags,r11*/ |
| 337 | xorq %r10,%r10 | ||
| 338 | xorq %r9,%r9 | ||
| 339 | xorq %r8,%r8 | ||
| 332 | TRACE_IRQS_ON | 340 | TRACE_IRQS_ON |
| 333 | movl RSP-ARGOFFSET(%rsp),%esp | 341 | movl RSP-ARGOFFSET(%rsp),%esp |
| 334 | CFI_RESTORE rsp | 342 | CFI_RESTORE rsp |
| @@ -353,7 +361,7 @@ cstar_tracesys: | |||
| 353 | #endif | 361 | #endif |
| 354 | xchgl %r9d,%ebp | 362 | xchgl %r9d,%ebp |
| 355 | SAVE_REST | 363 | SAVE_REST |
| 356 | CLEAR_RREGS r9 | 364 | CLEAR_RREGS 0, r9 |
| 357 | movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ | 365 | movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ |
| 358 | movq %rsp,%rdi /* &pt_regs -> arg1 */ | 366 | movq %rsp,%rdi /* &pt_regs -> arg1 */ |
| 359 | call syscall_trace_enter | 367 | call syscall_trace_enter |
| @@ -425,6 +433,8 @@ ia32_do_call: | |||
| 425 | call *ia32_sys_call_table(,%rax,8) # xxx: rip relative | 433 | call *ia32_sys_call_table(,%rax,8) # xxx: rip relative |
| 426 | ia32_sysret: | 434 | ia32_sysret: |
| 427 | movq %rax,RAX-ARGOFFSET(%rsp) | 435 | movq %rax,RAX-ARGOFFSET(%rsp) |
| 436 | ia32_ret_from_sys_call: | ||
| 437 | CLEAR_RREGS -ARGOFFSET | ||
| 428 | jmp int_ret_from_sys_call | 438 | jmp int_ret_from_sys_call |
| 429 | 439 | ||
| 430 | ia32_tracesys: | 440 | ia32_tracesys: |
| @@ -442,8 +452,8 @@ END(ia32_syscall) | |||
| 442 | 452 | ||
| 443 | ia32_badsys: | 453 | ia32_badsys: |
| 444 | movq $0,ORIG_RAX-ARGOFFSET(%rsp) | 454 | movq $0,ORIG_RAX-ARGOFFSET(%rsp) |
| 445 | movq $-ENOSYS,RAX-ARGOFFSET(%rsp) | 455 | movq $-ENOSYS,%rax |
| 446 | jmp int_ret_from_sys_call | 456 | jmp ia32_sysret |
| 447 | 457 | ||
| 448 | quiet_ni_syscall: | 458 | quiet_ni_syscall: |
| 449 | movq $-ENOSYS,%rax | 459 | movq $-ENOSYS,%rax |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 3be000435fad..d83892226f73 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
| @@ -796,6 +796,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void); | |||
| 796 | #define KVM_ARCH_WANT_MMU_NOTIFIER | 796 | #define KVM_ARCH_WANT_MMU_NOTIFIER |
| 797 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); | 797 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); |
| 798 | int kvm_age_hva(struct kvm *kvm, unsigned long hva); | 798 | int kvm_age_hva(struct kvm *kvm, unsigned long hva); |
| 799 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); | ||
| 799 | int cpuid_maxphyaddr(struct kvm_vcpu *vcpu); | 800 | int cpuid_maxphyaddr(struct kvm_vcpu *vcpu); |
| 800 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); | 801 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); |
| 801 | int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); | 802 | int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); |
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 41fd965c80c6..b9c830c12b4a 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c | |||
| @@ -206,8 +206,11 @@ static int __init setup_early_printk(char *buf) | |||
| 206 | 206 | ||
| 207 | while (*buf != '\0') { | 207 | while (*buf != '\0') { |
| 208 | if (!strncmp(buf, "serial", 6)) { | 208 | if (!strncmp(buf, "serial", 6)) { |
| 209 | early_serial_init(buf + 6); | 209 | buf += 6; |
| 210 | early_serial_init(buf); | ||
| 210 | early_console_register(&early_serial_console, keep); | 211 | early_console_register(&early_serial_console, keep); |
| 212 | if (!strncmp(buf, ",ttyS", 5)) | ||
| 213 | buf += 5; | ||
| 211 | } | 214 | } |
| 212 | if (!strncmp(buf, "ttyS", 4)) { | 215 | if (!strncmp(buf, "ttyS", 4)) { |
| 213 | early_serial_init(buf + 4); | 216 | early_serial_init(buf + 4); |
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c index 1736c5a725aa..9c3bd4a2050e 100644 --- a/arch/x86/kernel/i386_ksyms_32.c +++ b/arch/x86/kernel/i386_ksyms_32.c | |||
| @@ -15,8 +15,10 @@ EXPORT_SYMBOL(mcount); | |||
| 15 | * the export, but dont use it from C code, it is used | 15 | * the export, but dont use it from C code, it is used |
| 16 | * by assembly code and is not using C calling convention! | 16 | * by assembly code and is not using C calling convention! |
| 17 | */ | 17 | */ |
| 18 | #ifndef CONFIG_X86_CMPXCHG64 | ||
| 18 | extern void cmpxchg8b_emu(void); | 19 | extern void cmpxchg8b_emu(void); |
| 19 | EXPORT_SYMBOL(cmpxchg8b_emu); | 20 | EXPORT_SYMBOL(cmpxchg8b_emu); |
| 21 | #endif | ||
| 20 | 22 | ||
| 21 | /* Networking helper routines. */ | 23 | /* Networking helper routines. */ |
| 22 | EXPORT_SYMBOL(csum_partial_copy_generic); | 24 | EXPORT_SYMBOL(csum_partial_copy_generic); |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 1ae5ceba7eb2..7024224f0fc8 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
| @@ -664,7 +664,7 @@ static void start_apic_timer(struct kvm_lapic *apic) | |||
| 664 | { | 664 | { |
| 665 | ktime_t now = apic->lapic_timer.timer.base->get_time(); | 665 | ktime_t now = apic->lapic_timer.timer.base->get_time(); |
| 666 | 666 | ||
| 667 | apic->lapic_timer.period = apic_get_reg(apic, APIC_TMICT) * | 667 | apic->lapic_timer.period = (u64)apic_get_reg(apic, APIC_TMICT) * |
| 668 | APIC_BUS_CYCLE_NS * apic->divide_count; | 668 | APIC_BUS_CYCLE_NS * apic->divide_count; |
| 669 | atomic_set(&apic->lapic_timer.pending, 0); | 669 | atomic_set(&apic->lapic_timer.pending, 0); |
| 670 | 670 | ||
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index eca41ae9f453..685a4ffac8e6 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
| @@ -156,6 +156,8 @@ module_param(oos_shadow, bool, 0644); | |||
| 156 | #define CREATE_TRACE_POINTS | 156 | #define CREATE_TRACE_POINTS |
| 157 | #include "mmutrace.h" | 157 | #include "mmutrace.h" |
| 158 | 158 | ||
| 159 | #define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT) | ||
| 160 | |||
| 159 | #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) | 161 | #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) |
| 160 | 162 | ||
| 161 | struct kvm_rmap_desc { | 163 | struct kvm_rmap_desc { |
| @@ -634,9 +636,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte) | |||
| 634 | if (*spte & shadow_accessed_mask) | 636 | if (*spte & shadow_accessed_mask) |
| 635 | kvm_set_pfn_accessed(pfn); | 637 | kvm_set_pfn_accessed(pfn); |
| 636 | if (is_writeble_pte(*spte)) | 638 | if (is_writeble_pte(*spte)) |
| 637 | kvm_release_pfn_dirty(pfn); | 639 | kvm_set_pfn_dirty(pfn); |
| 638 | else | ||
| 639 | kvm_release_pfn_clean(pfn); | ||
| 640 | rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level); | 640 | rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level); |
| 641 | if (!*rmapp) { | 641 | if (!*rmapp) { |
| 642 | printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte); | 642 | printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte); |
| @@ -748,7 +748,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn) | |||
| 748 | return write_protected; | 748 | return write_protected; |
| 749 | } | 749 | } |
| 750 | 750 | ||
| 751 | static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp) | 751 | static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data) |
| 752 | { | 752 | { |
| 753 | u64 *spte; | 753 | u64 *spte; |
| 754 | int need_tlb_flush = 0; | 754 | int need_tlb_flush = 0; |
| @@ -763,8 +763,45 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp) | |||
| 763 | return need_tlb_flush; | 763 | return need_tlb_flush; |
| 764 | } | 764 | } |
| 765 | 765 | ||
| 766 | static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, | 766 | static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data) |
| 767 | int (*handler)(struct kvm *kvm, unsigned long *rmapp)) | 767 | { |
| 768 | int need_flush = 0; | ||
| 769 | u64 *spte, new_spte; | ||
| 770 | pte_t *ptep = (pte_t *)data; | ||
| 771 | pfn_t new_pfn; | ||
| 772 | |||
| 773 | WARN_ON(pte_huge(*ptep)); | ||
| 774 | new_pfn = pte_pfn(*ptep); | ||
| 775 | spte = rmap_next(kvm, rmapp, NULL); | ||
| 776 | while (spte) { | ||
| 777 | BUG_ON(!is_shadow_present_pte(*spte)); | ||
| 778 | rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte); | ||
| 779 | need_flush = 1; | ||
| 780 | if (pte_write(*ptep)) { | ||
| 781 | rmap_remove(kvm, spte); | ||
| 782 | __set_spte(spte, shadow_trap_nonpresent_pte); | ||
| 783 | spte = rmap_next(kvm, rmapp, NULL); | ||
| 784 | } else { | ||
| 785 | new_spte = *spte &~ (PT64_BASE_ADDR_MASK); | ||
| 786 | new_spte |= (u64)new_pfn << PAGE_SHIFT; | ||
| 787 | |||
| 788 | new_spte &= ~PT_WRITABLE_MASK; | ||
| 789 | new_spte &= ~SPTE_HOST_WRITEABLE; | ||
| 790 | if (is_writeble_pte(*spte)) | ||
| 791 | kvm_set_pfn_dirty(spte_to_pfn(*spte)); | ||
| 792 | __set_spte(spte, new_spte); | ||
| 793 | spte = rmap_next(kvm, rmapp, spte); | ||
| 794 | } | ||
| 795 | } | ||
| 796 | if (need_flush) | ||
| 797 | kvm_flush_remote_tlbs(kvm); | ||
| 798 | |||
| 799 | return 0; | ||
| 800 | } | ||
| 801 | |||
| 802 | static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, u64 data, | ||
| 803 | int (*handler)(struct kvm *kvm, unsigned long *rmapp, | ||
| 804 | u64 data)) | ||
| 768 | { | 805 | { |
| 769 | int i, j; | 806 | int i, j; |
| 770 | int retval = 0; | 807 | int retval = 0; |
| @@ -786,13 +823,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, | |||
| 786 | if (hva >= start && hva < end) { | 823 | if (hva >= start && hva < end) { |
| 787 | gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT; | 824 | gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT; |
| 788 | 825 | ||
| 789 | retval |= handler(kvm, &memslot->rmap[gfn_offset]); | 826 | retval |= handler(kvm, &memslot->rmap[gfn_offset], |
| 827 | data); | ||
| 790 | 828 | ||
| 791 | for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) { | 829 | for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) { |
| 792 | int idx = gfn_offset; | 830 | int idx = gfn_offset; |
| 793 | idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j); | 831 | idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j); |
| 794 | retval |= handler(kvm, | 832 | retval |= handler(kvm, |
| 795 | &memslot->lpage_info[j][idx].rmap_pde); | 833 | &memslot->lpage_info[j][idx].rmap_pde, |
| 834 | data); | ||
| 796 | } | 835 | } |
| 797 | } | 836 | } |
| 798 | } | 837 | } |
| @@ -802,10 +841,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, | |||
| 802 | 841 | ||
| 803 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) | 842 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) |
| 804 | { | 843 | { |
| 805 | return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp); | 844 | return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp); |
| 806 | } | 845 | } |
| 807 | 846 | ||
| 808 | static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp) | 847 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) |
| 848 | { | ||
| 849 | kvm_handle_hva(kvm, hva, (u64)&pte, kvm_set_pte_rmapp); | ||
| 850 | } | ||
| 851 | |||
| 852 | static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data) | ||
| 809 | { | 853 | { |
| 810 | u64 *spte; | 854 | u64 *spte; |
| 811 | int young = 0; | 855 | int young = 0; |
| @@ -841,13 +885,13 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) | |||
| 841 | gfn = unalias_gfn(vcpu->kvm, gfn); | 885 | gfn = unalias_gfn(vcpu->kvm, gfn); |
| 842 | rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); | 886 | rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); |
| 843 | 887 | ||
| 844 | kvm_unmap_rmapp(vcpu->kvm, rmapp); | 888 | kvm_unmap_rmapp(vcpu->kvm, rmapp, 0); |
| 845 | kvm_flush_remote_tlbs(vcpu->kvm); | 889 | kvm_flush_remote_tlbs(vcpu->kvm); |
| 846 | } | 890 | } |
| 847 | 891 | ||
| 848 | int kvm_age_hva(struct kvm *kvm, unsigned long hva) | 892 | int kvm_age_hva(struct kvm *kvm, unsigned long hva) |
| 849 | { | 893 | { |
| 850 | return kvm_handle_hva(kvm, hva, kvm_age_rmapp); | 894 | return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp); |
| 851 | } | 895 | } |
| 852 | 896 | ||
| 853 | #ifdef MMU_DEBUG | 897 | #ifdef MMU_DEBUG |
| @@ -1756,7 +1800,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, | |||
| 1756 | unsigned pte_access, int user_fault, | 1800 | unsigned pte_access, int user_fault, |
| 1757 | int write_fault, int dirty, int level, | 1801 | int write_fault, int dirty, int level, |
| 1758 | gfn_t gfn, pfn_t pfn, bool speculative, | 1802 | gfn_t gfn, pfn_t pfn, bool speculative, |
| 1759 | bool can_unsync) | 1803 | bool can_unsync, bool reset_host_protection) |
| 1760 | { | 1804 | { |
| 1761 | u64 spte; | 1805 | u64 spte; |
| 1762 | int ret = 0; | 1806 | int ret = 0; |
| @@ -1783,6 +1827,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, | |||
| 1783 | spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, | 1827 | spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, |
| 1784 | kvm_is_mmio_pfn(pfn)); | 1828 | kvm_is_mmio_pfn(pfn)); |
| 1785 | 1829 | ||
| 1830 | if (reset_host_protection) | ||
| 1831 | spte |= SPTE_HOST_WRITEABLE; | ||
| 1832 | |||
| 1786 | spte |= (u64)pfn << PAGE_SHIFT; | 1833 | spte |= (u64)pfn << PAGE_SHIFT; |
| 1787 | 1834 | ||
| 1788 | if ((pte_access & ACC_WRITE_MASK) | 1835 | if ((pte_access & ACC_WRITE_MASK) |
| @@ -1828,7 +1875,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, | |||
| 1828 | unsigned pt_access, unsigned pte_access, | 1875 | unsigned pt_access, unsigned pte_access, |
| 1829 | int user_fault, int write_fault, int dirty, | 1876 | int user_fault, int write_fault, int dirty, |
| 1830 | int *ptwrite, int level, gfn_t gfn, | 1877 | int *ptwrite, int level, gfn_t gfn, |
| 1831 | pfn_t pfn, bool speculative) | 1878 | pfn_t pfn, bool speculative, |
| 1879 | bool reset_host_protection) | ||
| 1832 | { | 1880 | { |
| 1833 | int was_rmapped = 0; | 1881 | int was_rmapped = 0; |
| 1834 | int was_writeble = is_writeble_pte(*sptep); | 1882 | int was_writeble = is_writeble_pte(*sptep); |
| @@ -1860,7 +1908,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, | |||
| 1860 | } | 1908 | } |
| 1861 | 1909 | ||
| 1862 | if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault, | 1910 | if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault, |
| 1863 | dirty, level, gfn, pfn, speculative, true)) { | 1911 | dirty, level, gfn, pfn, speculative, true, |
| 1912 | reset_host_protection)) { | ||
| 1864 | if (write_fault) | 1913 | if (write_fault) |
| 1865 | *ptwrite = 1; | 1914 | *ptwrite = 1; |
| 1866 | kvm_x86_ops->tlb_flush(vcpu); | 1915 | kvm_x86_ops->tlb_flush(vcpu); |
| @@ -1877,8 +1926,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, | |||
| 1877 | page_header_update_slot(vcpu->kvm, sptep, gfn); | 1926 | page_header_update_slot(vcpu->kvm, sptep, gfn); |
| 1878 | if (!was_rmapped) { | 1927 | if (!was_rmapped) { |
| 1879 | rmap_count = rmap_add(vcpu, sptep, gfn); | 1928 | rmap_count = rmap_add(vcpu, sptep, gfn); |
| 1880 | if (!is_rmap_spte(*sptep)) | 1929 | kvm_release_pfn_clean(pfn); |
| 1881 | kvm_release_pfn_clean(pfn); | ||
| 1882 | if (rmap_count > RMAP_RECYCLE_THRESHOLD) | 1930 | if (rmap_count > RMAP_RECYCLE_THRESHOLD) |
| 1883 | rmap_recycle(vcpu, sptep, gfn); | 1931 | rmap_recycle(vcpu, sptep, gfn); |
| 1884 | } else { | 1932 | } else { |
| @@ -1909,7 +1957,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, | |||
| 1909 | if (iterator.level == level) { | 1957 | if (iterator.level == level) { |
| 1910 | mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL, | 1958 | mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL, |
| 1911 | 0, write, 1, &pt_write, | 1959 | 0, write, 1, &pt_write, |
| 1912 | level, gfn, pfn, false); | 1960 | level, gfn, pfn, false, true); |
| 1913 | ++vcpu->stat.pf_fixed; | 1961 | ++vcpu->stat.pf_fixed; |
| 1914 | break; | 1962 | break; |
| 1915 | } | 1963 | } |
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index d2fec9c12d22..72558f8ff3f5 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h | |||
| @@ -273,9 +273,13 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page, | |||
| 273 | if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq)) | 273 | if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq)) |
| 274 | return; | 274 | return; |
| 275 | kvm_get_pfn(pfn); | 275 | kvm_get_pfn(pfn); |
| 276 | /* | ||
| 277 | * we call mmu_set_spte() with reset_host_protection = true beacuse that | ||
| 278 | * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1). | ||
| 279 | */ | ||
| 276 | mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0, | 280 | mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0, |
| 277 | gpte & PT_DIRTY_MASK, NULL, PT_PAGE_TABLE_LEVEL, | 281 | gpte & PT_DIRTY_MASK, NULL, PT_PAGE_TABLE_LEVEL, |
| 278 | gpte_to_gfn(gpte), pfn, true); | 282 | gpte_to_gfn(gpte), pfn, true, true); |
| 279 | } | 283 | } |
| 280 | 284 | ||
| 281 | /* | 285 | /* |
| @@ -308,7 +312,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, | |||
| 308 | user_fault, write_fault, | 312 | user_fault, write_fault, |
| 309 | gw->ptes[gw->level-1] & PT_DIRTY_MASK, | 313 | gw->ptes[gw->level-1] & PT_DIRTY_MASK, |
| 310 | ptwrite, level, | 314 | ptwrite, level, |
| 311 | gw->gfn, pfn, false); | 315 | gw->gfn, pfn, false, true); |
| 312 | break; | 316 | break; |
| 313 | } | 317 | } |
| 314 | 318 | ||
| @@ -558,6 +562,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu, | |||
| 558 | static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) | 562 | static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) |
| 559 | { | 563 | { |
| 560 | int i, offset, nr_present; | 564 | int i, offset, nr_present; |
| 565 | bool reset_host_protection; | ||
| 561 | 566 | ||
| 562 | offset = nr_present = 0; | 567 | offset = nr_present = 0; |
| 563 | 568 | ||
| @@ -595,9 +600,16 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) | |||
| 595 | 600 | ||
| 596 | nr_present++; | 601 | nr_present++; |
| 597 | pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); | 602 | pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); |
| 603 | if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) { | ||
| 604 | pte_access &= ~ACC_WRITE_MASK; | ||
| 605 | reset_host_protection = 0; | ||
| 606 | } else { | ||
| 607 | reset_host_protection = 1; | ||
| 608 | } | ||
| 598 | set_spte(vcpu, &sp->spt[i], pte_access, 0, 0, | 609 | set_spte(vcpu, &sp->spt[i], pte_access, 0, 0, |
| 599 | is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn, | 610 | is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn, |
| 600 | spte_to_pfn(sp->spt[i]), true, false); | 611 | spte_to_pfn(sp->spt[i]), true, false, |
| 612 | reset_host_protection); | ||
| 601 | } | 613 | } |
| 602 | 614 | ||
| 603 | return !nr_present; | 615 | return !nr_present; |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 944cc9c04b3c..c17404add91f 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
| @@ -767,6 +767,8 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
| 767 | rdtscll(tsc_this); | 767 | rdtscll(tsc_this); |
| 768 | delta = vcpu->arch.host_tsc - tsc_this; | 768 | delta = vcpu->arch.host_tsc - tsc_this; |
| 769 | svm->vmcb->control.tsc_offset += delta; | 769 | svm->vmcb->control.tsc_offset += delta; |
| 770 | if (is_nested(svm)) | ||
| 771 | svm->nested.hsave->control.tsc_offset += delta; | ||
| 770 | vcpu->cpu = cpu; | 772 | vcpu->cpu = cpu; |
| 771 | kvm_migrate_timers(vcpu); | 773 | kvm_migrate_timers(vcpu); |
| 772 | svm->asid_generation = 0; | 774 | svm->asid_generation = 0; |
| @@ -2057,10 +2059,14 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data) | |||
| 2057 | 2059 | ||
| 2058 | switch (ecx) { | 2060 | switch (ecx) { |
| 2059 | case MSR_IA32_TSC: { | 2061 | case MSR_IA32_TSC: { |
| 2060 | u64 tsc; | 2062 | u64 tsc_offset; |
| 2063 | |||
| 2064 | if (is_nested(svm)) | ||
| 2065 | tsc_offset = svm->nested.hsave->control.tsc_offset; | ||
| 2066 | else | ||
| 2067 | tsc_offset = svm->vmcb->control.tsc_offset; | ||
| 2061 | 2068 | ||
| 2062 | rdtscll(tsc); | 2069 | *data = tsc_offset + native_read_tsc(); |
| 2063 | *data = svm->vmcb->control.tsc_offset + tsc; | ||
| 2064 | break; | 2070 | break; |
| 2065 | } | 2071 | } |
| 2066 | case MSR_K6_STAR: | 2072 | case MSR_K6_STAR: |
| @@ -2146,10 +2152,17 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) | |||
| 2146 | 2152 | ||
| 2147 | switch (ecx) { | 2153 | switch (ecx) { |
| 2148 | case MSR_IA32_TSC: { | 2154 | case MSR_IA32_TSC: { |
| 2149 | u64 tsc; | 2155 | u64 tsc_offset = data - native_read_tsc(); |
| 2156 | u64 g_tsc_offset = 0; | ||
| 2157 | |||
| 2158 | if (is_nested(svm)) { | ||
| 2159 | g_tsc_offset = svm->vmcb->control.tsc_offset - | ||
| 2160 | svm->nested.hsave->control.tsc_offset; | ||
| 2161 | svm->nested.hsave->control.tsc_offset = tsc_offset; | ||
| 2162 | } | ||
| 2163 | |||
| 2164 | svm->vmcb->control.tsc_offset = tsc_offset + g_tsc_offset; | ||
| 2150 | 2165 | ||
| 2151 | rdtscll(tsc); | ||
| 2152 | svm->vmcb->control.tsc_offset = data - tsc; | ||
| 2153 | break; | 2166 | break; |
| 2154 | } | 2167 | } |
| 2155 | case MSR_K6_STAR: | 2168 | case MSR_K6_STAR: |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f3812014bd0b..ed53b42caba1 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -709,7 +709,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
| 709 | if (vcpu->cpu != cpu) { | 709 | if (vcpu->cpu != cpu) { |
| 710 | vcpu_clear(vmx); | 710 | vcpu_clear(vmx); |
| 711 | kvm_migrate_timers(vcpu); | 711 | kvm_migrate_timers(vcpu); |
| 712 | vpid_sync_vcpu_all(vmx); | 712 | set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests); |
| 713 | local_irq_disable(); | 713 | local_irq_disable(); |
| 714 | list_add(&vmx->local_vcpus_link, | 714 | list_add(&vmx->local_vcpus_link, |
| 715 | &per_cpu(vcpus_on_cpu, cpu)); | 715 | &per_cpu(vcpus_on_cpu, cpu)); |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index be451ee44249..9b9695322f56 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -1591,6 +1591,8 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, | |||
| 1591 | 1591 | ||
| 1592 | if (cpuid->nent < 1) | 1592 | if (cpuid->nent < 1) |
| 1593 | goto out; | 1593 | goto out; |
| 1594 | if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) | ||
| 1595 | cpuid->nent = KVM_MAX_CPUID_ENTRIES; | ||
| 1594 | r = -ENOMEM; | 1596 | r = -ENOMEM; |
| 1595 | cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent); | 1597 | cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent); |
| 1596 | if (!cpuid_entries) | 1598 | if (!cpuid_entries) |
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 3e549b8ec8c9..85f5db95c60f 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile | |||
| @@ -15,8 +15,10 @@ ifeq ($(CONFIG_X86_32),y) | |||
| 15 | obj-y += atomic64_32.o | 15 | obj-y += atomic64_32.o |
| 16 | lib-y += checksum_32.o | 16 | lib-y += checksum_32.o |
| 17 | lib-y += strstr_32.o | 17 | lib-y += strstr_32.o |
| 18 | lib-y += semaphore_32.o string_32.o cmpxchg8b_emu.o | 18 | lib-y += semaphore_32.o string_32.o |
| 19 | 19 | ifneq ($(CONFIG_X86_CMPXCHG64),y) | |
| 20 | lib-y += cmpxchg8b_emu.o | ||
| 21 | endif | ||
| 20 | lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o | 22 | lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o |
| 21 | else | 23 | else |
| 22 | obj-y += io_64.o iomap_copy_64.o | 24 | obj-y += io_64.o iomap_copy_64.o |
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c index 60ab75104da9..1c129211302d 100644 --- a/drivers/char/agp/parisc-agp.c +++ b/drivers/char/agp/parisc-agp.c | |||
| @@ -217,7 +217,7 @@ static const struct agp_bridge_driver parisc_agp_driver = { | |||
| 217 | .configure = parisc_agp_configure, | 217 | .configure = parisc_agp_configure, |
| 218 | .fetch_size = parisc_agp_fetch_size, | 218 | .fetch_size = parisc_agp_fetch_size, |
| 219 | .tlb_flush = parisc_agp_tlbflush, | 219 | .tlb_flush = parisc_agp_tlbflush, |
| 220 | .mask_memory = parisc_agp_page_mask_memory, | 220 | .mask_memory = parisc_agp_mask_memory, |
| 221 | .masks = parisc_agp_masks, | 221 | .masks = parisc_agp_masks, |
| 222 | .agp_enable = parisc_agp_enable, | 222 | .agp_enable = parisc_agp_enable, |
| 223 | .cache_flush = global_cache_flush, | 223 | .cache_flush = global_cache_flush, |
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index abf4a2529f80..60697909ebdb 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c | |||
| @@ -227,7 +227,8 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack) | |||
| 227 | * cn_proc_mcast_ctl | 227 | * cn_proc_mcast_ctl |
| 228 | * @data: message sent from userspace via the connector | 228 | * @data: message sent from userspace via the connector |
| 229 | */ | 229 | */ |
| 230 | static void cn_proc_mcast_ctl(struct cn_msg *msg) | 230 | static void cn_proc_mcast_ctl(struct cn_msg *msg, |
| 231 | struct netlink_skb_parms *nsp) | ||
| 231 | { | 232 | { |
| 232 | enum proc_cn_mcast_op *mc_op = NULL; | 233 | enum proc_cn_mcast_op *mc_op = NULL; |
| 233 | int err = 0; | 234 | int err = 0; |
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 4e551e63b6dc..4f4ac82382f7 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c | |||
| @@ -15,8 +15,8 @@ module_param(ecc_enable_override, int, 0644); | |||
| 15 | 15 | ||
| 16 | /* Lookup table for all possible MC control instances */ | 16 | /* Lookup table for all possible MC control instances */ |
| 17 | struct amd64_pvt; | 17 | struct amd64_pvt; |
| 18 | static struct mem_ctl_info *mci_lookup[MAX_NUMNODES]; | 18 | static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES]; |
| 19 | static struct amd64_pvt *pvt_lookup[MAX_NUMNODES]; | 19 | static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES]; |
| 20 | 20 | ||
| 21 | /* | 21 | /* |
| 22 | * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only | 22 | * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only |
| @@ -189,7 +189,10 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw) | |||
| 189 | /* Map from a CSROW entry to the mask entry that operates on it */ | 189 | /* Map from a CSROW entry to the mask entry that operates on it */ |
| 190 | static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow) | 190 | static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow) |
| 191 | { | 191 | { |
| 192 | return csrow >> (pvt->num_dcsm >> 3); | 192 | if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F) |
| 193 | return csrow; | ||
| 194 | else | ||
| 195 | return csrow >> 1; | ||
| 193 | } | 196 | } |
| 194 | 197 | ||
| 195 | /* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */ | 198 | /* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */ |
| @@ -279,29 +282,26 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, | |||
| 279 | intlv_en = pvt->dram_IntlvEn[0]; | 282 | intlv_en = pvt->dram_IntlvEn[0]; |
| 280 | 283 | ||
| 281 | if (intlv_en == 0) { | 284 | if (intlv_en == 0) { |
| 282 | for (node_id = 0; ; ) { | 285 | for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) { |
| 283 | if (amd64_base_limit_match(pvt, sys_addr, node_id)) | 286 | if (amd64_base_limit_match(pvt, sys_addr, node_id)) |
| 284 | break; | 287 | goto found; |
| 285 | |||
| 286 | if (++node_id >= DRAM_REG_COUNT) | ||
| 287 | goto err_no_match; | ||
| 288 | } | 288 | } |
| 289 | goto found; | 289 | goto err_no_match; |
| 290 | } | 290 | } |
| 291 | 291 | ||
| 292 | if (unlikely((intlv_en != (0x01 << 8)) && | 292 | if (unlikely((intlv_en != 0x01) && |
| 293 | (intlv_en != (0x03 << 8)) && | 293 | (intlv_en != 0x03) && |
| 294 | (intlv_en != (0x07 << 8)))) { | 294 | (intlv_en != 0x07))) { |
| 295 | amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from " | 295 | amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from " |
| 296 | "IntlvEn field of DRAM Base Register for node 0: " | 296 | "IntlvEn field of DRAM Base Register for node 0: " |
| 297 | "This probably indicates a BIOS bug.\n", intlv_en); | 297 | "this probably indicates a BIOS bug.\n", intlv_en); |
| 298 | return NULL; | 298 | return NULL; |
| 299 | } | 299 | } |
| 300 | 300 | ||
| 301 | bits = (((u32) sys_addr) >> 12) & intlv_en; | 301 | bits = (((u32) sys_addr) >> 12) & intlv_en; |
| 302 | 302 | ||
| 303 | for (node_id = 0; ; ) { | 303 | for (node_id = 0; ; ) { |
| 304 | if ((pvt->dram_limit[node_id] & intlv_en) == bits) | 304 | if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits) |
| 305 | break; /* intlv_sel field matches */ | 305 | break; /* intlv_sel field matches */ |
| 306 | 306 | ||
| 307 | if (++node_id >= DRAM_REG_COUNT) | 307 | if (++node_id >= DRAM_REG_COUNT) |
| @@ -311,10 +311,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, | |||
| 311 | /* sanity test for sys_addr */ | 311 | /* sanity test for sys_addr */ |
| 312 | if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) { | 312 | if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) { |
| 313 | amd64_printk(KERN_WARNING, | 313 | amd64_printk(KERN_WARNING, |
| 314 | "%s(): sys_addr 0x%lx falls outside base/limit " | 314 | "%s(): sys_addr 0x%llx falls outside base/limit " |
| 315 | "address range for node %d with node interleaving " | 315 | "address range for node %d with node interleaving " |
| 316 | "enabled.\n", __func__, (unsigned long)sys_addr, | 316 | "enabled.\n", |
| 317 | node_id); | 317 | __func__, sys_addr, node_id); |
| 318 | return NULL; | 318 | return NULL; |
| 319 | } | 319 | } |
| 320 | 320 | ||
| @@ -377,7 +377,7 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr) | |||
| 377 | * base/mask register pair, test the condition shown near the start of | 377 | * base/mask register pair, test the condition shown near the start of |
| 378 | * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E). | 378 | * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E). |
| 379 | */ | 379 | */ |
| 380 | for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) { | 380 | for (csrow = 0; csrow < pvt->cs_count; csrow++) { |
| 381 | 381 | ||
| 382 | /* This DRAM chip select is disabled on this node */ | 382 | /* This DRAM chip select is disabled on this node */ |
| 383 | if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0) | 383 | if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0) |
| @@ -734,7 +734,7 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow, | |||
| 734 | u64 base, mask; | 734 | u64 base, mask; |
| 735 | 735 | ||
| 736 | pvt = mci->pvt_info; | 736 | pvt = mci->pvt_info; |
| 737 | BUG_ON((csrow < 0) || (csrow >= CHIPSELECT_COUNT)); | 737 | BUG_ON((csrow < 0) || (csrow >= pvt->cs_count)); |
| 738 | 738 | ||
| 739 | base = base_from_dct_base(pvt, csrow); | 739 | base = base_from_dct_base(pvt, csrow); |
| 740 | mask = mask_from_dct_mask(pvt, csrow); | 740 | mask = mask_from_dct_mask(pvt, csrow); |
| @@ -962,35 +962,27 @@ err_reg: | |||
| 962 | */ | 962 | */ |
| 963 | static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt) | 963 | static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt) |
| 964 | { | 964 | { |
| 965 | if (pvt->ext_model >= OPTERON_CPU_REV_F) { | 965 | |
| 966 | if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F) { | ||
| 967 | pvt->dcsb_base = REV_E_DCSB_BASE_BITS; | ||
| 968 | pvt->dcsm_mask = REV_E_DCSM_MASK_BITS; | ||
| 969 | pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS; | ||
| 970 | pvt->dcs_shift = REV_E_DCS_SHIFT; | ||
| 971 | pvt->cs_count = 8; | ||
| 972 | pvt->num_dcsm = 8; | ||
| 973 | } else { | ||
| 966 | pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS; | 974 | pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS; |
| 967 | pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS; | 975 | pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS; |
| 968 | pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS; | 976 | pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS; |
| 969 | pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT; | 977 | pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT; |
| 970 | 978 | ||
| 971 | switch (boot_cpu_data.x86) { | 979 | if (boot_cpu_data.x86 == 0x11) { |
| 972 | case 0xf: | 980 | pvt->cs_count = 4; |
| 973 | pvt->num_dcsm = REV_F_DCSM_COUNT; | 981 | pvt->num_dcsm = 2; |
| 974 | break; | 982 | } else { |
| 975 | 983 | pvt->cs_count = 8; | |
| 976 | case 0x10: | 984 | pvt->num_dcsm = 4; |
| 977 | pvt->num_dcsm = F10_DCSM_COUNT; | ||
| 978 | break; | ||
| 979 | |||
| 980 | case 0x11: | ||
| 981 | pvt->num_dcsm = F11_DCSM_COUNT; | ||
| 982 | break; | ||
| 983 | |||
| 984 | default: | ||
| 985 | amd64_printk(KERN_ERR, "Unsupported family!\n"); | ||
| 986 | break; | ||
| 987 | } | 985 | } |
| 988 | } else { | ||
| 989 | pvt->dcsb_base = REV_E_DCSB_BASE_BITS; | ||
| 990 | pvt->dcsm_mask = REV_E_DCSM_MASK_BITS; | ||
| 991 | pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS; | ||
| 992 | pvt->dcs_shift = REV_E_DCS_SHIFT; | ||
| 993 | pvt->num_dcsm = REV_E_DCSM_COUNT; | ||
| 994 | } | 986 | } |
| 995 | } | 987 | } |
| 996 | 988 | ||
| @@ -1003,7 +995,7 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt) | |||
| 1003 | 995 | ||
| 1004 | amd64_set_dct_base_and_mask(pvt); | 996 | amd64_set_dct_base_and_mask(pvt); |
| 1005 | 997 | ||
| 1006 | for (cs = 0; cs < CHIPSELECT_COUNT; cs++) { | 998 | for (cs = 0; cs < pvt->cs_count; cs++) { |
| 1007 | reg = K8_DCSB0 + (cs * 4); | 999 | reg = K8_DCSB0 + (cs * 4); |
| 1008 | err = pci_read_config_dword(pvt->dram_f2_ctl, reg, | 1000 | err = pci_read_config_dword(pvt->dram_f2_ctl, reg, |
| 1009 | &pvt->dcsb0[cs]); | 1001 | &pvt->dcsb0[cs]); |
| @@ -1130,7 +1122,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram) | |||
| 1130 | debugf0("Reading K8_DRAM_BASE_LOW failed\n"); | 1122 | debugf0("Reading K8_DRAM_BASE_LOW failed\n"); |
| 1131 | 1123 | ||
| 1132 | /* Extract parts into separate data entries */ | 1124 | /* Extract parts into separate data entries */ |
| 1133 | pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8; | 1125 | pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 24; |
| 1134 | pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7; | 1126 | pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7; |
| 1135 | pvt->dram_rw_en[dram] = (low & 0x3); | 1127 | pvt->dram_rw_en[dram] = (low & 0x3); |
| 1136 | 1128 | ||
| @@ -1143,7 +1135,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram) | |||
| 1143 | * Extract parts into separate data entries. Limit is the HIGHEST memory | 1135 | * Extract parts into separate data entries. Limit is the HIGHEST memory |
| 1144 | * location of the region, so lower 24 bits need to be all ones | 1136 | * location of the region, so lower 24 bits need to be all ones |
| 1145 | */ | 1137 | */ |
| 1146 | pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF; | 1138 | pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 24) | 0x00FFFFFF; |
| 1147 | pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7; | 1139 | pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7; |
| 1148 | pvt->dram_DstNode[dram] = (low & 0x7); | 1140 | pvt->dram_DstNode[dram] = (low & 0x7); |
| 1149 | } | 1141 | } |
| @@ -1193,7 +1185,7 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | |||
| 1193 | * different from the node that detected the error. | 1185 | * different from the node that detected the error. |
| 1194 | */ | 1186 | */ |
| 1195 | src_mci = find_mc_by_sys_addr(mci, SystemAddress); | 1187 | src_mci = find_mc_by_sys_addr(mci, SystemAddress); |
| 1196 | if (src_mci) { | 1188 | if (!src_mci) { |
| 1197 | amd64_mc_printk(mci, KERN_ERR, | 1189 | amd64_mc_printk(mci, KERN_ERR, |
| 1198 | "failed to map error address 0x%lx to a node\n", | 1190 | "failed to map error address 0x%lx to a node\n", |
| 1199 | (unsigned long)SystemAddress); | 1191 | (unsigned long)SystemAddress); |
| @@ -1376,8 +1368,8 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram) | |||
| 1376 | 1368 | ||
| 1377 | pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7; | 1369 | pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7; |
| 1378 | 1370 | ||
| 1379 | pvt->dram_base[dram] = (((((u64) high_base & 0x000000FF) << 32) | | 1371 | pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) | |
| 1380 | ((u64) low_base & 0xFFFF0000))) << 8; | 1372 | (((u64)low_base & 0xFFFF0000) << 24); |
| 1381 | 1373 | ||
| 1382 | low_offset = K8_DRAM_LIMIT_LOW + (dram << 3); | 1374 | low_offset = K8_DRAM_LIMIT_LOW + (dram << 3); |
| 1383 | high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3); | 1375 | high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3); |
| @@ -1398,9 +1390,9 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram) | |||
| 1398 | * Extract address values and form a LIMIT address. Limit is the HIGHEST | 1390 | * Extract address values and form a LIMIT address. Limit is the HIGHEST |
| 1399 | * memory location of the region, so low 24 bits need to be all ones. | 1391 | * memory location of the region, so low 24 bits need to be all ones. |
| 1400 | */ | 1392 | */ |
| 1401 | low_limit |= 0x0000FFFF; | 1393 | pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) | |
| 1402 | pvt->dram_limit[dram] = | 1394 | (((u64) low_limit & 0xFFFF0000) << 24) | |
| 1403 | ((((u64) high_limit << 32) + (u64) low_limit) << 8) | (0xFF); | 1395 | 0x00FFFFFF; |
| 1404 | } | 1396 | } |
| 1405 | 1397 | ||
| 1406 | static void f10_read_dram_ctl_register(struct amd64_pvt *pvt) | 1398 | static void f10_read_dram_ctl_register(struct amd64_pvt *pvt) |
| @@ -1566,7 +1558,7 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs) | |||
| 1566 | 1558 | ||
| 1567 | debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs); | 1559 | debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs); |
| 1568 | 1560 | ||
| 1569 | for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) { | 1561 | for (csrow = 0; csrow < pvt->cs_count; csrow++) { |
| 1570 | 1562 | ||
| 1571 | cs_base = amd64_get_dct_base(pvt, cs, csrow); | 1563 | cs_base = amd64_get_dct_base(pvt, cs, csrow); |
| 1572 | if (!(cs_base & K8_DCSB_CS_ENABLE)) | 1564 | if (!(cs_base & K8_DCSB_CS_ENABLE)) |
| @@ -2497,7 +2489,7 @@ err_reg: | |||
| 2497 | * NOTE: CPU Revision Dependent code | 2489 | * NOTE: CPU Revision Dependent code |
| 2498 | * | 2490 | * |
| 2499 | * Input: | 2491 | * Input: |
| 2500 | * @csrow_nr ChipSelect Row Number (0..CHIPSELECT_COUNT-1) | 2492 | * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1) |
| 2501 | * k8 private pointer to --> | 2493 | * k8 private pointer to --> |
| 2502 | * DRAM Bank Address mapping register | 2494 | * DRAM Bank Address mapping register |
| 2503 | * node_id | 2495 | * node_id |
| @@ -2577,7 +2569,7 @@ static int amd64_init_csrows(struct mem_ctl_info *mci) | |||
| 2577 | (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled" | 2569 | (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled" |
| 2578 | ); | 2570 | ); |
| 2579 | 2571 | ||
| 2580 | for (i = 0; i < CHIPSELECT_COUNT; i++) { | 2572 | for (i = 0; i < pvt->cs_count; i++) { |
| 2581 | csrow = &mci->csrows[i]; | 2573 | csrow = &mci->csrows[i]; |
| 2582 | 2574 | ||
| 2583 | if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) { | 2575 | if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) { |
| @@ -2988,7 +2980,7 @@ static int amd64_init_2nd_stage(struct amd64_pvt *pvt) | |||
| 2988 | goto err_exit; | 2980 | goto err_exit; |
| 2989 | 2981 | ||
| 2990 | ret = -ENOMEM; | 2982 | ret = -ENOMEM; |
| 2991 | mci = edac_mc_alloc(0, CHIPSELECT_COUNT, pvt->channel_count, node_id); | 2983 | mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id); |
| 2992 | if (!mci) | 2984 | if (!mci) |
| 2993 | goto err_exit; | 2985 | goto err_exit; |
| 2994 | 2986 | ||
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 8ea07e2715dc..c6f359a85207 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h | |||
| @@ -132,6 +132,8 @@ | |||
| 132 | #define EDAC_AMD64_VERSION " Ver: 3.2.0 " __DATE__ | 132 | #define EDAC_AMD64_VERSION " Ver: 3.2.0 " __DATE__ |
| 133 | #define EDAC_MOD_STR "amd64_edac" | 133 | #define EDAC_MOD_STR "amd64_edac" |
| 134 | 134 | ||
| 135 | #define EDAC_MAX_NUMNODES 8 | ||
| 136 | |||
| 135 | /* Extended Model from CPUID, for CPU Revision numbers */ | 137 | /* Extended Model from CPUID, for CPU Revision numbers */ |
| 136 | #define OPTERON_CPU_LE_REV_C 0 | 138 | #define OPTERON_CPU_LE_REV_C 0 |
| 137 | #define OPTERON_CPU_REV_D 1 | 139 | #define OPTERON_CPU_REV_D 1 |
| @@ -142,7 +144,7 @@ | |||
| 142 | #define OPTERON_CPU_REV_FA 5 | 144 | #define OPTERON_CPU_REV_FA 5 |
| 143 | 145 | ||
| 144 | /* Hardware limit on ChipSelect rows per MC and processors per system */ | 146 | /* Hardware limit on ChipSelect rows per MC and processors per system */ |
| 145 | #define CHIPSELECT_COUNT 8 | 147 | #define MAX_CS_COUNT 8 |
| 146 | #define DRAM_REG_COUNT 8 | 148 | #define DRAM_REG_COUNT 8 |
| 147 | 149 | ||
| 148 | 150 | ||
| @@ -193,7 +195,6 @@ | |||
| 193 | */ | 195 | */ |
| 194 | #define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL) | 196 | #define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL) |
| 195 | #define REV_E_DCS_SHIFT 4 | 197 | #define REV_E_DCS_SHIFT 4 |
| 196 | #define REV_E_DCSM_COUNT 8 | ||
| 197 | 198 | ||
| 198 | #define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL) | 199 | #define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL) |
| 199 | #define REV_F_F1Xh_DCS_SHIFT 8 | 200 | #define REV_F_F1Xh_DCS_SHIFT 8 |
| @@ -204,9 +205,6 @@ | |||
| 204 | */ | 205 | */ |
| 205 | #define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL) | 206 | #define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL) |
| 206 | #define REV_F_DCS_SHIFT 8 | 207 | #define REV_F_DCS_SHIFT 8 |
| 207 | #define REV_F_DCSM_COUNT 4 | ||
| 208 | #define F10_DCSM_COUNT 4 | ||
| 209 | #define F11_DCSM_COUNT 2 | ||
| 210 | 208 | ||
| 211 | /* DRAM CS Mask Registers */ | 209 | /* DRAM CS Mask Registers */ |
| 212 | #define K8_DCSM0 0x60 | 210 | #define K8_DCSM0 0x60 |
| @@ -374,13 +372,11 @@ enum { | |||
| 374 | 372 | ||
| 375 | #define SET_NB_DRAM_INJECTION_WRITE(word, bits) \ | 373 | #define SET_NB_DRAM_INJECTION_WRITE(word, bits) \ |
| 376 | (BIT(((word) & 0xF) + 20) | \ | 374 | (BIT(((word) & 0xF) + 20) | \ |
| 377 | BIT(17) | \ | 375 | BIT(17) | bits) |
| 378 | ((bits) & 0xF)) | ||
| 379 | 376 | ||
| 380 | #define SET_NB_DRAM_INJECTION_READ(word, bits) \ | 377 | #define SET_NB_DRAM_INJECTION_READ(word, bits) \ |
| 381 | (BIT(((word) & 0xF) + 20) | \ | 378 | (BIT(((word) & 0xF) + 20) | \ |
| 382 | BIT(16) | \ | 379 | BIT(16) | bits) |
| 383 | ((bits) & 0xF)) | ||
| 384 | 380 | ||
| 385 | #define K8_NBCAP 0xE8 | 381 | #define K8_NBCAP 0xE8 |
| 386 | #define K8_NBCAP_CORES (BIT(12)|BIT(13)) | 382 | #define K8_NBCAP_CORES (BIT(12)|BIT(13)) |
| @@ -445,12 +441,12 @@ struct amd64_pvt { | |||
| 445 | u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */ | 441 | u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */ |
| 446 | 442 | ||
| 447 | /* DRAM CS Base Address Registers F2x[1,0][5C:40] */ | 443 | /* DRAM CS Base Address Registers F2x[1,0][5C:40] */ |
| 448 | u32 dcsb0[CHIPSELECT_COUNT]; | 444 | u32 dcsb0[MAX_CS_COUNT]; |
| 449 | u32 dcsb1[CHIPSELECT_COUNT]; | 445 | u32 dcsb1[MAX_CS_COUNT]; |
| 450 | 446 | ||
| 451 | /* DRAM CS Mask Registers F2x[1,0][6C:60] */ | 447 | /* DRAM CS Mask Registers F2x[1,0][6C:60] */ |
| 452 | u32 dcsm0[CHIPSELECT_COUNT]; | 448 | u32 dcsm0[MAX_CS_COUNT]; |
| 453 | u32 dcsm1[CHIPSELECT_COUNT]; | 449 | u32 dcsm1[MAX_CS_COUNT]; |
| 454 | 450 | ||
| 455 | /* | 451 | /* |
| 456 | * Decoded parts of DRAM BASE and LIMIT Registers | 452 | * Decoded parts of DRAM BASE and LIMIT Registers |
| @@ -470,6 +466,7 @@ struct amd64_pvt { | |||
| 470 | */ | 466 | */ |
| 471 | u32 dcsb_base; /* DCSB base bits */ | 467 | u32 dcsb_base; /* DCSB base bits */ |
| 472 | u32 dcsm_mask; /* DCSM mask bits */ | 468 | u32 dcsm_mask; /* DCSM mask bits */ |
| 469 | u32 cs_count; /* num chip selects (== num DCSB registers) */ | ||
| 473 | u32 num_dcsm; /* Number of DCSM registers */ | 470 | u32 num_dcsm; /* Number of DCSM registers */ |
| 474 | u32 dcs_mask_notused; /* DCSM notused mask bits */ | 471 | u32 dcs_mask_notused; /* DCSM notused mask bits */ |
| 475 | u32 dcs_shift; /* DCSB and DCSM shift value */ | 472 | u32 dcs_shift; /* DCSB and DCSM shift value */ |
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c index d3675b76b3a7..29f1f7a612d9 100644 --- a/drivers/edac/amd64_edac_inj.c +++ b/drivers/edac/amd64_edac_inj.c | |||
| @@ -1,5 +1,11 @@ | |||
| 1 | #include "amd64_edac.h" | 1 | #include "amd64_edac.h" |
| 2 | 2 | ||
| 3 | static ssize_t amd64_inject_section_show(struct mem_ctl_info *mci, char *buf) | ||
| 4 | { | ||
| 5 | struct amd64_pvt *pvt = mci->pvt_info; | ||
| 6 | return sprintf(buf, "0x%x\n", pvt->injection.section); | ||
| 7 | } | ||
| 8 | |||
| 3 | /* | 9 | /* |
| 4 | * store error injection section value which refers to one of 4 16-byte sections | 10 | * store error injection section value which refers to one of 4 16-byte sections |
| 5 | * within a 64-byte cacheline | 11 | * within a 64-byte cacheline |
| @@ -15,12 +21,26 @@ static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci, | |||
| 15 | 21 | ||
| 16 | ret = strict_strtoul(data, 10, &value); | 22 | ret = strict_strtoul(data, 10, &value); |
| 17 | if (ret != -EINVAL) { | 23 | if (ret != -EINVAL) { |
| 24 | |||
| 25 | if (value > 3) { | ||
| 26 | amd64_printk(KERN_WARNING, | ||
| 27 | "%s: invalid section 0x%lx\n", | ||
| 28 | __func__, value); | ||
| 29 | return -EINVAL; | ||
| 30 | } | ||
| 31 | |||
| 18 | pvt->injection.section = (u32) value; | 32 | pvt->injection.section = (u32) value; |
| 19 | return count; | 33 | return count; |
| 20 | } | 34 | } |
| 21 | return ret; | 35 | return ret; |
| 22 | } | 36 | } |
| 23 | 37 | ||
| 38 | static ssize_t amd64_inject_word_show(struct mem_ctl_info *mci, char *buf) | ||
| 39 | { | ||
| 40 | struct amd64_pvt *pvt = mci->pvt_info; | ||
| 41 | return sprintf(buf, "0x%x\n", pvt->injection.word); | ||
| 42 | } | ||
| 43 | |||
| 24 | /* | 44 | /* |
| 25 | * store error injection word value which refers to one of 9 16-bit word of the | 45 | * store error injection word value which refers to one of 9 16-bit word of the |
| 26 | * 16-byte (128-bit + ECC bits) section | 46 | * 16-byte (128-bit + ECC bits) section |
| @@ -37,14 +57,25 @@ static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci, | |||
| 37 | ret = strict_strtoul(data, 10, &value); | 57 | ret = strict_strtoul(data, 10, &value); |
| 38 | if (ret != -EINVAL) { | 58 | if (ret != -EINVAL) { |
| 39 | 59 | ||
| 40 | value = (value <= 8) ? value : 0; | 60 | if (value > 8) { |
| 41 | pvt->injection.word = (u32) value; | 61 | amd64_printk(KERN_WARNING, |
| 62 | "%s: invalid word 0x%lx\n", | ||
| 63 | __func__, value); | ||
| 64 | return -EINVAL; | ||
| 65 | } | ||
| 42 | 66 | ||
| 67 | pvt->injection.word = (u32) value; | ||
| 43 | return count; | 68 | return count; |
| 44 | } | 69 | } |
| 45 | return ret; | 70 | return ret; |
| 46 | } | 71 | } |
| 47 | 72 | ||
| 73 | static ssize_t amd64_inject_ecc_vector_show(struct mem_ctl_info *mci, char *buf) | ||
| 74 | { | ||
| 75 | struct amd64_pvt *pvt = mci->pvt_info; | ||
| 76 | return sprintf(buf, "0x%x\n", pvt->injection.bit_map); | ||
| 77 | } | ||
| 78 | |||
| 48 | /* | 79 | /* |
| 49 | * store 16 bit error injection vector which enables injecting errors to the | 80 | * store 16 bit error injection vector which enables injecting errors to the |
| 50 | * corresponding bit within the error injection word above. When used during a | 81 | * corresponding bit within the error injection word above. When used during a |
| @@ -60,8 +91,14 @@ static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci, | |||
| 60 | ret = strict_strtoul(data, 16, &value); | 91 | ret = strict_strtoul(data, 16, &value); |
| 61 | if (ret != -EINVAL) { | 92 | if (ret != -EINVAL) { |
| 62 | 93 | ||
| 63 | pvt->injection.bit_map = (u32) value & 0xFFFF; | 94 | if (value & 0xFFFF0000) { |
| 95 | amd64_printk(KERN_WARNING, | ||
| 96 | "%s: invalid EccVector: 0x%lx\n", | ||
| 97 | __func__, value); | ||
| 98 | return -EINVAL; | ||
| 99 | } | ||
| 64 | 100 | ||
| 101 | pvt->injection.bit_map = (u32) value; | ||
| 65 | return count; | 102 | return count; |
| 66 | } | 103 | } |
| 67 | return ret; | 104 | return ret; |
| @@ -147,7 +184,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = { | |||
| 147 | .name = "inject_section", | 184 | .name = "inject_section", |
| 148 | .mode = (S_IRUGO | S_IWUSR) | 185 | .mode = (S_IRUGO | S_IWUSR) |
| 149 | }, | 186 | }, |
| 150 | .show = NULL, | 187 | .show = amd64_inject_section_show, |
| 151 | .store = amd64_inject_section_store, | 188 | .store = amd64_inject_section_store, |
| 152 | }, | 189 | }, |
| 153 | { | 190 | { |
| @@ -155,7 +192,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = { | |||
| 155 | .name = "inject_word", | 192 | .name = "inject_word", |
| 156 | .mode = (S_IRUGO | S_IWUSR) | 193 | .mode = (S_IRUGO | S_IWUSR) |
| 157 | }, | 194 | }, |
| 158 | .show = NULL, | 195 | .show = amd64_inject_word_show, |
| 159 | .store = amd64_inject_word_store, | 196 | .store = amd64_inject_word_store, |
| 160 | }, | 197 | }, |
| 161 | { | 198 | { |
| @@ -163,7 +200,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = { | |||
| 163 | .name = "inject_ecc_vector", | 200 | .name = "inject_ecc_vector", |
| 164 | .mode = (S_IRUGO | S_IWUSR) | 201 | .mode = (S_IRUGO | S_IWUSR) |
| 165 | }, | 202 | }, |
| 166 | .show = NULL, | 203 | .show = amd64_inject_ecc_vector_show, |
| 167 | .store = amd64_inject_ecc_vector_store, | 204 | .store = amd64_inject_ecc_vector_store, |
| 168 | }, | 205 | }, |
| 169 | { | 206 | { |
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c index 420a96e7f2db..051d1ebbd287 100644 --- a/drivers/firmware/iscsi_ibft.c +++ b/drivers/firmware/iscsi_ibft.c | |||
| @@ -939,7 +939,7 @@ static int __init ibft_init(void) | |||
| 939 | 939 | ||
| 940 | if (ibft_addr) { | 940 | if (ibft_addr) { |
| 941 | printk(KERN_INFO "iBFT detected at 0x%llx.\n", | 941 | printk(KERN_INFO "iBFT detected at 0x%llx.\n", |
| 942 | (u64)virt_to_phys((void *)ibft_addr)); | 942 | (u64)isa_virt_to_bus(ibft_addr)); |
| 943 | 943 | ||
| 944 | rc = ibft_check_device(); | 944 | rc = ibft_check_device(); |
| 945 | if (rc) | 945 | if (rc) |
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c index d53fbbfefa3e..dfb15c06c88f 100644 --- a/drivers/firmware/iscsi_ibft_find.c +++ b/drivers/firmware/iscsi_ibft_find.c | |||
| @@ -65,10 +65,10 @@ void __init reserve_ibft_region(void) | |||
| 65 | * so skip that area */ | 65 | * so skip that area */ |
| 66 | if (pos == VGA_MEM) | 66 | if (pos == VGA_MEM) |
| 67 | pos += VGA_SIZE; | 67 | pos += VGA_SIZE; |
| 68 | virt = phys_to_virt(pos); | 68 | virt = isa_bus_to_virt(pos); |
| 69 | if (memcmp(virt, IBFT_SIGN, IBFT_SIGN_LEN) == 0) { | 69 | if (memcmp(virt, IBFT_SIGN, IBFT_SIGN_LEN) == 0) { |
| 70 | unsigned long *addr = | 70 | unsigned long *addr = |
| 71 | (unsigned long *)phys_to_virt(pos + 4); | 71 | (unsigned long *)isa_bus_to_virt(pos + 4); |
| 72 | len = *addr; | 72 | len = *addr; |
| 73 | /* if the length of the table extends past 1M, | 73 | /* if the length of the table extends past 1M, |
| 74 | * the table cannot be valid. */ | 74 | * the table cannot be valid. */ |
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 8e7b0ebece0c..5cae0b3eee9b 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
| @@ -1556,8 +1556,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev, | |||
| 1556 | struct drm_crtc *crtc; | 1556 | struct drm_crtc *crtc; |
| 1557 | int ret = 0; | 1557 | int ret = 0; |
| 1558 | 1558 | ||
| 1559 | DRM_DEBUG_KMS("\n"); | ||
| 1560 | |||
| 1561 | if (!req->flags) { | 1559 | if (!req->flags) { |
| 1562 | DRM_ERROR("no operation set\n"); | 1560 | DRM_ERROR("no operation set\n"); |
| 1563 | return -EINVAL; | 1561 | return -EINVAL; |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 819ddcbfcce5..23dc9c115fd9 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
| @@ -454,6 +454,96 @@ out_free: | |||
| 454 | } | 454 | } |
| 455 | EXPORT_SYMBOL(drm_fb_helper_init_crtc_count); | 455 | EXPORT_SYMBOL(drm_fb_helper_init_crtc_count); |
| 456 | 456 | ||
| 457 | static void setcolreg(struct drm_crtc *crtc, u16 red, u16 green, | ||
| 458 | u16 blue, u16 regno, struct fb_info *info) | ||
| 459 | { | ||
| 460 | struct drm_fb_helper *fb_helper = info->par; | ||
| 461 | struct drm_framebuffer *fb = fb_helper->fb; | ||
| 462 | int pindex; | ||
| 463 | |||
| 464 | pindex = regno; | ||
| 465 | |||
| 466 | if (fb->bits_per_pixel == 16) { | ||
| 467 | pindex = regno << 3; | ||
| 468 | |||
| 469 | if (fb->depth == 16 && regno > 63) | ||
| 470 | return; | ||
| 471 | if (fb->depth == 15 && regno > 31) | ||
| 472 | return; | ||
| 473 | |||
| 474 | if (fb->depth == 16) { | ||
| 475 | u16 r, g, b; | ||
| 476 | int i; | ||
| 477 | if (regno < 32) { | ||
| 478 | for (i = 0; i < 8; i++) | ||
| 479 | fb_helper->funcs->gamma_set(crtc, red, | ||
| 480 | green, blue, pindex + i); | ||
| 481 | } | ||
| 482 | |||
| 483 | fb_helper->funcs->gamma_get(crtc, &r, | ||
| 484 | &g, &b, | ||
| 485 | pindex >> 1); | ||
| 486 | |||
| 487 | for (i = 0; i < 4; i++) | ||
| 488 | fb_helper->funcs->gamma_set(crtc, r, | ||
| 489 | green, b, | ||
| 490 | (pindex >> 1) + i); | ||
| 491 | } | ||
| 492 | } | ||
| 493 | |||
| 494 | if (fb->depth != 16) | ||
| 495 | fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex); | ||
| 496 | |||
| 497 | if (regno < 16 && info->fix.visual == FB_VISUAL_DIRECTCOLOR) { | ||
| 498 | ((u32 *) fb->pseudo_palette)[regno] = | ||
| 499 | (regno << info->var.red.offset) | | ||
| 500 | (regno << info->var.green.offset) | | ||
| 501 | (regno << info->var.blue.offset); | ||
| 502 | } | ||
| 503 | } | ||
| 504 | |||
| 505 | int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) | ||
| 506 | { | ||
| 507 | struct drm_fb_helper *fb_helper = info->par; | ||
| 508 | struct drm_device *dev = fb_helper->dev; | ||
| 509 | u16 *red, *green, *blue, *transp; | ||
| 510 | struct drm_crtc *crtc; | ||
| 511 | int i, rc = 0; | ||
| 512 | int start; | ||
| 513 | |||
| 514 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
| 515 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | ||
| 516 | for (i = 0; i < fb_helper->crtc_count; i++) { | ||
| 517 | if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) | ||
| 518 | break; | ||
| 519 | } | ||
| 520 | if (i == fb_helper->crtc_count) | ||
| 521 | continue; | ||
| 522 | |||
| 523 | red = cmap->red; | ||
| 524 | green = cmap->green; | ||
| 525 | blue = cmap->blue; | ||
| 526 | transp = cmap->transp; | ||
| 527 | start = cmap->start; | ||
| 528 | |||
| 529 | for (i = 0; i < cmap->len; i++) { | ||
| 530 | u16 hred, hgreen, hblue, htransp = 0xffff; | ||
| 531 | |||
| 532 | hred = *red++; | ||
| 533 | hgreen = *green++; | ||
| 534 | hblue = *blue++; | ||
| 535 | |||
| 536 | if (transp) | ||
| 537 | htransp = *transp++; | ||
| 538 | |||
| 539 | setcolreg(crtc, hred, hgreen, hblue, start++, info); | ||
| 540 | } | ||
| 541 | crtc_funcs->load_lut(crtc); | ||
| 542 | } | ||
| 543 | return rc; | ||
| 544 | } | ||
| 545 | EXPORT_SYMBOL(drm_fb_helper_setcmap); | ||
| 546 | |||
| 457 | int drm_fb_helper_setcolreg(unsigned regno, | 547 | int drm_fb_helper_setcolreg(unsigned regno, |
| 458 | unsigned red, | 548 | unsigned red, |
| 459 | unsigned green, | 549 | unsigned green, |
| @@ -466,9 +556,11 @@ int drm_fb_helper_setcolreg(unsigned regno, | |||
| 466 | struct drm_crtc *crtc; | 556 | struct drm_crtc *crtc; |
| 467 | int i; | 557 | int i; |
| 468 | 558 | ||
| 469 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 559 | if (regno > 255) |
| 470 | struct drm_framebuffer *fb = fb_helper->fb; | 560 | return 1; |
| 471 | 561 | ||
| 562 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
| 563 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | ||
| 472 | for (i = 0; i < fb_helper->crtc_count; i++) { | 564 | for (i = 0; i < fb_helper->crtc_count; i++) { |
| 473 | if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) | 565 | if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) |
| 474 | break; | 566 | break; |
| @@ -476,35 +568,9 @@ int drm_fb_helper_setcolreg(unsigned regno, | |||
| 476 | if (i == fb_helper->crtc_count) | 568 | if (i == fb_helper->crtc_count) |
| 477 | continue; | 569 | continue; |
| 478 | 570 | ||
| 479 | if (regno > 255) | ||
| 480 | return 1; | ||
| 481 | |||
| 482 | if (fb->depth == 8) { | ||
| 483 | fb_helper->funcs->gamma_set(crtc, red, green, blue, regno); | ||
| 484 | return 0; | ||
| 485 | } | ||
| 486 | 571 | ||
| 487 | if (regno < 16) { | 572 | setcolreg(crtc, red, green, blue, regno, info); |
| 488 | switch (fb->depth) { | 573 | crtc_funcs->load_lut(crtc); |
| 489 | case 15: | ||
| 490 | fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) | | ||
| 491 | ((green & 0xf800) >> 6) | | ||
| 492 | ((blue & 0xf800) >> 11); | ||
| 493 | break; | ||
| 494 | case 16: | ||
| 495 | fb->pseudo_palette[regno] = (red & 0xf800) | | ||
| 496 | ((green & 0xfc00) >> 5) | | ||
| 497 | ((blue & 0xf800) >> 11); | ||
| 498 | break; | ||
| 499 | case 24: | ||
| 500 | case 32: | ||
| 501 | fb->pseudo_palette[regno] = | ||
| 502 | (((red >> 8) & 0xff) << info->var.red.offset) | | ||
| 503 | (((green >> 8) & 0xff) << info->var.green.offset) | | ||
| 504 | (((blue >> 8) & 0xff) << info->var.blue.offset); | ||
| 505 | break; | ||
| 506 | } | ||
| 507 | } | ||
| 508 | } | 574 | } |
| 509 | return 0; | 575 | return 0; |
| 510 | } | 576 | } |
| @@ -674,6 +740,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, | |||
| 674 | EXPORT_SYMBOL(drm_fb_helper_pan_display); | 740 | EXPORT_SYMBOL(drm_fb_helper_pan_display); |
| 675 | 741 | ||
| 676 | int drm_fb_helper_single_fb_probe(struct drm_device *dev, | 742 | int drm_fb_helper_single_fb_probe(struct drm_device *dev, |
| 743 | int preferred_bpp, | ||
| 677 | int (*fb_create)(struct drm_device *dev, | 744 | int (*fb_create)(struct drm_device *dev, |
| 678 | uint32_t fb_width, | 745 | uint32_t fb_width, |
| 679 | uint32_t fb_height, | 746 | uint32_t fb_height, |
| @@ -696,6 +763,11 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev, | |||
| 696 | struct drm_fb_helper *fb_helper; | 763 | struct drm_fb_helper *fb_helper; |
| 697 | uint32_t surface_depth = 24, surface_bpp = 32; | 764 | uint32_t surface_depth = 24, surface_bpp = 32; |
| 698 | 765 | ||
| 766 | /* if driver picks 8 or 16 by default use that | ||
| 767 | for both depth/bpp */ | ||
| 768 | if (preferred_bpp != surface_bpp) { | ||
| 769 | surface_depth = surface_bpp = preferred_bpp; | ||
| 770 | } | ||
| 699 | /* first up get a count of crtcs now in use and new min/maxes width/heights */ | 771 | /* first up get a count of crtcs now in use and new min/maxes width/heights */ |
| 700 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 772 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
| 701 | struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private; | 773 | struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private; |
| @@ -851,10 +923,12 @@ void drm_fb_helper_free(struct drm_fb_helper *helper) | |||
| 851 | } | 923 | } |
| 852 | EXPORT_SYMBOL(drm_fb_helper_free); | 924 | EXPORT_SYMBOL(drm_fb_helper_free); |
| 853 | 925 | ||
| 854 | void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch) | 926 | void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, |
| 927 | uint32_t depth) | ||
| 855 | { | 928 | { |
| 856 | info->fix.type = FB_TYPE_PACKED_PIXELS; | 929 | info->fix.type = FB_TYPE_PACKED_PIXELS; |
| 857 | info->fix.visual = FB_VISUAL_TRUECOLOR; | 930 | info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR : |
| 931 | FB_VISUAL_DIRECTCOLOR; | ||
| 858 | info->fix.type_aux = 0; | 932 | info->fix.type_aux = 0; |
| 859 | info->fix.xpanstep = 1; /* doing it in hw */ | 933 | info->fix.xpanstep = 1; /* doing it in hw */ |
| 860 | info->fix.ypanstep = 1; /* doing it in hw */ | 934 | info->fix.ypanstep = 1; /* doing it in hw */ |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 93ff6c03733e..ffa39671751f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -3244,6 +3244,16 @@ void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | |||
| 3244 | intel_crtc->lut_b[regno] = blue >> 8; | 3244 | intel_crtc->lut_b[regno] = blue >> 8; |
| 3245 | } | 3245 | } |
| 3246 | 3246 | ||
| 3247 | void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
| 3248 | u16 *blue, int regno) | ||
| 3249 | { | ||
| 3250 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
| 3251 | |||
| 3252 | *red = intel_crtc->lut_r[regno] << 8; | ||
| 3253 | *green = intel_crtc->lut_g[regno] << 8; | ||
| 3254 | *blue = intel_crtc->lut_b[regno] << 8; | ||
| 3255 | } | ||
| 3256 | |||
| 3247 | static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | 3257 | static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, |
| 3248 | u16 *blue, uint32_t size) | 3258 | u16 *blue, uint32_t size) |
| 3249 | { | 3259 | { |
| @@ -3835,6 +3845,7 @@ static const struct drm_crtc_helper_funcs intel_helper_funcs = { | |||
| 3835 | .mode_set_base = intel_pipe_set_base, | 3845 | .mode_set_base = intel_pipe_set_base, |
| 3836 | .prepare = intel_crtc_prepare, | 3846 | .prepare = intel_crtc_prepare, |
| 3837 | .commit = intel_crtc_commit, | 3847 | .commit = intel_crtc_commit, |
| 3848 | .load_lut = intel_crtc_load_lut, | ||
| 3838 | }; | 3849 | }; |
| 3839 | 3850 | ||
| 3840 | static const struct drm_crtc_funcs intel_crtc_funcs = { | 3851 | static const struct drm_crtc_funcs intel_crtc_funcs = { |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 8aa4b7f30daa..ef61fe9507e2 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -175,6 +175,8 @@ extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc); | |||
| 175 | extern void intelfb_restore(void); | 175 | extern void intelfb_restore(void); |
| 176 | extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | 176 | extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
| 177 | u16 blue, int regno); | 177 | u16 blue, int regno); |
| 178 | extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
| 179 | u16 *blue, int regno); | ||
| 178 | 180 | ||
| 179 | extern int intel_framebuffer_create(struct drm_device *dev, | 181 | extern int intel_framebuffer_create(struct drm_device *dev, |
| 180 | struct drm_mode_fb_cmd *mode_cmd, | 182 | struct drm_mode_fb_cmd *mode_cmd, |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index e85d7e9eed7d..2b0fe54cd92c 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
| @@ -60,10 +60,12 @@ static struct fb_ops intelfb_ops = { | |||
| 60 | .fb_imageblit = cfb_imageblit, | 60 | .fb_imageblit = cfb_imageblit, |
| 61 | .fb_pan_display = drm_fb_helper_pan_display, | 61 | .fb_pan_display = drm_fb_helper_pan_display, |
| 62 | .fb_blank = drm_fb_helper_blank, | 62 | .fb_blank = drm_fb_helper_blank, |
| 63 | .fb_setcmap = drm_fb_helper_setcmap, | ||
| 63 | }; | 64 | }; |
| 64 | 65 | ||
| 65 | static struct drm_fb_helper_funcs intel_fb_helper_funcs = { | 66 | static struct drm_fb_helper_funcs intel_fb_helper_funcs = { |
| 66 | .gamma_set = intel_crtc_fb_gamma_set, | 67 | .gamma_set = intel_crtc_fb_gamma_set, |
| 68 | .gamma_get = intel_crtc_fb_gamma_get, | ||
| 67 | }; | 69 | }; |
| 68 | 70 | ||
| 69 | 71 | ||
| @@ -123,6 +125,10 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
| 123 | struct device *device = &dev->pdev->dev; | 125 | struct device *device = &dev->pdev->dev; |
| 124 | int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1; | 126 | int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1; |
| 125 | 127 | ||
| 128 | /* we don't do packed 24bpp */ | ||
| 129 | if (surface_bpp == 24) | ||
| 130 | surface_bpp = 32; | ||
| 131 | |||
| 126 | mode_cmd.width = surface_width; | 132 | mode_cmd.width = surface_width; |
| 127 | mode_cmd.height = surface_height; | 133 | mode_cmd.height = surface_height; |
| 128 | 134 | ||
| @@ -206,7 +212,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
| 206 | 212 | ||
| 207 | // memset(info->screen_base, 0, size); | 213 | // memset(info->screen_base, 0, size); |
| 208 | 214 | ||
| 209 | drm_fb_helper_fill_fix(info, fb->pitch); | 215 | drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); |
| 210 | drm_fb_helper_fill_var(info, fb, fb_width, fb_height); | 216 | drm_fb_helper_fill_var(info, fb, fb_width, fb_height); |
| 211 | 217 | ||
| 212 | /* FIXME: we really shouldn't expose mmio space at all */ | 218 | /* FIXME: we really shouldn't expose mmio space at all */ |
| @@ -244,7 +250,7 @@ int intelfb_probe(struct drm_device *dev) | |||
| 244 | int ret; | 250 | int ret; |
| 245 | 251 | ||
| 246 | DRM_DEBUG("\n"); | 252 | DRM_DEBUG("\n"); |
| 247 | ret = drm_fb_helper_single_fb_probe(dev, intelfb_create); | 253 | ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create); |
| 248 | return ret; | 254 | return ret; |
| 249 | } | 255 | } |
| 250 | EXPORT_SYMBOL(intelfb_probe); | 256 | EXPORT_SYMBOL(intelfb_probe); |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 6a015929deee..14fa9701aeb3 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
| @@ -733,6 +733,7 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = { | |||
| 733 | .mode_set_base = atombios_crtc_set_base, | 733 | .mode_set_base = atombios_crtc_set_base, |
| 734 | .prepare = atombios_crtc_prepare, | 734 | .prepare = atombios_crtc_prepare, |
| 735 | .commit = atombios_crtc_commit, | 735 | .commit = atombios_crtc_commit, |
| 736 | .load_lut = radeon_crtc_load_lut, | ||
| 736 | }; | 737 | }; |
| 737 | 738 | ||
| 738 | void radeon_atombios_init_crtc(struct drm_device *dev, | 739 | void radeon_atombios_init_crtc(struct drm_device *dev, |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index e6cce24de802..161094c07d94 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
| @@ -32,6 +32,9 @@ | |||
| 32 | #include "radeon_reg.h" | 32 | #include "radeon_reg.h" |
| 33 | #include "radeon.h" | 33 | #include "radeon.h" |
| 34 | #include "r100d.h" | 34 | #include "r100d.h" |
| 35 | #include "rs100d.h" | ||
| 36 | #include "rv200d.h" | ||
| 37 | #include "rv250d.h" | ||
| 35 | 38 | ||
| 36 | #include <linux/firmware.h> | 39 | #include <linux/firmware.h> |
| 37 | #include <linux/platform_device.h> | 40 | #include <linux/platform_device.h> |
| @@ -60,18 +63,7 @@ MODULE_FIRMWARE(FIRMWARE_R520); | |||
| 60 | 63 | ||
| 61 | /* This files gather functions specifics to: | 64 | /* This files gather functions specifics to: |
| 62 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 | 65 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 |
| 63 | * | ||
| 64 | * Some of these functions might be used by newer ASICs. | ||
| 65 | */ | 66 | */ |
| 66 | int r200_init(struct radeon_device *rdev); | ||
| 67 | void r100_hdp_reset(struct radeon_device *rdev); | ||
| 68 | void r100_gpu_init(struct radeon_device *rdev); | ||
| 69 | int r100_gui_wait_for_idle(struct radeon_device *rdev); | ||
| 70 | int r100_mc_wait_for_idle(struct radeon_device *rdev); | ||
| 71 | void r100_gpu_wait_for_vsync(struct radeon_device *rdev); | ||
| 72 | void r100_gpu_wait_for_vsync2(struct radeon_device *rdev); | ||
| 73 | int r100_debugfs_mc_info_init(struct radeon_device *rdev); | ||
| 74 | |||
| 75 | 67 | ||
| 76 | /* | 68 | /* |
| 77 | * PCI GART | 69 | * PCI GART |
| @@ -152,136 +144,6 @@ void r100_pci_gart_fini(struct radeon_device *rdev) | |||
| 152 | radeon_gart_fini(rdev); | 144 | radeon_gart_fini(rdev); |
| 153 | } | 145 | } |
| 154 | 146 | ||
| 155 | |||
| 156 | /* | ||
| 157 | * MC | ||
| 158 | */ | ||
| 159 | void r100_mc_disable_clients(struct radeon_device *rdev) | ||
| 160 | { | ||
| 161 | uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl; | ||
| 162 | |||
| 163 | /* FIXME: is this function correct for rs100,rs200,rs300 ? */ | ||
| 164 | if (r100_gui_wait_for_idle(rdev)) { | ||
| 165 | printk(KERN_WARNING "Failed to wait GUI idle while " | ||
| 166 | "programming pipes. Bad things might happen.\n"); | ||
| 167 | } | ||
| 168 | |||
| 169 | /* stop display and memory access */ | ||
| 170 | ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL); | ||
| 171 | WREG32(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE); | ||
| 172 | crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); | ||
| 173 | WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS); | ||
| 174 | crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL); | ||
| 175 | |||
| 176 | r100_gpu_wait_for_vsync(rdev); | ||
| 177 | |||
| 178 | WREG32(RADEON_CRTC_GEN_CNTL, | ||
| 179 | (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) | | ||
| 180 | RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN); | ||
| 181 | |||
| 182 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { | ||
| 183 | crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); | ||
| 184 | |||
| 185 | r100_gpu_wait_for_vsync2(rdev); | ||
| 186 | WREG32(RADEON_CRTC2_GEN_CNTL, | ||
| 187 | (crtc2_gen_cntl & | ||
| 188 | ~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) | | ||
| 189 | RADEON_CRTC2_DISP_REQ_EN_B); | ||
| 190 | } | ||
| 191 | |||
| 192 | udelay(500); | ||
| 193 | } | ||
| 194 | |||
| 195 | void r100_mc_setup(struct radeon_device *rdev) | ||
| 196 | { | ||
| 197 | uint32_t tmp; | ||
| 198 | int r; | ||
| 199 | |||
| 200 | r = r100_debugfs_mc_info_init(rdev); | ||
| 201 | if (r) { | ||
| 202 | DRM_ERROR("Failed to register debugfs file for R100 MC !\n"); | ||
| 203 | } | ||
| 204 | /* Write VRAM size in case we are limiting it */ | ||
| 205 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); | ||
| 206 | /* Novell bug 204882 for RN50/M6/M7 with 8/16/32MB VRAM, | ||
| 207 | * if the aperture is 64MB but we have 32MB VRAM | ||
| 208 | * we report only 32MB VRAM but we have to set MC_FB_LOCATION | ||
| 209 | * to 64MB, otherwise the gpu accidentially dies */ | ||
| 210 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; | ||
| 211 | tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); | ||
| 212 | tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); | ||
| 213 | WREG32(RADEON_MC_FB_LOCATION, tmp); | ||
| 214 | |||
| 215 | /* Enable bus mastering */ | ||
| 216 | tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; | ||
| 217 | WREG32(RADEON_BUS_CNTL, tmp); | ||
| 218 | |||
| 219 | if (rdev->flags & RADEON_IS_AGP) { | ||
| 220 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; | ||
| 221 | tmp = REG_SET(RADEON_MC_AGP_TOP, tmp >> 16); | ||
| 222 | tmp |= REG_SET(RADEON_MC_AGP_START, rdev->mc.gtt_location >> 16); | ||
| 223 | WREG32(RADEON_MC_AGP_LOCATION, tmp); | ||
| 224 | WREG32(RADEON_AGP_BASE, rdev->mc.agp_base); | ||
| 225 | } else { | ||
| 226 | WREG32(RADEON_MC_AGP_LOCATION, 0x0FFFFFFF); | ||
| 227 | WREG32(RADEON_AGP_BASE, 0); | ||
| 228 | } | ||
| 229 | |||
| 230 | tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL; | ||
| 231 | tmp |= (7 << 28); | ||
| 232 | WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE); | ||
| 233 | (void)RREG32(RADEON_HOST_PATH_CNTL); | ||
| 234 | WREG32(RADEON_HOST_PATH_CNTL, tmp); | ||
| 235 | (void)RREG32(RADEON_HOST_PATH_CNTL); | ||
| 236 | } | ||
| 237 | |||
| 238 | int r100_mc_init(struct radeon_device *rdev) | ||
| 239 | { | ||
| 240 | int r; | ||
| 241 | |||
| 242 | if (r100_debugfs_rbbm_init(rdev)) { | ||
| 243 | DRM_ERROR("Failed to register debugfs file for RBBM !\n"); | ||
| 244 | } | ||
| 245 | |||
| 246 | r100_gpu_init(rdev); | ||
| 247 | /* Disable gart which also disable out of gart access */ | ||
| 248 | r100_pci_gart_disable(rdev); | ||
| 249 | |||
| 250 | /* Setup GPU memory space */ | ||
| 251 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | ||
| 252 | if (rdev->flags & RADEON_IS_AGP) { | ||
| 253 | r = radeon_agp_init(rdev); | ||
| 254 | if (r) { | ||
| 255 | printk(KERN_WARNING "[drm] Disabling AGP\n"); | ||
| 256 | rdev->flags &= ~RADEON_IS_AGP; | ||
| 257 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | ||
| 258 | } else { | ||
| 259 | rdev->mc.gtt_location = rdev->mc.agp_base; | ||
| 260 | } | ||
| 261 | } | ||
| 262 | r = radeon_mc_setup(rdev); | ||
| 263 | if (r) { | ||
| 264 | return r; | ||
| 265 | } | ||
| 266 | |||
| 267 | r100_mc_disable_clients(rdev); | ||
| 268 | if (r100_mc_wait_for_idle(rdev)) { | ||
| 269 | printk(KERN_WARNING "Failed to wait MC idle while " | ||
| 270 | "programming pipes. Bad things might happen.\n"); | ||
| 271 | } | ||
| 272 | |||
| 273 | r100_mc_setup(rdev); | ||
| 274 | return 0; | ||
| 275 | } | ||
| 276 | |||
| 277 | void r100_mc_fini(struct radeon_device *rdev) | ||
| 278 | { | ||
| 279 | } | ||
| 280 | |||
| 281 | |||
| 282 | /* | ||
| 283 | * Interrupts | ||
| 284 | */ | ||
| 285 | int r100_irq_set(struct radeon_device *rdev) | 147 | int r100_irq_set(struct radeon_device *rdev) |
| 286 | { | 148 | { |
| 287 | uint32_t tmp = 0; | 149 | uint32_t tmp = 0; |
| @@ -358,10 +220,6 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) | |||
| 358 | return RREG32(RADEON_CRTC2_CRNT_FRAME); | 220 | return RREG32(RADEON_CRTC2_CRNT_FRAME); |
| 359 | } | 221 | } |
| 360 | 222 | ||
| 361 | |||
| 362 | /* | ||
| 363 | * Fence emission | ||
| 364 | */ | ||
| 365 | void r100_fence_ring_emit(struct radeon_device *rdev, | 223 | void r100_fence_ring_emit(struct radeon_device *rdev, |
| 366 | struct radeon_fence *fence) | 224 | struct radeon_fence *fence) |
| 367 | { | 225 | { |
| @@ -377,10 +235,6 @@ void r100_fence_ring_emit(struct radeon_device *rdev, | |||
| 377 | radeon_ring_write(rdev, RADEON_SW_INT_FIRE); | 235 | radeon_ring_write(rdev, RADEON_SW_INT_FIRE); |
| 378 | } | 236 | } |
| 379 | 237 | ||
| 380 | |||
| 381 | /* | ||
| 382 | * Writeback | ||
| 383 | */ | ||
| 384 | int r100_wb_init(struct radeon_device *rdev) | 238 | int r100_wb_init(struct radeon_device *rdev) |
| 385 | { | 239 | { |
| 386 | int r; | 240 | int r; |
| @@ -504,10 +358,6 @@ int r100_copy_blit(struct radeon_device *rdev, | |||
| 504 | return r; | 358 | return r; |
| 505 | } | 359 | } |
| 506 | 360 | ||
| 507 | |||
| 508 | /* | ||
| 509 | * CP | ||
| 510 | */ | ||
| 511 | static int r100_cp_wait_for_idle(struct radeon_device *rdev) | 361 | static int r100_cp_wait_for_idle(struct radeon_device *rdev) |
| 512 | { | 362 | { |
| 513 | unsigned i; | 363 | unsigned i; |
| @@ -612,6 +462,7 @@ static int r100_cp_init_microcode(struct radeon_device *rdev) | |||
| 612 | } | 462 | } |
| 613 | return err; | 463 | return err; |
| 614 | } | 464 | } |
| 465 | |||
| 615 | static void r100_cp_load_microcode(struct radeon_device *rdev) | 466 | static void r100_cp_load_microcode(struct radeon_device *rdev) |
| 616 | { | 467 | { |
| 617 | const __be32 *fw_data; | 468 | const __be32 *fw_data; |
| @@ -978,7 +829,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
| 978 | 829 | ||
| 979 | header = radeon_get_ib_value(p, h_idx); | 830 | header = radeon_get_ib_value(p, h_idx); |
| 980 | crtc_id = radeon_get_ib_value(p, h_idx + 5); | 831 | crtc_id = radeon_get_ib_value(p, h_idx + 5); |
| 981 | reg = header >> 2; | 832 | reg = CP_PACKET0_GET_REG(header); |
| 982 | mutex_lock(&p->rdev->ddev->mode_config.mutex); | 833 | mutex_lock(&p->rdev->ddev->mode_config.mutex); |
| 983 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); | 834 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); |
| 984 | if (!obj) { | 835 | if (!obj) { |
| @@ -1990,7 +1841,7 @@ void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | |||
| 1990 | r100_pll_errata_after_data(rdev); | 1841 | r100_pll_errata_after_data(rdev); |
| 1991 | } | 1842 | } |
| 1992 | 1843 | ||
| 1993 | int r100_init(struct radeon_device *rdev) | 1844 | void r100_set_safe_registers(struct radeon_device *rdev) |
| 1994 | { | 1845 | { |
| 1995 | if (ASIC_IS_RN50(rdev)) { | 1846 | if (ASIC_IS_RN50(rdev)) { |
| 1996 | rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; | 1847 | rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; |
| @@ -1999,9 +1850,8 @@ int r100_init(struct radeon_device *rdev) | |||
| 1999 | rdev->config.r100.reg_safe_bm = r100_reg_safe_bm; | 1850 | rdev->config.r100.reg_safe_bm = r100_reg_safe_bm; |
| 2000 | rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm); | 1851 | rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm); |
| 2001 | } else { | 1852 | } else { |
| 2002 | return r200_init(rdev); | 1853 | r200_set_safe_registers(rdev); |
| 2003 | } | 1854 | } |
| 2004 | return 0; | ||
| 2005 | } | 1855 | } |
| 2006 | 1856 | ||
| 2007 | /* | 1857 | /* |
| @@ -2299,9 +2149,11 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
| 2299 | mode1 = &rdev->mode_info.crtcs[0]->base.mode; | 2149 | mode1 = &rdev->mode_info.crtcs[0]->base.mode; |
| 2300 | pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; | 2150 | pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; |
| 2301 | } | 2151 | } |
| 2302 | if (rdev->mode_info.crtcs[1]->base.enabled) { | 2152 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { |
| 2303 | mode2 = &rdev->mode_info.crtcs[1]->base.mode; | 2153 | if (rdev->mode_info.crtcs[1]->base.enabled) { |
| 2304 | pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8; | 2154 | mode2 = &rdev->mode_info.crtcs[1]->base.mode; |
| 2155 | pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8; | ||
| 2156 | } | ||
| 2305 | } | 2157 | } |
| 2306 | 2158 | ||
| 2307 | min_mem_eff.full = rfixed_const_8(0); | 2159 | min_mem_eff.full = rfixed_const_8(0); |
| @@ -3114,7 +2966,7 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save) | |||
| 3114 | WREG32(R_000740_CP_CSQ_CNTL, 0); | 2966 | WREG32(R_000740_CP_CSQ_CNTL, 0); |
| 3115 | 2967 | ||
| 3116 | /* Save few CRTC registers */ | 2968 | /* Save few CRTC registers */ |
| 3117 | save->GENMO_WT = RREG32(R_0003C0_GENMO_WT); | 2969 | save->GENMO_WT = RREG8(R_0003C2_GENMO_WT); |
| 3118 | save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL); | 2970 | save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL); |
| 3119 | save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL); | 2971 | save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL); |
| 3120 | save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET); | 2972 | save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET); |
| @@ -3124,7 +2976,7 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save) | |||
| 3124 | } | 2976 | } |
| 3125 | 2977 | ||
| 3126 | /* Disable VGA aperture access */ | 2978 | /* Disable VGA aperture access */ |
| 3127 | WREG32(R_0003C0_GENMO_WT, C_0003C0_VGA_RAM_EN & save->GENMO_WT); | 2979 | WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT); |
| 3128 | /* Disable cursor, overlay, crtc */ | 2980 | /* Disable cursor, overlay, crtc */ |
| 3129 | WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1)); | 2981 | WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1)); |
| 3130 | WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL | | 2982 | WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL | |
| @@ -3156,10 +3008,264 @@ void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save) | |||
| 3156 | rdev->mc.vram_location); | 3008 | rdev->mc.vram_location); |
| 3157 | } | 3009 | } |
| 3158 | /* Restore CRTC registers */ | 3010 | /* Restore CRTC registers */ |
| 3159 | WREG32(R_0003C0_GENMO_WT, save->GENMO_WT); | 3011 | WREG8(R_0003C2_GENMO_WT, save->GENMO_WT); |
| 3160 | WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL); | 3012 | WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL); |
| 3161 | WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL); | 3013 | WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL); |
| 3162 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { | 3014 | if (!(rdev->flags & RADEON_SINGLE_CRTC)) { |
| 3163 | WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL); | 3015 | WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL); |
| 3164 | } | 3016 | } |
| 3165 | } | 3017 | } |
| 3018 | |||
| 3019 | void r100_vga_render_disable(struct radeon_device *rdev) | ||
| 3020 | { | ||
| 3021 | u32 tmp; | ||
| 3022 | |||
| 3023 | tmp = RREG8(R_0003C2_GENMO_WT); | ||
| 3024 | WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp); | ||
| 3025 | } | ||
| 3026 | |||
| 3027 | static void r100_debugfs(struct radeon_device *rdev) | ||
| 3028 | { | ||
| 3029 | int r; | ||
| 3030 | |||
| 3031 | r = r100_debugfs_mc_info_init(rdev); | ||
| 3032 | if (r) | ||
| 3033 | dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n"); | ||
| 3034 | } | ||
| 3035 | |||
| 3036 | static void r100_mc_program(struct radeon_device *rdev) | ||
| 3037 | { | ||
| 3038 | struct r100_mc_save save; | ||
| 3039 | |||
| 3040 | /* Stops all mc clients */ | ||
| 3041 | r100_mc_stop(rdev, &save); | ||
| 3042 | if (rdev->flags & RADEON_IS_AGP) { | ||
| 3043 | WREG32(R_00014C_MC_AGP_LOCATION, | ||
| 3044 | S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) | | ||
| 3045 | S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16)); | ||
| 3046 | WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base)); | ||
| 3047 | if (rdev->family > CHIP_RV200) | ||
| 3048 | WREG32(R_00015C_AGP_BASE_2, | ||
| 3049 | upper_32_bits(rdev->mc.agp_base) & 0xff); | ||
| 3050 | } else { | ||
| 3051 | WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF); | ||
| 3052 | WREG32(R_000170_AGP_BASE, 0); | ||
| 3053 | if (rdev->family > CHIP_RV200) | ||
| 3054 | WREG32(R_00015C_AGP_BASE_2, 0); | ||
| 3055 | } | ||
| 3056 | /* Wait for mc idle */ | ||
| 3057 | if (r100_mc_wait_for_idle(rdev)) | ||
| 3058 | dev_warn(rdev->dev, "Wait for MC idle timeout.\n"); | ||
| 3059 | /* Program MC, should be a 32bits limited address space */ | ||
| 3060 | WREG32(R_000148_MC_FB_LOCATION, | ||
| 3061 | S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | | ||
| 3062 | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); | ||
| 3063 | r100_mc_resume(rdev, &save); | ||
| 3064 | } | ||
| 3065 | |||
| 3066 | void r100_clock_startup(struct radeon_device *rdev) | ||
| 3067 | { | ||
| 3068 | u32 tmp; | ||
| 3069 | |||
| 3070 | if (radeon_dynclks != -1 && radeon_dynclks) | ||
| 3071 | radeon_legacy_set_clock_gating(rdev, 1); | ||
| 3072 | /* We need to force on some of the block */ | ||
| 3073 | tmp = RREG32_PLL(R_00000D_SCLK_CNTL); | ||
| 3074 | tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); | ||
| 3075 | if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280)) | ||
| 3076 | tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1); | ||
| 3077 | WREG32_PLL(R_00000D_SCLK_CNTL, tmp); | ||
| 3078 | } | ||
| 3079 | |||
| 3080 | static int r100_startup(struct radeon_device *rdev) | ||
| 3081 | { | ||
| 3082 | int r; | ||
| 3083 | |||
| 3084 | r100_mc_program(rdev); | ||
| 3085 | /* Resume clock */ | ||
| 3086 | r100_clock_startup(rdev); | ||
| 3087 | /* Initialize GPU configuration (# pipes, ...) */ | ||
| 3088 | r100_gpu_init(rdev); | ||
| 3089 | /* Initialize GART (initialize after TTM so we can allocate | ||
| 3090 | * memory through TTM but finalize after TTM) */ | ||
| 3091 | if (rdev->flags & RADEON_IS_PCI) { | ||
| 3092 | r = r100_pci_gart_enable(rdev); | ||
| 3093 | if (r) | ||
| 3094 | return r; | ||
| 3095 | } | ||
| 3096 | /* Enable IRQ */ | ||
| 3097 | rdev->irq.sw_int = true; | ||
| 3098 | r100_irq_set(rdev); | ||
| 3099 | /* 1M ring buffer */ | ||
| 3100 | r = r100_cp_init(rdev, 1024 * 1024); | ||
| 3101 | if (r) { | ||
| 3102 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | ||
| 3103 | return r; | ||
| 3104 | } | ||
| 3105 | r = r100_wb_init(rdev); | ||
| 3106 | if (r) | ||
| 3107 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
| 3108 | r = r100_ib_init(rdev); | ||
| 3109 | if (r) { | ||
| 3110 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | ||
| 3111 | return r; | ||
| 3112 | } | ||
| 3113 | return 0; | ||
| 3114 | } | ||
| 3115 | |||
| 3116 | int r100_resume(struct radeon_device *rdev) | ||
| 3117 | { | ||
| 3118 | /* Make sur GART are not working */ | ||
| 3119 | if (rdev->flags & RADEON_IS_PCI) | ||
| 3120 | r100_pci_gart_disable(rdev); | ||
| 3121 | /* Resume clock before doing reset */ | ||
| 3122 | r100_clock_startup(rdev); | ||
| 3123 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
| 3124 | if (radeon_gpu_reset(rdev)) { | ||
| 3125 | dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
| 3126 | RREG32(R_000E40_RBBM_STATUS), | ||
| 3127 | RREG32(R_0007C0_CP_STAT)); | ||
| 3128 | } | ||
| 3129 | /* post */ | ||
| 3130 | radeon_combios_asic_init(rdev->ddev); | ||
| 3131 | /* Resume clock after posting */ | ||
| 3132 | r100_clock_startup(rdev); | ||
| 3133 | return r100_startup(rdev); | ||
| 3134 | } | ||
| 3135 | |||
| 3136 | int r100_suspend(struct radeon_device *rdev) | ||
| 3137 | { | ||
| 3138 | r100_cp_disable(rdev); | ||
| 3139 | r100_wb_disable(rdev); | ||
| 3140 | r100_irq_disable(rdev); | ||
| 3141 | if (rdev->flags & RADEON_IS_PCI) | ||
| 3142 | r100_pci_gart_disable(rdev); | ||
| 3143 | return 0; | ||
| 3144 | } | ||
| 3145 | |||
| 3146 | void r100_fini(struct radeon_device *rdev) | ||
| 3147 | { | ||
| 3148 | r100_suspend(rdev); | ||
| 3149 | r100_cp_fini(rdev); | ||
| 3150 | r100_wb_fini(rdev); | ||
| 3151 | r100_ib_fini(rdev); | ||
| 3152 | radeon_gem_fini(rdev); | ||
| 3153 | if (rdev->flags & RADEON_IS_PCI) | ||
| 3154 | r100_pci_gart_fini(rdev); | ||
| 3155 | radeon_irq_kms_fini(rdev); | ||
| 3156 | radeon_fence_driver_fini(rdev); | ||
| 3157 | radeon_object_fini(rdev); | ||
| 3158 | radeon_atombios_fini(rdev); | ||
| 3159 | kfree(rdev->bios); | ||
| 3160 | rdev->bios = NULL; | ||
| 3161 | } | ||
| 3162 | |||
| 3163 | int r100_mc_init(struct radeon_device *rdev) | ||
| 3164 | { | ||
| 3165 | int r; | ||
| 3166 | u32 tmp; | ||
| 3167 | |||
| 3168 | /* Setup GPU memory space */ | ||
| 3169 | rdev->mc.vram_location = 0xFFFFFFFFUL; | ||
| 3170 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | ||
| 3171 | if (rdev->flags & RADEON_IS_IGP) { | ||
| 3172 | tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM)); | ||
| 3173 | rdev->mc.vram_location = tmp << 16; | ||
| 3174 | } | ||
| 3175 | if (rdev->flags & RADEON_IS_AGP) { | ||
| 3176 | r = radeon_agp_init(rdev); | ||
| 3177 | if (r) { | ||
| 3178 | printk(KERN_WARNING "[drm] Disabling AGP\n"); | ||
| 3179 | rdev->flags &= ~RADEON_IS_AGP; | ||
| 3180 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | ||
| 3181 | } else { | ||
| 3182 | rdev->mc.gtt_location = rdev->mc.agp_base; | ||
| 3183 | } | ||
| 3184 | } | ||
| 3185 | r = radeon_mc_setup(rdev); | ||
| 3186 | if (r) | ||
| 3187 | return r; | ||
| 3188 | return 0; | ||
| 3189 | } | ||
| 3190 | |||
| 3191 | int r100_init(struct radeon_device *rdev) | ||
| 3192 | { | ||
| 3193 | int r; | ||
| 3194 | |||
| 3195 | /* Register debugfs file specific to this group of asics */ | ||
| 3196 | r100_debugfs(rdev); | ||
| 3197 | /* Disable VGA */ | ||
| 3198 | r100_vga_render_disable(rdev); | ||
| 3199 | /* Initialize scratch registers */ | ||
| 3200 | radeon_scratch_init(rdev); | ||
| 3201 | /* Initialize surface registers */ | ||
| 3202 | radeon_surface_init(rdev); | ||
| 3203 | /* TODO: disable VGA need to use VGA request */ | ||
| 3204 | /* BIOS*/ | ||
| 3205 | if (!radeon_get_bios(rdev)) { | ||
| 3206 | if (ASIC_IS_AVIVO(rdev)) | ||
| 3207 | return -EINVAL; | ||
| 3208 | } | ||
| 3209 | if (rdev->is_atom_bios) { | ||
| 3210 | dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n"); | ||
| 3211 | return -EINVAL; | ||
| 3212 | } else { | ||
| 3213 | r = radeon_combios_init(rdev); | ||
| 3214 | if (r) | ||
| 3215 | return r; | ||
| 3216 | } | ||
| 3217 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
| 3218 | if (radeon_gpu_reset(rdev)) { | ||
| 3219 | dev_warn(rdev->dev, | ||
| 3220 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
| 3221 | RREG32(R_000E40_RBBM_STATUS), | ||
| 3222 | RREG32(R_0007C0_CP_STAT)); | ||
| 3223 | } | ||
| 3224 | /* check if cards are posted or not */ | ||
| 3225 | if (!radeon_card_posted(rdev) && rdev->bios) { | ||
| 3226 | DRM_INFO("GPU not posted. posting now...\n"); | ||
| 3227 | radeon_combios_asic_init(rdev->ddev); | ||
| 3228 | } | ||
| 3229 | /* Set asic errata */ | ||
| 3230 | r100_errata(rdev); | ||
| 3231 | /* Initialize clocks */ | ||
| 3232 | radeon_get_clock_info(rdev->ddev); | ||
| 3233 | /* Get vram informations */ | ||
| 3234 | r100_vram_info(rdev); | ||
| 3235 | /* Initialize memory controller (also test AGP) */ | ||
| 3236 | r = r100_mc_init(rdev); | ||
| 3237 | if (r) | ||
| 3238 | return r; | ||
| 3239 | /* Fence driver */ | ||
| 3240 | r = radeon_fence_driver_init(rdev); | ||
| 3241 | if (r) | ||
| 3242 | return r; | ||
| 3243 | r = radeon_irq_kms_init(rdev); | ||
| 3244 | if (r) | ||
| 3245 | return r; | ||
| 3246 | /* Memory manager */ | ||
| 3247 | r = radeon_object_init(rdev); | ||
| 3248 | if (r) | ||
| 3249 | return r; | ||
| 3250 | if (rdev->flags & RADEON_IS_PCI) { | ||
| 3251 | r = r100_pci_gart_init(rdev); | ||
| 3252 | if (r) | ||
| 3253 | return r; | ||
| 3254 | } | ||
| 3255 | r100_set_safe_registers(rdev); | ||
| 3256 | rdev->accel_working = true; | ||
| 3257 | r = r100_startup(rdev); | ||
| 3258 | if (r) { | ||
| 3259 | /* Somethings want wront with the accel init stop accel */ | ||
| 3260 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | ||
| 3261 | r100_suspend(rdev); | ||
| 3262 | r100_cp_fini(rdev); | ||
| 3263 | r100_wb_fini(rdev); | ||
| 3264 | r100_ib_fini(rdev); | ||
| 3265 | if (rdev->flags & RADEON_IS_PCI) | ||
| 3266 | r100_pci_gart_fini(rdev); | ||
| 3267 | radeon_irq_kms_fini(rdev); | ||
| 3268 | rdev->accel_working = false; | ||
| 3269 | } | ||
| 3270 | return 0; | ||
| 3271 | } | ||
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h index c4b257ec920e..df29a630c466 100644 --- a/drivers/gpu/drm/radeon/r100d.h +++ b/drivers/gpu/drm/radeon/r100d.h | |||
| @@ -381,6 +381,24 @@ | |||
| 381 | #define S_000054_VCRTC_IDX_MASTER(x) (((x) & 0x7F) << 24) | 381 | #define S_000054_VCRTC_IDX_MASTER(x) (((x) & 0x7F) << 24) |
| 382 | #define G_000054_VCRTC_IDX_MASTER(x) (((x) >> 24) & 0x7F) | 382 | #define G_000054_VCRTC_IDX_MASTER(x) (((x) >> 24) & 0x7F) |
| 383 | #define C_000054_VCRTC_IDX_MASTER 0x80FFFFFF | 383 | #define C_000054_VCRTC_IDX_MASTER 0x80FFFFFF |
| 384 | #define R_000148_MC_FB_LOCATION 0x000148 | ||
| 385 | #define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0) | ||
| 386 | #define G_000148_MC_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
| 387 | #define C_000148_MC_FB_START 0xFFFF0000 | ||
| 388 | #define S_000148_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) | ||
| 389 | #define G_000148_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) | ||
| 390 | #define C_000148_MC_FB_TOP 0x0000FFFF | ||
| 391 | #define R_00014C_MC_AGP_LOCATION 0x00014C | ||
| 392 | #define S_00014C_MC_AGP_START(x) (((x) & 0xFFFF) << 0) | ||
| 393 | #define G_00014C_MC_AGP_START(x) (((x) >> 0) & 0xFFFF) | ||
| 394 | #define C_00014C_MC_AGP_START 0xFFFF0000 | ||
| 395 | #define S_00014C_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16) | ||
| 396 | #define G_00014C_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF) | ||
| 397 | #define C_00014C_MC_AGP_TOP 0x0000FFFF | ||
| 398 | #define R_000170_AGP_BASE 0x000170 | ||
| 399 | #define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) | ||
| 400 | #define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) | ||
| 401 | #define C_000170_AGP_BASE_ADDR 0x00000000 | ||
| 384 | #define R_00023C_DISPLAY_BASE_ADDR 0x00023C | 402 | #define R_00023C_DISPLAY_BASE_ADDR 0x00023C |
| 385 | #define S_00023C_DISPLAY_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) | 403 | #define S_00023C_DISPLAY_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) |
| 386 | #define G_00023C_DISPLAY_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) | 404 | #define G_00023C_DISPLAY_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) |
| @@ -403,25 +421,25 @@ | |||
| 403 | #define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31) | 421 | #define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31) |
| 404 | #define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1) | 422 | #define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1) |
| 405 | #define C_000360_CUR2_LOCK 0x7FFFFFFF | 423 | #define C_000360_CUR2_LOCK 0x7FFFFFFF |
| 406 | #define R_0003C0_GENMO_WT 0x0003C0 | 424 | #define R_0003C2_GENMO_WT 0x0003C0 |
| 407 | #define S_0003C0_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0) | 425 | #define S_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0) |
| 408 | #define G_0003C0_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1) | 426 | #define G_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1) |
| 409 | #define C_0003C0_GENMO_MONO_ADDRESS_B 0xFFFFFFFE | 427 | #define C_0003C2_GENMO_MONO_ADDRESS_B 0xFE |
| 410 | #define S_0003C0_VGA_RAM_EN(x) (((x) & 0x1) << 1) | 428 | #define S_0003C2_VGA_RAM_EN(x) (((x) & 0x1) << 1) |
| 411 | #define G_0003C0_VGA_RAM_EN(x) (((x) >> 1) & 0x1) | 429 | #define G_0003C2_VGA_RAM_EN(x) (((x) >> 1) & 0x1) |
| 412 | #define C_0003C0_VGA_RAM_EN 0xFFFFFFFD | 430 | #define C_0003C2_VGA_RAM_EN 0xFD |
| 413 | #define S_0003C0_VGA_CKSEL(x) (((x) & 0x3) << 2) | 431 | #define S_0003C2_VGA_CKSEL(x) (((x) & 0x3) << 2) |
| 414 | #define G_0003C0_VGA_CKSEL(x) (((x) >> 2) & 0x3) | 432 | #define G_0003C2_VGA_CKSEL(x) (((x) >> 2) & 0x3) |
| 415 | #define C_0003C0_VGA_CKSEL 0xFFFFFFF3 | 433 | #define C_0003C2_VGA_CKSEL 0xF3 |
| 416 | #define S_0003C0_ODD_EVEN_MD_PGSEL(x) (((x) & 0x1) << 5) | 434 | #define S_0003C2_ODD_EVEN_MD_PGSEL(x) (((x) & 0x1) << 5) |
| 417 | #define G_0003C0_ODD_EVEN_MD_PGSEL(x) (((x) >> 5) & 0x1) | 435 | #define G_0003C2_ODD_EVEN_MD_PGSEL(x) (((x) >> 5) & 0x1) |
| 418 | #define C_0003C0_ODD_EVEN_MD_PGSEL 0xFFFFFFDF | 436 | #define C_0003C2_ODD_EVEN_MD_PGSEL 0xDF |
| 419 | #define S_0003C0_VGA_HSYNC_POL(x) (((x) & 0x1) << 6) | 437 | #define S_0003C2_VGA_HSYNC_POL(x) (((x) & 0x1) << 6) |
| 420 | #define G_0003C0_VGA_HSYNC_POL(x) (((x) >> 6) & 0x1) | 438 | #define G_0003C2_VGA_HSYNC_POL(x) (((x) >> 6) & 0x1) |
| 421 | #define C_0003C0_VGA_HSYNC_POL 0xFFFFFFBF | 439 | #define C_0003C2_VGA_HSYNC_POL 0xBF |
| 422 | #define S_0003C0_VGA_VSYNC_POL(x) (((x) & 0x1) << 7) | 440 | #define S_0003C2_VGA_VSYNC_POL(x) (((x) & 0x1) << 7) |
| 423 | #define G_0003C0_VGA_VSYNC_POL(x) (((x) >> 7) & 0x1) | 441 | #define G_0003C2_VGA_VSYNC_POL(x) (((x) >> 7) & 0x1) |
| 424 | #define C_0003C0_VGA_VSYNC_POL 0xFFFFFF7F | 442 | #define C_0003C2_VGA_VSYNC_POL 0x7F |
| 425 | #define R_0003F8_CRTC2_GEN_CNTL 0x0003F8 | 443 | #define R_0003F8_CRTC2_GEN_CNTL 0x0003F8 |
| 426 | #define S_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) & 0x1) << 0) | 444 | #define S_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) & 0x1) << 0) |
| 427 | #define G_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) >> 0) & 0x1) | 445 | #define G_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) >> 0) & 0x1) |
| @@ -545,6 +563,46 @@ | |||
| 545 | #define S_000774_SCRATCH_ADDR(x) (((x) & 0x7FFFFFF) << 5) | 563 | #define S_000774_SCRATCH_ADDR(x) (((x) & 0x7FFFFFF) << 5) |
| 546 | #define G_000774_SCRATCH_ADDR(x) (((x) >> 5) & 0x7FFFFFF) | 564 | #define G_000774_SCRATCH_ADDR(x) (((x) >> 5) & 0x7FFFFFF) |
| 547 | #define C_000774_SCRATCH_ADDR 0x0000001F | 565 | #define C_000774_SCRATCH_ADDR 0x0000001F |
| 566 | #define R_0007C0_CP_STAT 0x0007C0 | ||
| 567 | #define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) | ||
| 568 | #define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) | ||
| 569 | #define C_0007C0_MRU_BUSY 0xFFFFFFFE | ||
| 570 | #define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) | ||
| 571 | #define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) | ||
| 572 | #define C_0007C0_MWU_BUSY 0xFFFFFFFD | ||
| 573 | #define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) | ||
| 574 | #define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) | ||
| 575 | #define C_0007C0_RSIU_BUSY 0xFFFFFFFB | ||
| 576 | #define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) | ||
| 577 | #define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) | ||
| 578 | #define C_0007C0_RCIU_BUSY 0xFFFFFFF7 | ||
| 579 | #define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) | ||
| 580 | #define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) | ||
| 581 | #define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF | ||
| 582 | #define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) | ||
| 583 | #define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) | ||
| 584 | #define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF | ||
| 585 | #define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) | ||
| 586 | #define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) | ||
| 587 | #define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF | ||
| 588 | #define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) | ||
| 589 | #define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) | ||
| 590 | #define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF | ||
| 591 | #define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) | ||
| 592 | #define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) | ||
| 593 | #define C_0007C0_CSI_BUSY 0xFFFFDFFF | ||
| 594 | #define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) | ||
| 595 | #define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) | ||
| 596 | #define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF | ||
| 597 | #define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) | ||
| 598 | #define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) | ||
| 599 | #define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF | ||
| 600 | #define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) | ||
| 601 | #define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) | ||
| 602 | #define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF | ||
| 603 | #define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) | ||
| 604 | #define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) | ||
| 605 | #define C_0007C0_CP_BUSY 0x7FFFFFFF | ||
| 548 | #define R_000E40_RBBM_STATUS 0x000E40 | 606 | #define R_000E40_RBBM_STATUS 0x000E40 |
| 549 | #define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) | 607 | #define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) |
| 550 | #define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) | 608 | #define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) |
| @@ -604,4 +662,53 @@ | |||
| 604 | #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) | 662 | #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) |
| 605 | #define C_000E40_GUI_ACTIVE 0x7FFFFFFF | 663 | #define C_000E40_GUI_ACTIVE 0x7FFFFFFF |
| 606 | 664 | ||
| 665 | |||
| 666 | #define R_00000D_SCLK_CNTL 0x00000D | ||
| 667 | #define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0) | ||
| 668 | #define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7) | ||
| 669 | #define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8 | ||
| 670 | #define S_00000D_TCLK_SRC_SEL(x) (((x) & 0x7) << 8) | ||
| 671 | #define G_00000D_TCLK_SRC_SEL(x) (((x) >> 8) & 0x7) | ||
| 672 | #define C_00000D_TCLK_SRC_SEL 0xFFFFF8FF | ||
| 673 | #define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16) | ||
| 674 | #define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1) | ||
| 675 | #define C_00000D_FORCE_CP 0xFFFEFFFF | ||
| 676 | #define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17) | ||
| 677 | #define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1) | ||
| 678 | #define C_00000D_FORCE_HDP 0xFFFDFFFF | ||
| 679 | #define S_00000D_FORCE_DISP(x) (((x) & 0x1) << 18) | ||
| 680 | #define G_00000D_FORCE_DISP(x) (((x) >> 18) & 0x1) | ||
| 681 | #define C_00000D_FORCE_DISP 0xFFFBFFFF | ||
| 682 | #define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19) | ||
| 683 | #define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1) | ||
| 684 | #define C_00000D_FORCE_TOP 0xFFF7FFFF | ||
| 685 | #define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) | ||
| 686 | #define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) | ||
| 687 | #define C_00000D_FORCE_E2 0xFFEFFFFF | ||
| 688 | #define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21) | ||
| 689 | #define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1) | ||
| 690 | #define C_00000D_FORCE_SE 0xFFDFFFFF | ||
| 691 | #define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) | ||
| 692 | #define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) | ||
| 693 | #define C_00000D_FORCE_IDCT 0xFFBFFFFF | ||
| 694 | #define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23) | ||
| 695 | #define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1) | ||
| 696 | #define C_00000D_FORCE_VIP 0xFF7FFFFF | ||
| 697 | #define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) | ||
| 698 | #define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) | ||
| 699 | #define C_00000D_FORCE_RE 0xFEFFFFFF | ||
| 700 | #define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25) | ||
| 701 | #define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1) | ||
| 702 | #define C_00000D_FORCE_PB 0xFDFFFFFF | ||
| 703 | #define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26) | ||
| 704 | #define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1) | ||
| 705 | #define C_00000D_FORCE_TAM 0xFBFFFFFF | ||
| 706 | #define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27) | ||
| 707 | #define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1) | ||
| 708 | #define C_00000D_FORCE_TDM 0xF7FFFFFF | ||
| 709 | #define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28) | ||
| 710 | #define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) | ||
| 711 | #define C_00000D_FORCE_RB 0xEFFFFFFF | ||
| 712 | |||
| 713 | |||
| 607 | #endif | 714 | #endif |
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index cf7fea5ff2e5..eb740fc3549f 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c | |||
| @@ -447,9 +447,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
| 447 | return 0; | 447 | return 0; |
| 448 | } | 448 | } |
| 449 | 449 | ||
| 450 | int r200_init(struct radeon_device *rdev) | 450 | void r200_set_safe_registers(struct radeon_device *rdev) |
| 451 | { | 451 | { |
| 452 | rdev->config.r100.reg_safe_bm = r200_reg_safe_bm; | 452 | rdev->config.r100.reg_safe_bm = r200_reg_safe_bm; |
| 453 | rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r200_reg_safe_bm); | 453 | rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r200_reg_safe_bm); |
| 454 | return 0; | ||
| 455 | } | 454 | } |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 1ebea8cc8c93..e08c4a8974ca 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
| @@ -33,43 +33,16 @@ | |||
| 33 | #include "radeon_drm.h" | 33 | #include "radeon_drm.h" |
| 34 | #include "r100_track.h" | 34 | #include "r100_track.h" |
| 35 | #include "r300d.h" | 35 | #include "r300d.h" |
| 36 | 36 | #include "rv350d.h" | |
| 37 | #include "r300_reg_safe.h" | 37 | #include "r300_reg_safe.h" |
| 38 | 38 | ||
| 39 | /* r300,r350,rv350,rv370,rv380 depends on : */ | 39 | /* This files gather functions specifics to: r300,r350,rv350,rv370,rv380 */ |
| 40 | void r100_hdp_reset(struct radeon_device *rdev); | ||
| 41 | int r100_cp_reset(struct radeon_device *rdev); | ||
| 42 | int r100_rb2d_reset(struct radeon_device *rdev); | ||
| 43 | int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); | ||
| 44 | int r100_pci_gart_enable(struct radeon_device *rdev); | ||
| 45 | void r100_mc_setup(struct radeon_device *rdev); | ||
| 46 | void r100_mc_disable_clients(struct radeon_device *rdev); | ||
| 47 | int r100_gui_wait_for_idle(struct radeon_device *rdev); | ||
| 48 | int r100_cs_packet_parse(struct radeon_cs_parser *p, | ||
| 49 | struct radeon_cs_packet *pkt, | ||
| 50 | unsigned idx); | ||
| 51 | int r100_cs_packet_parse_vline(struct radeon_cs_parser *p); | ||
| 52 | int r100_cs_parse_packet0(struct radeon_cs_parser *p, | ||
| 53 | struct radeon_cs_packet *pkt, | ||
| 54 | const unsigned *auth, unsigned n, | ||
| 55 | radeon_packet0_check_t check); | ||
| 56 | int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, | ||
| 57 | struct radeon_cs_packet *pkt, | ||
| 58 | struct radeon_object *robj); | ||
| 59 | |||
| 60 | /* This files gather functions specifics to: | ||
| 61 | * r300,r350,rv350,rv370,rv380 | ||
| 62 | * | ||
| 63 | * Some of these functions might be used by newer ASICs. | ||
| 64 | */ | ||
| 65 | void r300_gpu_init(struct radeon_device *rdev); | ||
| 66 | int r300_mc_wait_for_idle(struct radeon_device *rdev); | ||
| 67 | int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev); | ||
| 68 | |||
| 69 | 40 | ||
| 70 | /* | 41 | /* |
| 71 | * rv370,rv380 PCIE GART | 42 | * rv370,rv380 PCIE GART |
| 72 | */ | 43 | */ |
| 44 | static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev); | ||
| 45 | |||
| 73 | void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) | 46 | void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) |
| 74 | { | 47 | { |
| 75 | uint32_t tmp; | 48 | uint32_t tmp; |
| @@ -182,59 +155,6 @@ void rv370_pcie_gart_fini(struct radeon_device *rdev) | |||
| 182 | radeon_gart_fini(rdev); | 155 | radeon_gart_fini(rdev); |
| 183 | } | 156 | } |
| 184 | 157 | ||
| 185 | /* | ||
| 186 | * MC | ||
| 187 | */ | ||
| 188 | int r300_mc_init(struct radeon_device *rdev) | ||
| 189 | { | ||
| 190 | int r; | ||
| 191 | |||
| 192 | if (r100_debugfs_rbbm_init(rdev)) { | ||
| 193 | DRM_ERROR("Failed to register debugfs file for RBBM !\n"); | ||
| 194 | } | ||
| 195 | |||
| 196 | r300_gpu_init(rdev); | ||
| 197 | r100_pci_gart_disable(rdev); | ||
| 198 | if (rdev->flags & RADEON_IS_PCIE) { | ||
| 199 | rv370_pcie_gart_disable(rdev); | ||
| 200 | } | ||
| 201 | |||
| 202 | /* Setup GPU memory space */ | ||
| 203 | rdev->mc.vram_location = 0xFFFFFFFFUL; | ||
| 204 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | ||
| 205 | if (rdev->flags & RADEON_IS_AGP) { | ||
| 206 | r = radeon_agp_init(rdev); | ||
| 207 | if (r) { | ||
| 208 | printk(KERN_WARNING "[drm] Disabling AGP\n"); | ||
| 209 | rdev->flags &= ~RADEON_IS_AGP; | ||
| 210 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | ||
| 211 | } else { | ||
| 212 | rdev->mc.gtt_location = rdev->mc.agp_base; | ||
| 213 | } | ||
| 214 | } | ||
| 215 | r = radeon_mc_setup(rdev); | ||
| 216 | if (r) { | ||
| 217 | return r; | ||
| 218 | } | ||
| 219 | |||
| 220 | /* Program GPU memory space */ | ||
| 221 | r100_mc_disable_clients(rdev); | ||
| 222 | if (r300_mc_wait_for_idle(rdev)) { | ||
| 223 | printk(KERN_WARNING "Failed to wait MC idle while " | ||
| 224 | "programming pipes. Bad things might happen.\n"); | ||
| 225 | } | ||
| 226 | r100_mc_setup(rdev); | ||
| 227 | return 0; | ||
| 228 | } | ||
| 229 | |||
| 230 | void r300_mc_fini(struct radeon_device *rdev) | ||
| 231 | { | ||
| 232 | } | ||
| 233 | |||
| 234 | |||
| 235 | /* | ||
| 236 | * Fence emission | ||
| 237 | */ | ||
| 238 | void r300_fence_ring_emit(struct radeon_device *rdev, | 158 | void r300_fence_ring_emit(struct radeon_device *rdev, |
| 239 | struct radeon_fence *fence) | 159 | struct radeon_fence *fence) |
| 240 | { | 160 | { |
| @@ -260,10 +180,6 @@ void r300_fence_ring_emit(struct radeon_device *rdev, | |||
| 260 | radeon_ring_write(rdev, RADEON_SW_INT_FIRE); | 180 | radeon_ring_write(rdev, RADEON_SW_INT_FIRE); |
| 261 | } | 181 | } |
| 262 | 182 | ||
| 263 | |||
| 264 | /* | ||
| 265 | * Global GPU functions | ||
| 266 | */ | ||
| 267 | int r300_copy_dma(struct radeon_device *rdev, | 183 | int r300_copy_dma(struct radeon_device *rdev, |
| 268 | uint64_t src_offset, | 184 | uint64_t src_offset, |
| 269 | uint64_t dst_offset, | 185 | uint64_t dst_offset, |
| @@ -582,11 +498,6 @@ void r300_vram_info(struct radeon_device *rdev) | |||
| 582 | r100_vram_init_sizes(rdev); | 498 | r100_vram_init_sizes(rdev); |
| 583 | } | 499 | } |
| 584 | 500 | ||
| 585 | |||
| 586 | /* | ||
| 587 | * PCIE Lanes | ||
| 588 | */ | ||
| 589 | |||
| 590 | void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) | 501 | void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) |
| 591 | { | 502 | { |
| 592 | uint32_t link_width_cntl, mask; | 503 | uint32_t link_width_cntl, mask; |
| @@ -646,10 +557,6 @@ void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) | |||
| 646 | 557 | ||
| 647 | } | 558 | } |
| 648 | 559 | ||
| 649 | |||
| 650 | /* | ||
| 651 | * Debugfs info | ||
| 652 | */ | ||
| 653 | #if defined(CONFIG_DEBUG_FS) | 560 | #if defined(CONFIG_DEBUG_FS) |
| 654 | static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data) | 561 | static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data) |
| 655 | { | 562 | { |
| @@ -680,7 +587,7 @@ static struct drm_info_list rv370_pcie_gart_info_list[] = { | |||
| 680 | }; | 587 | }; |
| 681 | #endif | 588 | #endif |
| 682 | 589 | ||
| 683 | int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev) | 590 | static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev) |
| 684 | { | 591 | { |
| 685 | #if defined(CONFIG_DEBUG_FS) | 592 | #if defined(CONFIG_DEBUG_FS) |
| 686 | return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1); | 593 | return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1); |
| @@ -689,10 +596,6 @@ int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev) | |||
| 689 | #endif | 596 | #endif |
| 690 | } | 597 | } |
| 691 | 598 | ||
| 692 | |||
| 693 | /* | ||
| 694 | * CS functions | ||
| 695 | */ | ||
| 696 | static int r300_packet0_check(struct radeon_cs_parser *p, | 599 | static int r300_packet0_check(struct radeon_cs_parser *p, |
| 697 | struct radeon_cs_packet *pkt, | 600 | struct radeon_cs_packet *pkt, |
| 698 | unsigned idx, unsigned reg) | 601 | unsigned idx, unsigned reg) |
| @@ -1226,12 +1129,6 @@ void r300_set_reg_safe(struct radeon_device *rdev) | |||
| 1226 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm); | 1129 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm); |
| 1227 | } | 1130 | } |
| 1228 | 1131 | ||
| 1229 | int r300_init(struct radeon_device *rdev) | ||
| 1230 | { | ||
| 1231 | r300_set_reg_safe(rdev); | ||
| 1232 | return 0; | ||
| 1233 | } | ||
| 1234 | |||
| 1235 | void r300_mc_program(struct radeon_device *rdev) | 1132 | void r300_mc_program(struct radeon_device *rdev) |
| 1236 | { | 1133 | { |
| 1237 | struct r100_mc_save save; | 1134 | struct r100_mc_save save; |
| @@ -1265,3 +1162,198 @@ void r300_mc_program(struct radeon_device *rdev) | |||
| 1265 | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); | 1162 | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); |
| 1266 | r100_mc_resume(rdev, &save); | 1163 | r100_mc_resume(rdev, &save); |
| 1267 | } | 1164 | } |
| 1165 | |||
| 1166 | void r300_clock_startup(struct radeon_device *rdev) | ||
| 1167 | { | ||
| 1168 | u32 tmp; | ||
| 1169 | |||
| 1170 | if (radeon_dynclks != -1 && radeon_dynclks) | ||
| 1171 | radeon_legacy_set_clock_gating(rdev, 1); | ||
| 1172 | /* We need to force on some of the block */ | ||
| 1173 | tmp = RREG32_PLL(R_00000D_SCLK_CNTL); | ||
| 1174 | tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); | ||
| 1175 | if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380)) | ||
| 1176 | tmp |= S_00000D_FORCE_VAP(1); | ||
| 1177 | WREG32_PLL(R_00000D_SCLK_CNTL, tmp); | ||
| 1178 | } | ||
| 1179 | |||
| 1180 | static int r300_startup(struct radeon_device *rdev) | ||
| 1181 | { | ||
| 1182 | int r; | ||
| 1183 | |||
| 1184 | r300_mc_program(rdev); | ||
| 1185 | /* Resume clock */ | ||
| 1186 | r300_clock_startup(rdev); | ||
| 1187 | /* Initialize GPU configuration (# pipes, ...) */ | ||
| 1188 | r300_gpu_init(rdev); | ||
| 1189 | /* Initialize GART (initialize after TTM so we can allocate | ||
| 1190 | * memory through TTM but finalize after TTM) */ | ||
| 1191 | if (rdev->flags & RADEON_IS_PCIE) { | ||
| 1192 | r = rv370_pcie_gart_enable(rdev); | ||
| 1193 | if (r) | ||
| 1194 | return r; | ||
| 1195 | } | ||
| 1196 | if (rdev->flags & RADEON_IS_PCI) { | ||
| 1197 | r = r100_pci_gart_enable(rdev); | ||
| 1198 | if (r) | ||
| 1199 | return r; | ||
| 1200 | } | ||
| 1201 | /* Enable IRQ */ | ||
| 1202 | rdev->irq.sw_int = true; | ||
| 1203 | r100_irq_set(rdev); | ||
| 1204 | /* 1M ring buffer */ | ||
| 1205 | r = r100_cp_init(rdev, 1024 * 1024); | ||
| 1206 | if (r) { | ||
| 1207 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | ||
| 1208 | return r; | ||
| 1209 | } | ||
| 1210 | r = r100_wb_init(rdev); | ||
| 1211 | if (r) | ||
| 1212 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
| 1213 | r = r100_ib_init(rdev); | ||
| 1214 | if (r) { | ||
| 1215 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | ||
| 1216 | return r; | ||
| 1217 | } | ||
| 1218 | return 0; | ||
| 1219 | } | ||
| 1220 | |||
| 1221 | int r300_resume(struct radeon_device *rdev) | ||
| 1222 | { | ||
| 1223 | /* Make sur GART are not working */ | ||
| 1224 | if (rdev->flags & RADEON_IS_PCIE) | ||
| 1225 | rv370_pcie_gart_disable(rdev); | ||
| 1226 | if (rdev->flags & RADEON_IS_PCI) | ||
| 1227 | r100_pci_gart_disable(rdev); | ||
| 1228 | /* Resume clock before doing reset */ | ||
| 1229 | r300_clock_startup(rdev); | ||
| 1230 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
| 1231 | if (radeon_gpu_reset(rdev)) { | ||
| 1232 | dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
| 1233 | RREG32(R_000E40_RBBM_STATUS), | ||
| 1234 | RREG32(R_0007C0_CP_STAT)); | ||
| 1235 | } | ||
| 1236 | /* post */ | ||
| 1237 | radeon_combios_asic_init(rdev->ddev); | ||
| 1238 | /* Resume clock after posting */ | ||
| 1239 | r300_clock_startup(rdev); | ||
| 1240 | return r300_startup(rdev); | ||
| 1241 | } | ||
| 1242 | |||
| 1243 | int r300_suspend(struct radeon_device *rdev) | ||
| 1244 | { | ||
| 1245 | r100_cp_disable(rdev); | ||
| 1246 | r100_wb_disable(rdev); | ||
| 1247 | r100_irq_disable(rdev); | ||
| 1248 | if (rdev->flags & RADEON_IS_PCIE) | ||
| 1249 | rv370_pcie_gart_disable(rdev); | ||
| 1250 | if (rdev->flags & RADEON_IS_PCI) | ||
| 1251 | r100_pci_gart_disable(rdev); | ||
| 1252 | return 0; | ||
| 1253 | } | ||
| 1254 | |||
| 1255 | void r300_fini(struct radeon_device *rdev) | ||
| 1256 | { | ||
| 1257 | r300_suspend(rdev); | ||
| 1258 | r100_cp_fini(rdev); | ||
| 1259 | r100_wb_fini(rdev); | ||
| 1260 | r100_ib_fini(rdev); | ||
| 1261 | radeon_gem_fini(rdev); | ||
| 1262 | if (rdev->flags & RADEON_IS_PCIE) | ||
| 1263 | rv370_pcie_gart_fini(rdev); | ||
| 1264 | if (rdev->flags & RADEON_IS_PCI) | ||
| 1265 | r100_pci_gart_fini(rdev); | ||
| 1266 | radeon_irq_kms_fini(rdev); | ||
| 1267 | radeon_fence_driver_fini(rdev); | ||
| 1268 | radeon_object_fini(rdev); | ||
| 1269 | radeon_atombios_fini(rdev); | ||
| 1270 | kfree(rdev->bios); | ||
| 1271 | rdev->bios = NULL; | ||
| 1272 | } | ||
| 1273 | |||
| 1274 | int r300_init(struct radeon_device *rdev) | ||
| 1275 | { | ||
| 1276 | int r; | ||
| 1277 | |||
| 1278 | /* Disable VGA */ | ||
| 1279 | r100_vga_render_disable(rdev); | ||
| 1280 | /* Initialize scratch registers */ | ||
| 1281 | radeon_scratch_init(rdev); | ||
| 1282 | /* Initialize surface registers */ | ||
| 1283 | radeon_surface_init(rdev); | ||
| 1284 | /* TODO: disable VGA need to use VGA request */ | ||
| 1285 | /* BIOS*/ | ||
| 1286 | if (!radeon_get_bios(rdev)) { | ||
| 1287 | if (ASIC_IS_AVIVO(rdev)) | ||
| 1288 | return -EINVAL; | ||
| 1289 | } | ||
| 1290 | if (rdev->is_atom_bios) { | ||
| 1291 | dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n"); | ||
| 1292 | return -EINVAL; | ||
| 1293 | } else { | ||
| 1294 | r = radeon_combios_init(rdev); | ||
| 1295 | if (r) | ||
| 1296 | return r; | ||
| 1297 | } | ||
| 1298 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
| 1299 | if (radeon_gpu_reset(rdev)) { | ||
| 1300 | dev_warn(rdev->dev, | ||
| 1301 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
| 1302 | RREG32(R_000E40_RBBM_STATUS), | ||
| 1303 | RREG32(R_0007C0_CP_STAT)); | ||
| 1304 | } | ||
| 1305 | /* check if cards are posted or not */ | ||
| 1306 | if (!radeon_card_posted(rdev) && rdev->bios) { | ||
| 1307 | DRM_INFO("GPU not posted. posting now...\n"); | ||
| 1308 | radeon_combios_asic_init(rdev->ddev); | ||
| 1309 | } | ||
| 1310 | /* Set asic errata */ | ||
| 1311 | r300_errata(rdev); | ||
| 1312 | /* Initialize clocks */ | ||
| 1313 | radeon_get_clock_info(rdev->ddev); | ||
| 1314 | /* Get vram informations */ | ||
| 1315 | r300_vram_info(rdev); | ||
| 1316 | /* Initialize memory controller (also test AGP) */ | ||
| 1317 | r = r420_mc_init(rdev); | ||
| 1318 | if (r) | ||
| 1319 | return r; | ||
| 1320 | /* Fence driver */ | ||
| 1321 | r = radeon_fence_driver_init(rdev); | ||
| 1322 | if (r) | ||
| 1323 | return r; | ||
| 1324 | r = radeon_irq_kms_init(rdev); | ||
| 1325 | if (r) | ||
| 1326 | return r; | ||
| 1327 | /* Memory manager */ | ||
| 1328 | r = radeon_object_init(rdev); | ||
| 1329 | if (r) | ||
| 1330 | return r; | ||
| 1331 | if (rdev->flags & RADEON_IS_PCIE) { | ||
| 1332 | r = rv370_pcie_gart_init(rdev); | ||
| 1333 | if (r) | ||
| 1334 | return r; | ||
| 1335 | } | ||
| 1336 | if (rdev->flags & RADEON_IS_PCI) { | ||
| 1337 | r = r100_pci_gart_init(rdev); | ||
| 1338 | if (r) | ||
| 1339 | return r; | ||
| 1340 | } | ||
| 1341 | r300_set_reg_safe(rdev); | ||
| 1342 | rdev->accel_working = true; | ||
| 1343 | r = r300_startup(rdev); | ||
| 1344 | if (r) { | ||
| 1345 | /* Somethings want wront with the accel init stop accel */ | ||
| 1346 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | ||
| 1347 | r300_suspend(rdev); | ||
| 1348 | r100_cp_fini(rdev); | ||
| 1349 | r100_wb_fini(rdev); | ||
| 1350 | r100_ib_fini(rdev); | ||
| 1351 | if (rdev->flags & RADEON_IS_PCIE) | ||
| 1352 | rv370_pcie_gart_fini(rdev); | ||
| 1353 | if (rdev->flags & RADEON_IS_PCI) | ||
| 1354 | r100_pci_gart_fini(rdev); | ||
| 1355 | radeon_irq_kms_fini(rdev); | ||
| 1356 | rdev->accel_working = false; | ||
| 1357 | } | ||
| 1358 | return 0; | ||
| 1359 | } | ||
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h index d4fa3eb1074f..4c73114f0de9 100644 --- a/drivers/gpu/drm/radeon/r300d.h +++ b/drivers/gpu/drm/radeon/r300d.h | |||
| @@ -96,6 +96,211 @@ | |||
| 96 | #define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) | 96 | #define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) |
| 97 | #define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) | 97 | #define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) |
| 98 | #define C_000170_AGP_BASE_ADDR 0x00000000 | 98 | #define C_000170_AGP_BASE_ADDR 0x00000000 |
| 99 | #define R_0007C0_CP_STAT 0x0007C0 | ||
| 100 | #define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) | ||
| 101 | #define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) | ||
| 102 | #define C_0007C0_MRU_BUSY 0xFFFFFFFE | ||
| 103 | #define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) | ||
| 104 | #define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) | ||
| 105 | #define C_0007C0_MWU_BUSY 0xFFFFFFFD | ||
| 106 | #define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) | ||
| 107 | #define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) | ||
| 108 | #define C_0007C0_RSIU_BUSY 0xFFFFFFFB | ||
| 109 | #define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) | ||
| 110 | #define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) | ||
| 111 | #define C_0007C0_RCIU_BUSY 0xFFFFFFF7 | ||
| 112 | #define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) | ||
| 113 | #define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) | ||
| 114 | #define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF | ||
| 115 | #define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) | ||
| 116 | #define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) | ||
| 117 | #define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF | ||
| 118 | #define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) | ||
| 119 | #define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) | ||
| 120 | #define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF | ||
| 121 | #define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) | ||
| 122 | #define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) | ||
| 123 | #define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF | ||
| 124 | #define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) | ||
| 125 | #define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) | ||
| 126 | #define C_0007C0_CSI_BUSY 0xFFFFDFFF | ||
| 127 | #define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14) | ||
| 128 | #define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1) | ||
| 129 | #define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF | ||
| 130 | #define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15) | ||
| 131 | #define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1) | ||
| 132 | #define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF | ||
| 133 | #define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) | ||
| 134 | #define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) | ||
| 135 | #define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF | ||
| 136 | #define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) | ||
| 137 | #define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) | ||
| 138 | #define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF | ||
| 139 | #define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) | ||
| 140 | #define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) | ||
| 141 | #define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF | ||
| 142 | #define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) | ||
| 143 | #define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) | ||
| 144 | #define C_0007C0_CP_BUSY 0x7FFFFFFF | ||
| 145 | #define R_000E40_RBBM_STATUS 0x000E40 | ||
| 146 | #define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) | ||
| 147 | #define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) | ||
| 148 | #define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 | ||
| 149 | #define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) | ||
| 150 | #define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) | ||
| 151 | #define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF | ||
| 152 | #define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) | ||
| 153 | #define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) | ||
| 154 | #define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF | ||
| 155 | #define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) | ||
| 156 | #define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) | ||
| 157 | #define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF | ||
| 158 | #define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) | ||
| 159 | #define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) | ||
| 160 | #define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF | ||
| 161 | #define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) | ||
| 162 | #define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) | ||
| 163 | #define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF | ||
| 164 | #define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) | ||
| 165 | #define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) | ||
| 166 | #define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF | ||
| 167 | #define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) | ||
| 168 | #define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) | ||
| 169 | #define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF | ||
| 170 | #define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) | ||
| 171 | #define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) | ||
| 172 | #define C_000E40_ENG_EV_BUSY 0xFFFF7FFF | ||
| 173 | #define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) | ||
| 174 | #define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) | ||
| 175 | #define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF | ||
| 176 | #define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) | ||
| 177 | #define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) | ||
| 178 | #define C_000E40_E2_BUSY 0xFFFDFFFF | ||
| 179 | #define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) | ||
| 180 | #define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) | ||
| 181 | #define C_000E40_RB2D_BUSY 0xFFFBFFFF | ||
| 182 | #define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) | ||
| 183 | #define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) | ||
| 184 | #define C_000E40_RB3D_BUSY 0xFFF7FFFF | ||
| 185 | #define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20) | ||
| 186 | #define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1) | ||
| 187 | #define C_000E40_VAP_BUSY 0xFFEFFFFF | ||
| 188 | #define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) | ||
| 189 | #define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) | ||
| 190 | #define C_000E40_RE_BUSY 0xFFDFFFFF | ||
| 191 | #define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) | ||
| 192 | #define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) | ||
| 193 | #define C_000E40_TAM_BUSY 0xFFBFFFFF | ||
| 194 | #define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) | ||
| 195 | #define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) | ||
| 196 | #define C_000E40_TDM_BUSY 0xFF7FFFFF | ||
| 197 | #define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) | ||
| 198 | #define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) | ||
| 199 | #define C_000E40_PB_BUSY 0xFEFFFFFF | ||
| 200 | #define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25) | ||
| 201 | #define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1) | ||
| 202 | #define C_000E40_TIM_BUSY 0xFDFFFFFF | ||
| 203 | #define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26) | ||
| 204 | #define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1) | ||
| 205 | #define C_000E40_GA_BUSY 0xFBFFFFFF | ||
| 206 | #define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27) | ||
| 207 | #define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1) | ||
| 208 | #define C_000E40_CBA2D_BUSY 0xF7FFFFFF | ||
| 209 | #define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) | ||
| 210 | #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) | ||
| 211 | #define C_000E40_GUI_ACTIVE 0x7FFFFFFF | ||
| 99 | 212 | ||
| 100 | 213 | ||
| 214 | #define R_00000D_SCLK_CNTL 0x00000D | ||
| 215 | #define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0) | ||
| 216 | #define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7) | ||
| 217 | #define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8 | ||
| 218 | #define S_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 3) | ||
| 219 | #define G_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) >> 3) & 0x1) | ||
| 220 | #define C_00000D_CP_MAX_DYN_STOP_LAT 0xFFFFFFF7 | ||
| 221 | #define S_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 4) | ||
| 222 | #define G_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) >> 4) & 0x1) | ||
| 223 | #define C_00000D_HDP_MAX_DYN_STOP_LAT 0xFFFFFFEF | ||
| 224 | #define S_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 5) | ||
| 225 | #define G_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) >> 5) & 0x1) | ||
| 226 | #define C_00000D_TV_MAX_DYN_STOP_LAT 0xFFFFFFDF | ||
| 227 | #define S_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 6) | ||
| 228 | #define G_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) >> 6) & 0x1) | ||
| 229 | #define C_00000D_E2_MAX_DYN_STOP_LAT 0xFFFFFFBF | ||
| 230 | #define S_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 7) | ||
| 231 | #define G_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) >> 7) & 0x1) | ||
| 232 | #define C_00000D_SE_MAX_DYN_STOP_LAT 0xFFFFFF7F | ||
| 233 | #define S_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 8) | ||
| 234 | #define G_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 8) & 0x1) | ||
| 235 | #define C_00000D_IDCT_MAX_DYN_STOP_LAT 0xFFFFFEFF | ||
| 236 | #define S_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 9) | ||
| 237 | #define G_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) >> 9) & 0x1) | ||
| 238 | #define C_00000D_VIP_MAX_DYN_STOP_LAT 0xFFFFFDFF | ||
| 239 | #define S_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 10) | ||
| 240 | #define G_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) >> 10) & 0x1) | ||
| 241 | #define C_00000D_RE_MAX_DYN_STOP_LAT 0xFFFFFBFF | ||
| 242 | #define S_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 11) | ||
| 243 | #define G_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) >> 11) & 0x1) | ||
| 244 | #define C_00000D_PB_MAX_DYN_STOP_LAT 0xFFFFF7FF | ||
| 245 | #define S_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 12) | ||
| 246 | #define G_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) >> 12) & 0x1) | ||
| 247 | #define C_00000D_TAM_MAX_DYN_STOP_LAT 0xFFFFEFFF | ||
| 248 | #define S_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 13) | ||
| 249 | #define G_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) >> 13) & 0x1) | ||
| 250 | #define C_00000D_TDM_MAX_DYN_STOP_LAT 0xFFFFDFFF | ||
| 251 | #define S_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 14) | ||
| 252 | #define G_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) >> 14) & 0x1) | ||
| 253 | #define C_00000D_RB_MAX_DYN_STOP_LAT 0xFFFFBFFF | ||
| 254 | #define S_00000D_FORCE_DISP2(x) (((x) & 0x1) << 15) | ||
| 255 | #define G_00000D_FORCE_DISP2(x) (((x) >> 15) & 0x1) | ||
| 256 | #define C_00000D_FORCE_DISP2 0xFFFF7FFF | ||
| 257 | #define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16) | ||
| 258 | #define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1) | ||
| 259 | #define C_00000D_FORCE_CP 0xFFFEFFFF | ||
| 260 | #define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17) | ||
| 261 | #define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1) | ||
| 262 | #define C_00000D_FORCE_HDP 0xFFFDFFFF | ||
| 263 | #define S_00000D_FORCE_DISP1(x) (((x) & 0x1) << 18) | ||
| 264 | #define G_00000D_FORCE_DISP1(x) (((x) >> 18) & 0x1) | ||
| 265 | #define C_00000D_FORCE_DISP1 0xFFFBFFFF | ||
| 266 | #define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19) | ||
| 267 | #define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1) | ||
| 268 | #define C_00000D_FORCE_TOP 0xFFF7FFFF | ||
| 269 | #define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) | ||
| 270 | #define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) | ||
| 271 | #define C_00000D_FORCE_E2 0xFFEFFFFF | ||
| 272 | #define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21) | ||
| 273 | #define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1) | ||
| 274 | #define C_00000D_FORCE_SE 0xFFDFFFFF | ||
| 275 | #define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) | ||
| 276 | #define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) | ||
| 277 | #define C_00000D_FORCE_IDCT 0xFFBFFFFF | ||
| 278 | #define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23) | ||
| 279 | #define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1) | ||
| 280 | #define C_00000D_FORCE_VIP 0xFF7FFFFF | ||
| 281 | #define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) | ||
| 282 | #define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) | ||
| 283 | #define C_00000D_FORCE_RE 0xFEFFFFFF | ||
| 284 | #define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25) | ||
| 285 | #define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1) | ||
| 286 | #define C_00000D_FORCE_PB 0xFDFFFFFF | ||
| 287 | #define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26) | ||
| 288 | #define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1) | ||
| 289 | #define C_00000D_FORCE_TAM 0xFBFFFFFF | ||
| 290 | #define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27) | ||
| 291 | #define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1) | ||
| 292 | #define C_00000D_FORCE_TDM 0xF7FFFFFF | ||
| 293 | #define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28) | ||
| 294 | #define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) | ||
| 295 | #define C_00000D_FORCE_RB 0xEFFFFFFF | ||
| 296 | #define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29) | ||
| 297 | #define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1) | ||
| 298 | #define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF | ||
| 299 | #define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30) | ||
| 300 | #define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1) | ||
| 301 | #define C_00000D_FORCE_SUBPIC 0xBFFFFFFF | ||
| 302 | #define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31) | ||
| 303 | #define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1) | ||
| 304 | #define C_00000D_FORCE_OV0 0x7FFFFFFF | ||
| 305 | |||
| 101 | #endif | 306 | #endif |
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 49a2fdc57d27..5c7fe52de30e 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
| @@ -155,6 +155,9 @@ static void r420_debugfs(struct radeon_device *rdev) | |||
| 155 | static void r420_clock_resume(struct radeon_device *rdev) | 155 | static void r420_clock_resume(struct radeon_device *rdev) |
| 156 | { | 156 | { |
| 157 | u32 sclk_cntl; | 157 | u32 sclk_cntl; |
| 158 | |||
| 159 | if (radeon_dynclks != -1 && radeon_dynclks) | ||
| 160 | radeon_atom_set_clock_gating(rdev, 1); | ||
| 158 | sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL); | 161 | sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL); |
| 159 | sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); | 162 | sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); |
| 160 | if (rdev->family == CHIP_R420) | 163 | if (rdev->family == CHIP_R420) |
| @@ -167,6 +170,8 @@ static int r420_startup(struct radeon_device *rdev) | |||
| 167 | int r; | 170 | int r; |
| 168 | 171 | ||
| 169 | r300_mc_program(rdev); | 172 | r300_mc_program(rdev); |
| 173 | /* Resume clock */ | ||
| 174 | r420_clock_resume(rdev); | ||
| 170 | /* Initialize GART (initialize after TTM so we can allocate | 175 | /* Initialize GART (initialize after TTM so we can allocate |
| 171 | * memory through TTM but finalize after TTM) */ | 176 | * memory through TTM but finalize after TTM) */ |
| 172 | if (rdev->flags & RADEON_IS_PCIE) { | 177 | if (rdev->flags & RADEON_IS_PCIE) { |
| @@ -267,7 +272,6 @@ int r420_init(struct radeon_device *rdev) | |||
| 267 | { | 272 | { |
| 268 | int r; | 273 | int r; |
| 269 | 274 | ||
| 270 | rdev->new_init_path = true; | ||
| 271 | /* Initialize scratch registers */ | 275 | /* Initialize scratch registers */ |
| 272 | radeon_scratch_init(rdev); | 276 | radeon_scratch_init(rdev); |
| 273 | /* Initialize surface registers */ | 277 | /* Initialize surface registers */ |
diff --git a/drivers/gpu/drm/radeon/r420d.h b/drivers/gpu/drm/radeon/r420d.h index a48a7db1e2aa..fc78d31a0b4a 100644 --- a/drivers/gpu/drm/radeon/r420d.h +++ b/drivers/gpu/drm/radeon/r420d.h | |||
| @@ -212,9 +212,9 @@ | |||
| 212 | #define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) | 212 | #define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) |
| 213 | #define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) | 213 | #define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) |
| 214 | #define C_00000D_FORCE_E2 0xFFEFFFFF | 214 | #define C_00000D_FORCE_E2 0xFFEFFFFF |
| 215 | #define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21) | 215 | #define S_00000D_FORCE_VAP(x) (((x) & 0x1) << 21) |
| 216 | #define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1) | 216 | #define G_00000D_FORCE_VAP(x) (((x) >> 21) & 0x1) |
| 217 | #define C_00000D_FORCE_SE 0xFFDFFFFF | 217 | #define C_00000D_FORCE_VAP 0xFFDFFFFF |
| 218 | #define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) | 218 | #define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) |
| 219 | #define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) | 219 | #define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) |
| 220 | #define C_00000D_FORCE_IDCT 0xFFBFFFFF | 220 | #define C_00000D_FORCE_IDCT 0xFFBFFFFF |
| @@ -224,24 +224,24 @@ | |||
| 224 | #define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) | 224 | #define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) |
| 225 | #define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) | 225 | #define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) |
| 226 | #define C_00000D_FORCE_RE 0xFEFFFFFF | 226 | #define C_00000D_FORCE_RE 0xFEFFFFFF |
| 227 | #define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25) | 227 | #define S_00000D_FORCE_SR(x) (((x) & 0x1) << 25) |
| 228 | #define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1) | 228 | #define G_00000D_FORCE_SR(x) (((x) >> 25) & 0x1) |
| 229 | #define C_00000D_FORCE_PB 0xFDFFFFFF | 229 | #define C_00000D_FORCE_SR 0xFDFFFFFF |
| 230 | #define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26) | 230 | #define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26) |
| 231 | #define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1) | 231 | #define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1) |
| 232 | #define C_00000D_FORCE_PX 0xFBFFFFFF | 232 | #define C_00000D_FORCE_PX 0xFBFFFFFF |
| 233 | #define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27) | 233 | #define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27) |
| 234 | #define G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1) | 234 | #define G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1) |
| 235 | #define C_00000D_FORCE_TX 0xF7FFFFFF | 235 | #define C_00000D_FORCE_TX 0xF7FFFFFF |
| 236 | #define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28) | 236 | #define S_00000D_FORCE_US(x) (((x) & 0x1) << 28) |
| 237 | #define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) | 237 | #define G_00000D_FORCE_US(x) (((x) >> 28) & 0x1) |
| 238 | #define C_00000D_FORCE_RB 0xEFFFFFFF | 238 | #define C_00000D_FORCE_US 0xEFFFFFFF |
| 239 | #define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29) | 239 | #define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29) |
| 240 | #define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1) | 240 | #define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1) |
| 241 | #define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF | 241 | #define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF |
| 242 | #define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30) | 242 | #define S_00000D_FORCE_SU(x) (((x) & 0x1) << 30) |
| 243 | #define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1) | 243 | #define G_00000D_FORCE_SU(x) (((x) >> 30) & 0x1) |
| 244 | #define C_00000D_FORCE_SUBPIC 0xBFFFFFFF | 244 | #define C_00000D_FORCE_SU 0xBFFFFFFF |
| 245 | #define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31) | 245 | #define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31) |
| 246 | #define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1) | 246 | #define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1) |
| 247 | #define C_00000D_FORCE_OV0 0x7FFFFFFF | 247 | #define C_00000D_FORCE_OV0 0x7FFFFFFF |
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 0bf13fccdaf2..a555b7b19b48 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
| @@ -186,7 +186,7 @@ static int r520_startup(struct radeon_device *rdev) | |||
| 186 | } | 186 | } |
| 187 | /* Enable IRQ */ | 187 | /* Enable IRQ */ |
| 188 | rdev->irq.sw_int = true; | 188 | rdev->irq.sw_int = true; |
| 189 | r100_irq_set(rdev); | 189 | rs600_irq_set(rdev); |
| 190 | /* 1M ring buffer */ | 190 | /* 1M ring buffer */ |
| 191 | r = r100_cp_init(rdev, 1024 * 1024); | 191 | r = r100_cp_init(rdev, 1024 * 1024); |
| 192 | if (r) { | 192 | if (r) { |
| @@ -228,7 +228,6 @@ int r520_init(struct radeon_device *rdev) | |||
| 228 | { | 228 | { |
| 229 | int r; | 229 | int r; |
| 230 | 230 | ||
| 231 | rdev->new_init_path = true; | ||
| 232 | /* Initialize scratch registers */ | 231 | /* Initialize scratch registers */ |
| 233 | radeon_scratch_init(rdev); | 232 | radeon_scratch_init(rdev); |
| 234 | /* Initialize surface registers */ | 233 | /* Initialize surface registers */ |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 2e4e60edbff4..609719490ec2 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -65,16 +65,11 @@ MODULE_FIRMWARE("radeon/RV710_me.bin"); | |||
| 65 | 65 | ||
| 66 | int r600_debugfs_mc_info_init(struct radeon_device *rdev); | 66 | int r600_debugfs_mc_info_init(struct radeon_device *rdev); |
| 67 | 67 | ||
| 68 | /* This files gather functions specifics to: | 68 | /* r600,rv610,rv630,rv620,rv635,rv670 */ |
| 69 | * r600,rv610,rv630,rv620,rv635,rv670 | ||
| 70 | * | ||
| 71 | * Some of these functions might be used by newer ASICs. | ||
| 72 | */ | ||
| 73 | int r600_mc_wait_for_idle(struct radeon_device *rdev); | 69 | int r600_mc_wait_for_idle(struct radeon_device *rdev); |
| 74 | void r600_gpu_init(struct radeon_device *rdev); | 70 | void r600_gpu_init(struct radeon_device *rdev); |
| 75 | void r600_fini(struct radeon_device *rdev); | 71 | void r600_fini(struct radeon_device *rdev); |
| 76 | 72 | ||
| 77 | |||
| 78 | /* | 73 | /* |
| 79 | * R600 PCIE GART | 74 | * R600 PCIE GART |
| 80 | */ | 75 | */ |
| @@ -168,7 +163,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev) | |||
| 168 | WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); | 163 | WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); |
| 169 | WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); | 164 | WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); |
| 170 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); | 165 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); |
| 171 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12); | 166 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); |
| 172 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); | 167 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); |
| 173 | WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | | 168 | WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | |
| 174 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); | 169 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); |
| @@ -225,6 +220,40 @@ void r600_pcie_gart_fini(struct radeon_device *rdev) | |||
| 225 | radeon_gart_fini(rdev); | 220 | radeon_gart_fini(rdev); |
| 226 | } | 221 | } |
| 227 | 222 | ||
| 223 | void r600_agp_enable(struct radeon_device *rdev) | ||
| 224 | { | ||
| 225 | u32 tmp; | ||
| 226 | int i; | ||
| 227 | |||
| 228 | /* Setup L2 cache */ | ||
| 229 | WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | | ||
| 230 | ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | | ||
| 231 | EFFECTIVE_L2_QUEUE_SIZE(7)); | ||
| 232 | WREG32(VM_L2_CNTL2, 0); | ||
| 233 | WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1)); | ||
| 234 | /* Setup TLB control */ | ||
| 235 | tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING | | ||
| 236 | SYSTEM_ACCESS_MODE_NOT_IN_SYS | | ||
| 237 | EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) | | ||
| 238 | ENABLE_WAIT_L2_QUERY; | ||
| 239 | WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp); | ||
| 240 | WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); | ||
| 241 | WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING); | ||
| 242 | WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); | ||
| 243 | WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp); | ||
| 244 | WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp); | ||
| 245 | WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp); | ||
| 246 | WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp); | ||
| 247 | WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp); | ||
| 248 | WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp); | ||
| 249 | WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp); | ||
| 250 | WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp); | ||
| 251 | WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); | ||
| 252 | WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); | ||
| 253 | for (i = 0; i < 7; i++) | ||
| 254 | WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); | ||
| 255 | } | ||
| 256 | |||
| 228 | int r600_mc_wait_for_idle(struct radeon_device *rdev) | 257 | int r600_mc_wait_for_idle(struct radeon_device *rdev) |
| 229 | { | 258 | { |
| 230 | unsigned i; | 259 | unsigned i; |
| @@ -240,14 +269,9 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev) | |||
| 240 | return -1; | 269 | return -1; |
| 241 | } | 270 | } |
| 242 | 271 | ||
| 243 | static void r600_mc_resume(struct radeon_device *rdev) | 272 | static void r600_mc_program(struct radeon_device *rdev) |
| 244 | { | 273 | { |
| 245 | u32 d1vga_control, d2vga_control; | 274 | struct rv515_mc_save save; |
| 246 | u32 vga_render_control, vga_hdp_control; | ||
| 247 | u32 d1crtc_control, d2crtc_control; | ||
| 248 | u32 new_d1grph_primary, new_d1grph_secondary; | ||
| 249 | u32 new_d2grph_primary, new_d2grph_secondary; | ||
| 250 | u64 old_vram_start; | ||
| 251 | u32 tmp; | 275 | u32 tmp; |
| 252 | int i, j; | 276 | int i, j; |
| 253 | 277 | ||
| @@ -261,85 +285,51 @@ static void r600_mc_resume(struct radeon_device *rdev) | |||
| 261 | } | 285 | } |
| 262 | WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); | 286 | WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); |
| 263 | 287 | ||
| 264 | d1vga_control = RREG32(D1VGA_CONTROL); | 288 | rv515_mc_stop(rdev, &save); |
| 265 | d2vga_control = RREG32(D2VGA_CONTROL); | ||
| 266 | vga_render_control = RREG32(VGA_RENDER_CONTROL); | ||
| 267 | vga_hdp_control = RREG32(VGA_HDP_CONTROL); | ||
| 268 | d1crtc_control = RREG32(D1CRTC_CONTROL); | ||
| 269 | d2crtc_control = RREG32(D2CRTC_CONTROL); | ||
| 270 | old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; | ||
| 271 | new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS); | ||
| 272 | new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS); | ||
| 273 | new_d1grph_primary += rdev->mc.vram_start - old_vram_start; | ||
| 274 | new_d1grph_secondary += rdev->mc.vram_start - old_vram_start; | ||
| 275 | new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS); | ||
| 276 | new_d2grph_secondary = RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS); | ||
| 277 | new_d2grph_primary += rdev->mc.vram_start - old_vram_start; | ||
| 278 | new_d2grph_secondary += rdev->mc.vram_start - old_vram_start; | ||
| 279 | |||
| 280 | /* Stop all video */ | ||
| 281 | WREG32(D1VGA_CONTROL, 0); | ||
| 282 | WREG32(D2VGA_CONTROL, 0); | ||
| 283 | WREG32(VGA_RENDER_CONTROL, 0); | ||
| 284 | WREG32(D1CRTC_UPDATE_LOCK, 1); | ||
| 285 | WREG32(D2CRTC_UPDATE_LOCK, 1); | ||
| 286 | WREG32(D1CRTC_CONTROL, 0); | ||
| 287 | WREG32(D2CRTC_CONTROL, 0); | ||
| 288 | WREG32(D1CRTC_UPDATE_LOCK, 0); | ||
| 289 | WREG32(D2CRTC_UPDATE_LOCK, 0); | ||
| 290 | |||
| 291 | mdelay(1); | ||
| 292 | if (r600_mc_wait_for_idle(rdev)) { | 289 | if (r600_mc_wait_for_idle(rdev)) { |
| 293 | printk(KERN_WARNING "[drm] MC not idle !\n"); | 290 | dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); |
| 294 | } | 291 | } |
| 295 | 292 | /* Lockout access through VGA aperture (doesn't exist before R600) */ | |
| 296 | /* Lockout access through VGA aperture*/ | ||
| 297 | WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); | 293 | WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); |
| 298 | |||
| 299 | /* Update configuration */ | 294 | /* Update configuration */ |
| 300 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); | 295 | if (rdev->flags & RADEON_IS_AGP) { |
| 301 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12); | 296 | if (rdev->mc.vram_start < rdev->mc.gtt_start) { |
| 297 | /* VRAM before AGP */ | ||
| 298 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, | ||
| 299 | rdev->mc.vram_start >> 12); | ||
| 300 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, | ||
| 301 | rdev->mc.gtt_end >> 12); | ||
| 302 | } else { | ||
| 303 | /* VRAM after AGP */ | ||
| 304 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, | ||
| 305 | rdev->mc.gtt_start >> 12); | ||
| 306 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, | ||
| 307 | rdev->mc.vram_end >> 12); | ||
| 308 | } | ||
| 309 | } else { | ||
| 310 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); | ||
| 311 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12); | ||
| 312 | } | ||
| 302 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); | 313 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); |
| 303 | tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16; | 314 | tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; |
| 304 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); | 315 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); |
| 305 | WREG32(MC_VM_FB_LOCATION, tmp); | 316 | WREG32(MC_VM_FB_LOCATION, tmp); |
| 306 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); | 317 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); |
| 307 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); | 318 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); |
| 308 | WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); | 319 | WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF); |
| 309 | if (rdev->flags & RADEON_IS_AGP) { | 320 | if (rdev->flags & RADEON_IS_AGP) { |
| 310 | WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16); | 321 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22); |
| 311 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); | 322 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22); |
| 312 | WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); | 323 | WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); |
| 313 | } else { | 324 | } else { |
| 314 | WREG32(MC_VM_AGP_BASE, 0); | 325 | WREG32(MC_VM_AGP_BASE, 0); |
| 315 | WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); | 326 | WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); |
| 316 | WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); | 327 | WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); |
| 317 | } | 328 | } |
| 318 | WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary); | ||
| 319 | WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary); | ||
| 320 | WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary); | ||
| 321 | WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary); | ||
| 322 | WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start); | ||
| 323 | |||
| 324 | /* Unlock host access */ | ||
| 325 | WREG32(VGA_HDP_CONTROL, vga_hdp_control); | ||
| 326 | |||
| 327 | mdelay(1); | ||
| 328 | if (r600_mc_wait_for_idle(rdev)) { | 329 | if (r600_mc_wait_for_idle(rdev)) { |
| 329 | printk(KERN_WARNING "[drm] MC not idle !\n"); | 330 | dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); |
| 330 | } | 331 | } |
| 331 | 332 | rv515_mc_resume(rdev, &save); | |
| 332 | /* Restore video state */ | ||
| 333 | WREG32(D1CRTC_UPDATE_LOCK, 1); | ||
| 334 | WREG32(D2CRTC_UPDATE_LOCK, 1); | ||
| 335 | WREG32(D1CRTC_CONTROL, d1crtc_control); | ||
| 336 | WREG32(D2CRTC_CONTROL, d2crtc_control); | ||
| 337 | WREG32(D1CRTC_UPDATE_LOCK, 0); | ||
| 338 | WREG32(D2CRTC_UPDATE_LOCK, 0); | ||
| 339 | WREG32(D1VGA_CONTROL, d1vga_control); | ||
| 340 | WREG32(D2VGA_CONTROL, d2vga_control); | ||
| 341 | WREG32(VGA_RENDER_CONTROL, vga_render_control); | ||
| 342 | |||
| 343 | /* we need to own VRAM, so turn off the VGA renderer here | 333 | /* we need to own VRAM, so turn off the VGA renderer here |
| 344 | * to stop it overwriting our objects */ | 334 | * to stop it overwriting our objects */ |
| 345 | rv515_vga_render_disable(rdev); | 335 | rv515_vga_render_disable(rdev); |
| @@ -445,9 +435,9 @@ int r600_mc_init(struct radeon_device *rdev) | |||
| 445 | } | 435 | } |
| 446 | } | 436 | } |
| 447 | rdev->mc.vram_start = rdev->mc.vram_location; | 437 | rdev->mc.vram_start = rdev->mc.vram_location; |
| 448 | rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size; | 438 | rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
| 449 | rdev->mc.gtt_start = rdev->mc.gtt_location; | 439 | rdev->mc.gtt_start = rdev->mc.gtt_location; |
| 450 | rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size; | 440 | rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; |
| 451 | /* FIXME: we should enforce default clock in case GPU is not in | 441 | /* FIXME: we should enforce default clock in case GPU is not in |
| 452 | * default setup | 442 | * default setup |
| 453 | */ | 443 | */ |
| @@ -463,6 +453,7 @@ int r600_mc_init(struct radeon_device *rdev) | |||
| 463 | */ | 453 | */ |
| 464 | int r600_gpu_soft_reset(struct radeon_device *rdev) | 454 | int r600_gpu_soft_reset(struct radeon_device *rdev) |
| 465 | { | 455 | { |
| 456 | struct rv515_mc_save save; | ||
| 466 | u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) | | 457 | u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) | |
| 467 | S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) | | 458 | S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) | |
| 468 | S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) | | 459 | S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) | |
| @@ -480,13 +471,25 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) | |||
| 480 | S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) | | 471 | S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) | |
| 481 | S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); | 472 | S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); |
| 482 | u32 srbm_reset = 0; | 473 | u32 srbm_reset = 0; |
| 474 | u32 tmp; | ||
| 483 | 475 | ||
| 476 | dev_info(rdev->dev, "GPU softreset \n"); | ||
| 477 | dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", | ||
| 478 | RREG32(R_008010_GRBM_STATUS)); | ||
| 479 | dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", | ||
| 480 | RREG32(R_008014_GRBM_STATUS2)); | ||
| 481 | dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", | ||
| 482 | RREG32(R_000E50_SRBM_STATUS)); | ||
| 483 | rv515_mc_stop(rdev, &save); | ||
| 484 | if (r600_mc_wait_for_idle(rdev)) { | ||
| 485 | dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); | ||
| 486 | } | ||
| 484 | /* Disable CP parsing/prefetching */ | 487 | /* Disable CP parsing/prefetching */ |
| 485 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff)); | 488 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff)); |
| 486 | /* Check if any of the rendering block is busy and reset it */ | 489 | /* Check if any of the rendering block is busy and reset it */ |
| 487 | if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) || | 490 | if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) || |
| 488 | (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { | 491 | (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { |
| 489 | WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CR(1) | | 492 | tmp = S_008020_SOFT_RESET_CR(1) | |
| 490 | S_008020_SOFT_RESET_DB(1) | | 493 | S_008020_SOFT_RESET_DB(1) | |
| 491 | S_008020_SOFT_RESET_CB(1) | | 494 | S_008020_SOFT_RESET_CB(1) | |
| 492 | S_008020_SOFT_RESET_PA(1) | | 495 | S_008020_SOFT_RESET_PA(1) | |
| @@ -498,14 +501,18 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) | |||
| 498 | S_008020_SOFT_RESET_TC(1) | | 501 | S_008020_SOFT_RESET_TC(1) | |
| 499 | S_008020_SOFT_RESET_TA(1) | | 502 | S_008020_SOFT_RESET_TA(1) | |
| 500 | S_008020_SOFT_RESET_VC(1) | | 503 | S_008020_SOFT_RESET_VC(1) | |
| 501 | S_008020_SOFT_RESET_VGT(1)); | 504 | S_008020_SOFT_RESET_VGT(1); |
| 505 | dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); | ||
| 506 | WREG32(R_008020_GRBM_SOFT_RESET, tmp); | ||
| 502 | (void)RREG32(R_008020_GRBM_SOFT_RESET); | 507 | (void)RREG32(R_008020_GRBM_SOFT_RESET); |
| 503 | udelay(50); | 508 | udelay(50); |
| 504 | WREG32(R_008020_GRBM_SOFT_RESET, 0); | 509 | WREG32(R_008020_GRBM_SOFT_RESET, 0); |
| 505 | (void)RREG32(R_008020_GRBM_SOFT_RESET); | 510 | (void)RREG32(R_008020_GRBM_SOFT_RESET); |
| 506 | } | 511 | } |
| 507 | /* Reset CP (we always reset CP) */ | 512 | /* Reset CP (we always reset CP) */ |
| 508 | WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CP(1)); | 513 | tmp = S_008020_SOFT_RESET_CP(1); |
| 514 | dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); | ||
| 515 | WREG32(R_008020_GRBM_SOFT_RESET, tmp); | ||
| 509 | (void)RREG32(R_008020_GRBM_SOFT_RESET); | 516 | (void)RREG32(R_008020_GRBM_SOFT_RESET); |
| 510 | udelay(50); | 517 | udelay(50); |
| 511 | WREG32(R_008020_GRBM_SOFT_RESET, 0); | 518 | WREG32(R_008020_GRBM_SOFT_RESET, 0); |
| @@ -533,6 +540,14 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) | |||
| 533 | srbm_reset |= S_000E60_SOFT_RESET_RLC(1); | 540 | srbm_reset |= S_000E60_SOFT_RESET_RLC(1); |
| 534 | if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS))) | 541 | if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS))) |
| 535 | srbm_reset |= S_000E60_SOFT_RESET_SEM(1); | 542 | srbm_reset |= S_000E60_SOFT_RESET_SEM(1); |
| 543 | if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS))) | ||
| 544 | srbm_reset |= S_000E60_SOFT_RESET_BIF(1); | ||
| 545 | dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset); | ||
| 546 | WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset); | ||
| 547 | (void)RREG32(R_000E60_SRBM_SOFT_RESET); | ||
| 548 | udelay(50); | ||
| 549 | WREG32(R_000E60_SRBM_SOFT_RESET, 0); | ||
| 550 | (void)RREG32(R_000E60_SRBM_SOFT_RESET); | ||
| 536 | WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset); | 551 | WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset); |
| 537 | (void)RREG32(R_000E60_SRBM_SOFT_RESET); | 552 | (void)RREG32(R_000E60_SRBM_SOFT_RESET); |
| 538 | udelay(50); | 553 | udelay(50); |
| @@ -540,6 +555,17 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) | |||
| 540 | (void)RREG32(R_000E60_SRBM_SOFT_RESET); | 555 | (void)RREG32(R_000E60_SRBM_SOFT_RESET); |
| 541 | /* Wait a little for things to settle down */ | 556 | /* Wait a little for things to settle down */ |
| 542 | udelay(50); | 557 | udelay(50); |
| 558 | dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", | ||
| 559 | RREG32(R_008010_GRBM_STATUS)); | ||
| 560 | dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", | ||
| 561 | RREG32(R_008014_GRBM_STATUS2)); | ||
| 562 | dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", | ||
| 563 | RREG32(R_000E50_SRBM_STATUS)); | ||
| 564 | /* After reset we need to reinit the asic as GPU often endup in an | ||
| 565 | * incoherent state. | ||
| 566 | */ | ||
| 567 | atom_asic_init(rdev->mode_info.atom_context); | ||
| 568 | rv515_mc_resume(rdev, &save); | ||
| 543 | return 0; | 569 | return 0; |
| 544 | } | 570 | } |
| 545 | 571 | ||
| @@ -1350,32 +1376,47 @@ int r600_ring_test(struct radeon_device *rdev) | |||
| 1350 | return r; | 1376 | return r; |
| 1351 | } | 1377 | } |
| 1352 | 1378 | ||
| 1353 | /* | 1379 | void r600_wb_disable(struct radeon_device *rdev) |
| 1354 | * Writeback | 1380 | { |
| 1355 | */ | 1381 | WREG32(SCRATCH_UMSK, 0); |
| 1356 | int r600_wb_init(struct radeon_device *rdev) | 1382 | if (rdev->wb.wb_obj) { |
| 1383 | radeon_object_kunmap(rdev->wb.wb_obj); | ||
| 1384 | radeon_object_unpin(rdev->wb.wb_obj); | ||
| 1385 | } | ||
| 1386 | } | ||
| 1387 | |||
| 1388 | void r600_wb_fini(struct radeon_device *rdev) | ||
| 1389 | { | ||
| 1390 | r600_wb_disable(rdev); | ||
| 1391 | if (rdev->wb.wb_obj) { | ||
| 1392 | radeon_object_unref(&rdev->wb.wb_obj); | ||
| 1393 | rdev->wb.wb = NULL; | ||
| 1394 | rdev->wb.wb_obj = NULL; | ||
| 1395 | } | ||
| 1396 | } | ||
| 1397 | |||
| 1398 | int r600_wb_enable(struct radeon_device *rdev) | ||
| 1357 | { | 1399 | { |
| 1358 | int r; | 1400 | int r; |
| 1359 | 1401 | ||
| 1360 | if (rdev->wb.wb_obj == NULL) { | 1402 | if (rdev->wb.wb_obj == NULL) { |
| 1361 | r = radeon_object_create(rdev, NULL, 4096, | 1403 | r = radeon_object_create(rdev, NULL, 4096, true, |
| 1362 | true, | 1404 | RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj); |
| 1363 | RADEON_GEM_DOMAIN_GTT, | ||
| 1364 | false, &rdev->wb.wb_obj); | ||
| 1365 | if (r) { | 1405 | if (r) { |
| 1366 | DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r); | 1406 | dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r); |
| 1367 | return r; | 1407 | return r; |
| 1368 | } | 1408 | } |
| 1369 | r = radeon_object_pin(rdev->wb.wb_obj, | 1409 | r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, |
| 1370 | RADEON_GEM_DOMAIN_GTT, | 1410 | &rdev->wb.gpu_addr); |
| 1371 | &rdev->wb.gpu_addr); | ||
| 1372 | if (r) { | 1411 | if (r) { |
| 1373 | DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r); | 1412 | dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r); |
| 1413 | r600_wb_fini(rdev); | ||
| 1374 | return r; | 1414 | return r; |
| 1375 | } | 1415 | } |
| 1376 | r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); | 1416 | r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); |
| 1377 | if (r) { | 1417 | if (r) { |
| 1378 | DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r); | 1418 | dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r); |
| 1419 | r600_wb_fini(rdev); | ||
| 1379 | return r; | 1420 | return r; |
| 1380 | } | 1421 | } |
| 1381 | } | 1422 | } |
| @@ -1386,21 +1427,6 @@ int r600_wb_init(struct radeon_device *rdev) | |||
| 1386 | return 0; | 1427 | return 0; |
| 1387 | } | 1428 | } |
| 1388 | 1429 | ||
| 1389 | void r600_wb_fini(struct radeon_device *rdev) | ||
| 1390 | { | ||
| 1391 | if (rdev->wb.wb_obj) { | ||
| 1392 | radeon_object_kunmap(rdev->wb.wb_obj); | ||
| 1393 | radeon_object_unpin(rdev->wb.wb_obj); | ||
| 1394 | radeon_object_unref(&rdev->wb.wb_obj); | ||
| 1395 | rdev->wb.wb = NULL; | ||
| 1396 | rdev->wb.wb_obj = NULL; | ||
| 1397 | } | ||
| 1398 | } | ||
| 1399 | |||
| 1400 | |||
| 1401 | /* | ||
| 1402 | * CS | ||
| 1403 | */ | ||
| 1404 | void r600_fence_ring_emit(struct radeon_device *rdev, | 1430 | void r600_fence_ring_emit(struct radeon_device *rdev, |
| 1405 | struct radeon_fence *fence) | 1431 | struct radeon_fence *fence) |
| 1406 | { | 1432 | { |
| @@ -1477,11 +1503,14 @@ int r600_startup(struct radeon_device *rdev) | |||
| 1477 | { | 1503 | { |
| 1478 | int r; | 1504 | int r; |
| 1479 | 1505 | ||
| 1480 | r600_gpu_reset(rdev); | 1506 | r600_mc_program(rdev); |
| 1481 | r600_mc_resume(rdev); | 1507 | if (rdev->flags & RADEON_IS_AGP) { |
| 1482 | r = r600_pcie_gart_enable(rdev); | 1508 | r600_agp_enable(rdev); |
| 1483 | if (r) | 1509 | } else { |
| 1484 | return r; | 1510 | r = r600_pcie_gart_enable(rdev); |
| 1511 | if (r) | ||
| 1512 | return r; | ||
| 1513 | } | ||
| 1485 | r600_gpu_init(rdev); | 1514 | r600_gpu_init(rdev); |
| 1486 | 1515 | ||
| 1487 | r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | 1516 | r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, |
| @@ -1500,9 +1529,8 @@ int r600_startup(struct radeon_device *rdev) | |||
| 1500 | r = r600_cp_resume(rdev); | 1529 | r = r600_cp_resume(rdev); |
| 1501 | if (r) | 1530 | if (r) |
| 1502 | return r; | 1531 | return r; |
| 1503 | r = r600_wb_init(rdev); | 1532 | /* write back buffer are not vital so don't worry about failure */ |
| 1504 | if (r) | 1533 | r600_wb_enable(rdev); |
| 1505 | return r; | ||
| 1506 | return 0; | 1534 | return 0; |
| 1507 | } | 1535 | } |
| 1508 | 1536 | ||
| @@ -1524,15 +1552,12 @@ int r600_resume(struct radeon_device *rdev) | |||
| 1524 | { | 1552 | { |
| 1525 | int r; | 1553 | int r; |
| 1526 | 1554 | ||
| 1527 | if (radeon_gpu_reset(rdev)) { | 1555 | /* Do not reset GPU before posting, on r600 hw unlike on r500 hw, |
| 1528 | /* FIXME: what do we want to do here ? */ | 1556 | * posting will perform necessary task to bring back GPU into good |
| 1529 | } | 1557 | * shape. |
| 1558 | */ | ||
| 1530 | /* post card */ | 1559 | /* post card */ |
| 1531 | if (rdev->is_atom_bios) { | 1560 | atom_asic_init(rdev->mode_info.atom_context); |
| 1532 | atom_asic_init(rdev->mode_info.atom_context); | ||
| 1533 | } else { | ||
| 1534 | radeon_combios_asic_init(rdev->ddev); | ||
| 1535 | } | ||
| 1536 | /* Initialize clocks */ | 1561 | /* Initialize clocks */ |
| 1537 | r = radeon_clocks_init(rdev); | 1562 | r = radeon_clocks_init(rdev); |
| 1538 | if (r) { | 1563 | if (r) { |
| @@ -1545,7 +1570,7 @@ int r600_resume(struct radeon_device *rdev) | |||
| 1545 | return r; | 1570 | return r; |
| 1546 | } | 1571 | } |
| 1547 | 1572 | ||
| 1548 | r = radeon_ib_test(rdev); | 1573 | r = r600_ib_test(rdev); |
| 1549 | if (r) { | 1574 | if (r) { |
| 1550 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); | 1575 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); |
| 1551 | return r; | 1576 | return r; |
| @@ -1553,13 +1578,12 @@ int r600_resume(struct radeon_device *rdev) | |||
| 1553 | return r; | 1578 | return r; |
| 1554 | } | 1579 | } |
| 1555 | 1580 | ||
| 1556 | |||
| 1557 | int r600_suspend(struct radeon_device *rdev) | 1581 | int r600_suspend(struct radeon_device *rdev) |
| 1558 | { | 1582 | { |
| 1559 | /* FIXME: we should wait for ring to be empty */ | 1583 | /* FIXME: we should wait for ring to be empty */ |
| 1560 | r600_cp_stop(rdev); | 1584 | r600_cp_stop(rdev); |
| 1561 | rdev->cp.ready = false; | 1585 | rdev->cp.ready = false; |
| 1562 | 1586 | r600_wb_disable(rdev); | |
| 1563 | r600_pcie_gart_disable(rdev); | 1587 | r600_pcie_gart_disable(rdev); |
| 1564 | /* unpin shaders bo */ | 1588 | /* unpin shaders bo */ |
| 1565 | radeon_object_unpin(rdev->r600_blit.shader_obj); | 1589 | radeon_object_unpin(rdev->r600_blit.shader_obj); |
| @@ -1576,7 +1600,6 @@ int r600_init(struct radeon_device *rdev) | |||
| 1576 | { | 1600 | { |
| 1577 | int r; | 1601 | int r; |
| 1578 | 1602 | ||
| 1579 | rdev->new_init_path = true; | ||
| 1580 | r = radeon_dummy_page_init(rdev); | 1603 | r = radeon_dummy_page_init(rdev); |
| 1581 | if (r) | 1604 | if (r) |
| 1582 | return r; | 1605 | return r; |
| @@ -1593,8 +1616,10 @@ int r600_init(struct radeon_device *rdev) | |||
| 1593 | return -EINVAL; | 1616 | return -EINVAL; |
| 1594 | } | 1617 | } |
| 1595 | /* Must be an ATOMBIOS */ | 1618 | /* Must be an ATOMBIOS */ |
| 1596 | if (!rdev->is_atom_bios) | 1619 | if (!rdev->is_atom_bios) { |
| 1620 | dev_err(rdev->dev, "Expecting atombios for R600 GPU\n"); | ||
| 1597 | return -EINVAL; | 1621 | return -EINVAL; |
| 1622 | } | ||
| 1598 | r = radeon_atombios_init(rdev); | 1623 | r = radeon_atombios_init(rdev); |
| 1599 | if (r) | 1624 | if (r) |
| 1600 | return r; | 1625 | return r; |
| @@ -1616,15 +1641,8 @@ int r600_init(struct radeon_device *rdev) | |||
| 1616 | if (r) | 1641 | if (r) |
| 1617 | return r; | 1642 | return r; |
| 1618 | r = r600_mc_init(rdev); | 1643 | r = r600_mc_init(rdev); |
| 1619 | if (r) { | 1644 | if (r) |
| 1620 | if (rdev->flags & RADEON_IS_AGP) { | ||
| 1621 | /* Retry with disabling AGP */ | ||
| 1622 | r600_fini(rdev); | ||
| 1623 | rdev->flags &= ~RADEON_IS_AGP; | ||
| 1624 | return r600_init(rdev); | ||
| 1625 | } | ||
| 1626 | return r; | 1645 | return r; |
| 1627 | } | ||
| 1628 | /* Memory manager */ | 1646 | /* Memory manager */ |
| 1629 | r = radeon_object_init(rdev); | 1647 | r = radeon_object_init(rdev); |
| 1630 | if (r) | 1648 | if (r) |
| @@ -1653,12 +1671,10 @@ int r600_init(struct radeon_device *rdev) | |||
| 1653 | 1671 | ||
| 1654 | r = r600_startup(rdev); | 1672 | r = r600_startup(rdev); |
| 1655 | if (r) { | 1673 | if (r) { |
| 1656 | if (rdev->flags & RADEON_IS_AGP) { | 1674 | r600_suspend(rdev); |
| 1657 | /* Retry with disabling AGP */ | 1675 | r600_wb_fini(rdev); |
| 1658 | r600_fini(rdev); | 1676 | radeon_ring_fini(rdev); |
| 1659 | rdev->flags &= ~RADEON_IS_AGP; | 1677 | r600_pcie_gart_fini(rdev); |
| 1660 | return r600_init(rdev); | ||
| 1661 | } | ||
| 1662 | rdev->accel_working = false; | 1678 | rdev->accel_working = false; |
| 1663 | } | 1679 | } |
| 1664 | if (rdev->accel_working) { | 1680 | if (rdev->accel_working) { |
| @@ -1667,7 +1683,7 @@ int r600_init(struct radeon_device *rdev) | |||
| 1667 | DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); | 1683 | DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); |
| 1668 | rdev->accel_working = false; | 1684 | rdev->accel_working = false; |
| 1669 | } | 1685 | } |
| 1670 | r = radeon_ib_test(rdev); | 1686 | r = r600_ib_test(rdev); |
| 1671 | if (r) { | 1687 | if (r) { |
| 1672 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); | 1688 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); |
| 1673 | rdev->accel_working = false; | 1689 | rdev->accel_working = false; |
| @@ -1683,19 +1699,15 @@ void r600_fini(struct radeon_device *rdev) | |||
| 1683 | 1699 | ||
| 1684 | r600_blit_fini(rdev); | 1700 | r600_blit_fini(rdev); |
| 1685 | radeon_ring_fini(rdev); | 1701 | radeon_ring_fini(rdev); |
| 1702 | r600_wb_fini(rdev); | ||
| 1686 | r600_pcie_gart_fini(rdev); | 1703 | r600_pcie_gart_fini(rdev); |
| 1687 | radeon_gem_fini(rdev); | 1704 | radeon_gem_fini(rdev); |
| 1688 | radeon_fence_driver_fini(rdev); | 1705 | radeon_fence_driver_fini(rdev); |
| 1689 | radeon_clocks_fini(rdev); | 1706 | radeon_clocks_fini(rdev); |
| 1690 | #if __OS_HAS_AGP | ||
| 1691 | if (rdev->flags & RADEON_IS_AGP) | 1707 | if (rdev->flags & RADEON_IS_AGP) |
| 1692 | radeon_agp_fini(rdev); | 1708 | radeon_agp_fini(rdev); |
| 1693 | #endif | ||
| 1694 | radeon_object_fini(rdev); | 1709 | radeon_object_fini(rdev); |
| 1695 | if (rdev->is_atom_bios) | 1710 | radeon_atombios_fini(rdev); |
| 1696 | radeon_atombios_fini(rdev); | ||
| 1697 | else | ||
| 1698 | radeon_combios_fini(rdev); | ||
| 1699 | kfree(rdev->bios); | 1711 | kfree(rdev->bios); |
| 1700 | rdev->bios = NULL; | 1712 | rdev->bios = NULL; |
| 1701 | radeon_dummy_page_fini(rdev); | 1713 | radeon_dummy_page_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c index d988eece0187..dec501081608 100644 --- a/drivers/gpu/drm/radeon/r600_blit.c +++ b/drivers/gpu/drm/radeon/r600_blit.c | |||
| @@ -582,8 +582,6 @@ r600_blit_copy(struct drm_device *dev, | |||
| 582 | u64 vb_addr; | 582 | u64 vb_addr; |
| 583 | u32 *vb; | 583 | u32 *vb; |
| 584 | 584 | ||
| 585 | vb = r600_nomm_get_vb_ptr(dev); | ||
| 586 | |||
| 587 | if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) { | 585 | if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) { |
| 588 | max_bytes = 8192; | 586 | max_bytes = 8192; |
| 589 | 587 | ||
| @@ -619,8 +617,8 @@ r600_blit_copy(struct drm_device *dev, | |||
| 619 | if (!dev_priv->blit_vb) | 617 | if (!dev_priv->blit_vb) |
| 620 | return; | 618 | return; |
| 621 | set_shaders(dev); | 619 | set_shaders(dev); |
| 622 | vb = r600_nomm_get_vb_ptr(dev); | ||
| 623 | } | 620 | } |
| 621 | vb = r600_nomm_get_vb_ptr(dev); | ||
| 624 | 622 | ||
| 625 | vb[0] = i2f(dst_x); | 623 | vb[0] = i2f(dst_x); |
| 626 | vb[1] = 0; | 624 | vb[1] = 0; |
| @@ -708,8 +706,8 @@ r600_blit_copy(struct drm_device *dev, | |||
| 708 | return; | 706 | return; |
| 709 | 707 | ||
| 710 | set_shaders(dev); | 708 | set_shaders(dev); |
| 711 | vb = r600_nomm_get_vb_ptr(dev); | ||
| 712 | } | 709 | } |
| 710 | vb = r600_nomm_get_vb_ptr(dev); | ||
| 713 | 711 | ||
| 714 | vb[0] = i2f(dst_x / 4); | 712 | vb[0] = i2f(dst_x / 4); |
| 715 | vb[1] = 0; | 713 | vb[1] = 0; |
| @@ -777,8 +775,6 @@ r600_blit_swap(struct drm_device *dev, | |||
| 777 | u64 vb_addr; | 775 | u64 vb_addr; |
| 778 | u32 *vb; | 776 | u32 *vb; |
| 779 | 777 | ||
| 780 | vb = r600_nomm_get_vb_ptr(dev); | ||
| 781 | |||
| 782 | if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) { | 778 | if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) { |
| 783 | 779 | ||
| 784 | r600_nomm_put_vb(dev); | 780 | r600_nomm_put_vb(dev); |
| @@ -787,8 +783,8 @@ r600_blit_swap(struct drm_device *dev, | |||
| 787 | return; | 783 | return; |
| 788 | 784 | ||
| 789 | set_shaders(dev); | 785 | set_shaders(dev); |
| 790 | vb = r600_nomm_get_vb_ptr(dev); | ||
| 791 | } | 786 | } |
| 787 | vb = r600_nomm_get_vb_ptr(dev); | ||
| 792 | 788 | ||
| 793 | if (cpp == 4) { | 789 | if (cpp == 4) { |
| 794 | cb_format = COLOR_8_8_8_8; | 790 | cb_format = COLOR_8_8_8_8; |
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index acae33e2ad51..93108bb31d1d 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
| @@ -610,7 +610,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev, | |||
| 610 | 610 | ||
| 611 | DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr, | 611 | DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr, |
| 612 | size_bytes, rdev->r600_blit.vb_used); | 612 | size_bytes, rdev->r600_blit.vb_used); |
| 613 | vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used); | ||
| 614 | if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) { | 613 | if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) { |
| 615 | max_bytes = 8192; | 614 | max_bytes = 8192; |
| 616 | 615 | ||
| @@ -653,6 +652,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev, | |||
| 653 | vb = r600_nomm_get_vb_ptr(dev); | 652 | vb = r600_nomm_get_vb_ptr(dev); |
| 654 | #endif | 653 | #endif |
| 655 | } | 654 | } |
| 655 | vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used); | ||
| 656 | 656 | ||
| 657 | vb[0] = i2f(dst_x); | 657 | vb[0] = i2f(dst_x); |
| 658 | vb[1] = 0; | 658 | vb[1] = 0; |
| @@ -747,6 +747,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev, | |||
| 747 | vb = r600_nomm_get_vb_ptr(dev); | 747 | vb = r600_nomm_get_vb_ptr(dev); |
| 748 | } | 748 | } |
| 749 | #endif | 749 | #endif |
| 750 | vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used); | ||
| 750 | 751 | ||
| 751 | vb[0] = i2f(dst_x / 4); | 752 | vb[0] = i2f(dst_x / 4); |
| 752 | vb[1] = 0; | 753 | vb[1] = 0; |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index d28970db6a2d..17e42195c632 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
| @@ -252,7 +252,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
| 252 | 252 | ||
| 253 | header = radeon_get_ib_value(p, h_idx); | 253 | header = radeon_get_ib_value(p, h_idx); |
| 254 | crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); | 254 | crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); |
| 255 | reg = header >> 2; | 255 | reg = CP_PACKET0_GET_REG(header); |
| 256 | mutex_lock(&p->rdev->ddev->mode_config.mutex); | 256 | mutex_lock(&p->rdev->ddev->mode_config.mutex); |
| 257 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); | 257 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); |
| 258 | if (!obj) { | 258 | if (!obj) { |
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 4a9028a85c9b..9b64d47f1f82 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
| @@ -643,6 +643,7 @@ | |||
| 643 | #define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1) | 643 | #define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1) |
| 644 | #define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1) | 644 | #define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1) |
| 645 | #define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1) | 645 | #define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1) |
| 646 | #define G_000E50_BIF_BUSY(x) (((x) >> 29) & 1) | ||
| 646 | #define R_000E60_SRBM_SOFT_RESET 0x0E60 | 647 | #define R_000E60_SRBM_SOFT_RESET 0x0E60 |
| 647 | #define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1) | 648 | #define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1) |
| 648 | #define S_000E60_SOFT_RESET_CG(x) (((x) & 1) << 2) | 649 | #define S_000E60_SOFT_RESET_CG(x) (((x) & 1) << 2) |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 950b346e343f..5ab35b81c86b 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -590,18 +590,8 @@ struct radeon_asic { | |||
| 590 | void (*fini)(struct radeon_device *rdev); | 590 | void (*fini)(struct radeon_device *rdev); |
| 591 | int (*resume)(struct radeon_device *rdev); | 591 | int (*resume)(struct radeon_device *rdev); |
| 592 | int (*suspend)(struct radeon_device *rdev); | 592 | int (*suspend)(struct radeon_device *rdev); |
| 593 | void (*errata)(struct radeon_device *rdev); | ||
| 594 | void (*vram_info)(struct radeon_device *rdev); | ||
| 595 | void (*vga_set_state)(struct radeon_device *rdev, bool state); | 593 | void (*vga_set_state)(struct radeon_device *rdev, bool state); |
| 596 | int (*gpu_reset)(struct radeon_device *rdev); | 594 | int (*gpu_reset)(struct radeon_device *rdev); |
| 597 | int (*mc_init)(struct radeon_device *rdev); | ||
| 598 | void (*mc_fini)(struct radeon_device *rdev); | ||
| 599 | int (*wb_init)(struct radeon_device *rdev); | ||
| 600 | void (*wb_fini)(struct radeon_device *rdev); | ||
| 601 | int (*gart_init)(struct radeon_device *rdev); | ||
| 602 | void (*gart_fini)(struct radeon_device *rdev); | ||
| 603 | int (*gart_enable)(struct radeon_device *rdev); | ||
| 604 | void (*gart_disable)(struct radeon_device *rdev); | ||
| 605 | void (*gart_tlb_flush)(struct radeon_device *rdev); | 595 | void (*gart_tlb_flush)(struct radeon_device *rdev); |
| 606 | int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr); | 596 | int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr); |
| 607 | int (*cp_init)(struct radeon_device *rdev, unsigned ring_size); | 597 | int (*cp_init)(struct radeon_device *rdev, unsigned ring_size); |
| @@ -611,7 +601,6 @@ struct radeon_asic { | |||
| 611 | void (*ring_start)(struct radeon_device *rdev); | 601 | void (*ring_start)(struct radeon_device *rdev); |
| 612 | int (*ring_test)(struct radeon_device *rdev); | 602 | int (*ring_test)(struct radeon_device *rdev); |
| 613 | void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); | 603 | void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); |
| 614 | int (*ib_test)(struct radeon_device *rdev); | ||
| 615 | int (*irq_set)(struct radeon_device *rdev); | 604 | int (*irq_set)(struct radeon_device *rdev); |
| 616 | int (*irq_process)(struct radeon_device *rdev); | 605 | int (*irq_process)(struct radeon_device *rdev); |
| 617 | u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); | 606 | u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); |
| @@ -789,7 +778,6 @@ struct radeon_device { | |||
| 789 | bool shutdown; | 778 | bool shutdown; |
| 790 | bool suspend; | 779 | bool suspend; |
| 791 | bool need_dma32; | 780 | bool need_dma32; |
| 792 | bool new_init_path; | ||
| 793 | bool accel_working; | 781 | bool accel_working; |
| 794 | struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; | 782 | struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; |
| 795 | const struct firmware *me_fw; /* all family ME firmware */ | 783 | const struct firmware *me_fw; /* all family ME firmware */ |
| @@ -949,28 +937,14 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) | |||
| 949 | #define radeon_resume(rdev) (rdev)->asic->resume((rdev)) | 937 | #define radeon_resume(rdev) (rdev)->asic->resume((rdev)) |
| 950 | #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) | 938 | #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) |
| 951 | #define radeon_cs_parse(p) rdev->asic->cs_parse((p)) | 939 | #define radeon_cs_parse(p) rdev->asic->cs_parse((p)) |
| 952 | #define radeon_errata(rdev) (rdev)->asic->errata((rdev)) | ||
| 953 | #define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev)) | ||
| 954 | #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) | 940 | #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) |
| 955 | #define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev)) | 941 | #define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev)) |
| 956 | #define radeon_mc_init(rdev) (rdev)->asic->mc_init((rdev)) | ||
| 957 | #define radeon_mc_fini(rdev) (rdev)->asic->mc_fini((rdev)) | ||
| 958 | #define radeon_wb_init(rdev) (rdev)->asic->wb_init((rdev)) | ||
| 959 | #define radeon_wb_fini(rdev) (rdev)->asic->wb_fini((rdev)) | ||
| 960 | #define radeon_gpu_gart_init(rdev) (rdev)->asic->gart_init((rdev)) | ||
| 961 | #define radeon_gpu_gart_fini(rdev) (rdev)->asic->gart_fini((rdev)) | ||
| 962 | #define radeon_gart_enable(rdev) (rdev)->asic->gart_enable((rdev)) | ||
| 963 | #define radeon_gart_disable(rdev) (rdev)->asic->gart_disable((rdev)) | ||
| 964 | #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev)) | 942 | #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev)) |
| 965 | #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p)) | 943 | #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p)) |
| 966 | #define radeon_cp_init(rdev,rsize) (rdev)->asic->cp_init((rdev), (rsize)) | ||
| 967 | #define radeon_cp_fini(rdev) (rdev)->asic->cp_fini((rdev)) | ||
| 968 | #define radeon_cp_disable(rdev) (rdev)->asic->cp_disable((rdev)) | ||
| 969 | #define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev)) | 944 | #define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev)) |
| 970 | #define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev)) | 945 | #define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev)) |
| 971 | #define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev)) | 946 | #define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev)) |
| 972 | #define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib)) | 947 | #define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib)) |
| 973 | #define radeon_ib_test(rdev) (rdev)->asic->ib_test((rdev)) | ||
| 974 | #define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev)) | 948 | #define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev)) |
| 975 | #define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev)) | 949 | #define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev)) |
| 976 | #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc)) | 950 | #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc)) |
| @@ -996,6 +970,7 @@ extern void radeon_clocks_fini(struct radeon_device *rdev); | |||
| 996 | extern void radeon_scratch_init(struct radeon_device *rdev); | 970 | extern void radeon_scratch_init(struct radeon_device *rdev); |
| 997 | extern void radeon_surface_init(struct radeon_device *rdev); | 971 | extern void radeon_surface_init(struct radeon_device *rdev); |
| 998 | extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); | 972 | extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); |
| 973 | extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); | ||
| 999 | extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); | 974 | extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); |
| 1000 | 975 | ||
| 1001 | /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ | 976 | /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ |
| @@ -1031,11 +1006,27 @@ extern int r100_wb_init(struct radeon_device *rdev); | |||
| 1031 | extern void r100_hdp_reset(struct radeon_device *rdev); | 1006 | extern void r100_hdp_reset(struct radeon_device *rdev); |
| 1032 | extern int r100_rb2d_reset(struct radeon_device *rdev); | 1007 | extern int r100_rb2d_reset(struct radeon_device *rdev); |
| 1033 | extern int r100_cp_reset(struct radeon_device *rdev); | 1008 | extern int r100_cp_reset(struct radeon_device *rdev); |
| 1009 | extern void r100_vga_render_disable(struct radeon_device *rdev); | ||
| 1010 | extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, | ||
| 1011 | struct radeon_cs_packet *pkt, | ||
| 1012 | struct radeon_object *robj); | ||
| 1013 | extern int r100_cs_parse_packet0(struct radeon_cs_parser *p, | ||
| 1014 | struct radeon_cs_packet *pkt, | ||
| 1015 | const unsigned *auth, unsigned n, | ||
| 1016 | radeon_packet0_check_t check); | ||
| 1017 | extern int r100_cs_packet_parse(struct radeon_cs_parser *p, | ||
| 1018 | struct radeon_cs_packet *pkt, | ||
| 1019 | unsigned idx); | ||
| 1020 | |||
| 1021 | /* rv200,rv250,rv280 */ | ||
| 1022 | extern void r200_set_safe_registers(struct radeon_device *rdev); | ||
| 1034 | 1023 | ||
| 1035 | /* r300,r350,rv350,rv370,rv380 */ | 1024 | /* r300,r350,rv350,rv370,rv380 */ |
| 1036 | extern void r300_set_reg_safe(struct radeon_device *rdev); | 1025 | extern void r300_set_reg_safe(struct radeon_device *rdev); |
| 1037 | extern void r300_mc_program(struct radeon_device *rdev); | 1026 | extern void r300_mc_program(struct radeon_device *rdev); |
| 1038 | extern void r300_vram_info(struct radeon_device *rdev); | 1027 | extern void r300_vram_info(struct radeon_device *rdev); |
| 1028 | extern void r300_clock_startup(struct radeon_device *rdev); | ||
| 1029 | extern int r300_mc_wait_for_idle(struct radeon_device *rdev); | ||
| 1039 | extern int rv370_pcie_gart_init(struct radeon_device *rdev); | 1030 | extern int rv370_pcie_gart_init(struct radeon_device *rdev); |
| 1040 | extern void rv370_pcie_gart_fini(struct radeon_device *rdev); | 1031 | extern void rv370_pcie_gart_fini(struct radeon_device *rdev); |
| 1041 | extern int rv370_pcie_gart_enable(struct radeon_device *rdev); | 1032 | extern int rv370_pcie_gart_enable(struct radeon_device *rdev); |
| @@ -1066,6 +1057,18 @@ extern void rv515_clock_startup(struct radeon_device *rdev); | |||
| 1066 | extern void rv515_debugfs(struct radeon_device *rdev); | 1057 | extern void rv515_debugfs(struct radeon_device *rdev); |
| 1067 | extern int rv515_suspend(struct radeon_device *rdev); | 1058 | extern int rv515_suspend(struct radeon_device *rdev); |
| 1068 | 1059 | ||
| 1060 | /* rs400 */ | ||
| 1061 | extern int rs400_gart_init(struct radeon_device *rdev); | ||
| 1062 | extern int rs400_gart_enable(struct radeon_device *rdev); | ||
| 1063 | extern void rs400_gart_adjust_size(struct radeon_device *rdev); | ||
| 1064 | extern void rs400_gart_disable(struct radeon_device *rdev); | ||
| 1065 | extern void rs400_gart_fini(struct radeon_device *rdev); | ||
| 1066 | |||
| 1067 | /* rs600 */ | ||
| 1068 | extern void rs600_set_safe_registers(struct radeon_device *rdev); | ||
| 1069 | extern int rs600_irq_set(struct radeon_device *rdev); | ||
| 1070 | extern void rs600_irq_disable(struct radeon_device *rdev); | ||
| 1071 | |||
| 1069 | /* rs690, rs740 */ | 1072 | /* rs690, rs740 */ |
| 1070 | extern void rs690_line_buffer_adjust(struct radeon_device *rdev, | 1073 | extern void rs690_line_buffer_adjust(struct radeon_device *rdev, |
| 1071 | struct drm_display_mode *mode1, | 1074 | struct drm_display_mode *mode1, |
| @@ -1083,8 +1086,9 @@ extern int r600_pcie_gart_init(struct radeon_device *rdev); | |||
| 1083 | extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); | 1086 | extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); |
| 1084 | extern int r600_ib_test(struct radeon_device *rdev); | 1087 | extern int r600_ib_test(struct radeon_device *rdev); |
| 1085 | extern int r600_ring_test(struct radeon_device *rdev); | 1088 | extern int r600_ring_test(struct radeon_device *rdev); |
| 1086 | extern int r600_wb_init(struct radeon_device *rdev); | ||
| 1087 | extern void r600_wb_fini(struct radeon_device *rdev); | 1089 | extern void r600_wb_fini(struct radeon_device *rdev); |
| 1090 | extern int r600_wb_enable(struct radeon_device *rdev); | ||
| 1091 | extern void r600_wb_disable(struct radeon_device *rdev); | ||
| 1088 | extern void r600_scratch_init(struct radeon_device *rdev); | 1092 | extern void r600_scratch_init(struct radeon_device *rdev); |
| 1089 | extern int r600_blit_init(struct radeon_device *rdev); | 1093 | extern int r600_blit_init(struct radeon_device *rdev); |
| 1090 | extern void r600_blit_fini(struct radeon_device *rdev); | 1094 | extern void r600_blit_fini(struct radeon_device *rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index c8a4e7b5663d..c3532c7a6f3f 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
| @@ -41,28 +41,17 @@ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); | |||
| 41 | /* | 41 | /* |
| 42 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 | 42 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 |
| 43 | */ | 43 | */ |
| 44 | int r100_init(struct radeon_device *rdev); | 44 | extern int r100_init(struct radeon_device *rdev); |
| 45 | int r200_init(struct radeon_device *rdev); | 45 | extern void r100_fini(struct radeon_device *rdev); |
| 46 | extern int r100_suspend(struct radeon_device *rdev); | ||
| 47 | extern int r100_resume(struct radeon_device *rdev); | ||
| 46 | uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); | 48 | uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); |
| 47 | void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 49 | void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
| 48 | void r100_errata(struct radeon_device *rdev); | ||
| 49 | void r100_vram_info(struct radeon_device *rdev); | ||
| 50 | void r100_vga_set_state(struct radeon_device *rdev, bool state); | 50 | void r100_vga_set_state(struct radeon_device *rdev, bool state); |
| 51 | int r100_gpu_reset(struct radeon_device *rdev); | 51 | int r100_gpu_reset(struct radeon_device *rdev); |
| 52 | int r100_mc_init(struct radeon_device *rdev); | ||
| 53 | void r100_mc_fini(struct radeon_device *rdev); | ||
| 54 | u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); | 52 | u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); |
| 55 | int r100_wb_init(struct radeon_device *rdev); | ||
| 56 | void r100_wb_fini(struct radeon_device *rdev); | ||
| 57 | int r100_pci_gart_init(struct radeon_device *rdev); | ||
| 58 | void r100_pci_gart_fini(struct radeon_device *rdev); | ||
| 59 | int r100_pci_gart_enable(struct radeon_device *rdev); | ||
| 60 | void r100_pci_gart_disable(struct radeon_device *rdev); | ||
| 61 | void r100_pci_gart_tlb_flush(struct radeon_device *rdev); | 53 | void r100_pci_gart_tlb_flush(struct radeon_device *rdev); |
| 62 | int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 54 | int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
| 63 | int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); | ||
| 64 | void r100_cp_fini(struct radeon_device *rdev); | ||
| 65 | void r100_cp_disable(struct radeon_device *rdev); | ||
| 66 | void r100_cp_commit(struct radeon_device *rdev); | 55 | void r100_cp_commit(struct radeon_device *rdev); |
| 67 | void r100_ring_start(struct radeon_device *rdev); | 56 | void r100_ring_start(struct radeon_device *rdev); |
| 68 | int r100_irq_set(struct radeon_device *rdev); | 57 | int r100_irq_set(struct radeon_device *rdev); |
| @@ -83,33 +72,21 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg, | |||
| 83 | int r100_clear_surface_reg(struct radeon_device *rdev, int reg); | 72 | int r100_clear_surface_reg(struct radeon_device *rdev, int reg); |
| 84 | void r100_bandwidth_update(struct radeon_device *rdev); | 73 | void r100_bandwidth_update(struct radeon_device *rdev); |
| 85 | void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | 74 | void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
| 86 | int r100_ib_test(struct radeon_device *rdev); | ||
| 87 | int r100_ring_test(struct radeon_device *rdev); | 75 | int r100_ring_test(struct radeon_device *rdev); |
| 88 | 76 | ||
| 89 | static struct radeon_asic r100_asic = { | 77 | static struct radeon_asic r100_asic = { |
| 90 | .init = &r100_init, | 78 | .init = &r100_init, |
| 91 | .errata = &r100_errata, | 79 | .fini = &r100_fini, |
| 92 | .vram_info = &r100_vram_info, | 80 | .suspend = &r100_suspend, |
| 81 | .resume = &r100_resume, | ||
| 93 | .vga_set_state = &r100_vga_set_state, | 82 | .vga_set_state = &r100_vga_set_state, |
| 94 | .gpu_reset = &r100_gpu_reset, | 83 | .gpu_reset = &r100_gpu_reset, |
| 95 | .mc_init = &r100_mc_init, | ||
| 96 | .mc_fini = &r100_mc_fini, | ||
| 97 | .wb_init = &r100_wb_init, | ||
| 98 | .wb_fini = &r100_wb_fini, | ||
| 99 | .gart_init = &r100_pci_gart_init, | ||
| 100 | .gart_fini = &r100_pci_gart_fini, | ||
| 101 | .gart_enable = &r100_pci_gart_enable, | ||
| 102 | .gart_disable = &r100_pci_gart_disable, | ||
| 103 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, | 84 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, |
| 104 | .gart_set_page = &r100_pci_gart_set_page, | 85 | .gart_set_page = &r100_pci_gart_set_page, |
| 105 | .cp_init = &r100_cp_init, | ||
| 106 | .cp_fini = &r100_cp_fini, | ||
| 107 | .cp_disable = &r100_cp_disable, | ||
| 108 | .cp_commit = &r100_cp_commit, | 86 | .cp_commit = &r100_cp_commit, |
| 109 | .ring_start = &r100_ring_start, | 87 | .ring_start = &r100_ring_start, |
| 110 | .ring_test = &r100_ring_test, | 88 | .ring_test = &r100_ring_test, |
| 111 | .ring_ib_execute = &r100_ring_ib_execute, | 89 | .ring_ib_execute = &r100_ring_ib_execute, |
| 112 | .ib_test = &r100_ib_test, | ||
| 113 | .irq_set = &r100_irq_set, | 90 | .irq_set = &r100_irq_set, |
| 114 | .irq_process = &r100_irq_process, | 91 | .irq_process = &r100_irq_process, |
| 115 | .get_vblank_counter = &r100_get_vblank_counter, | 92 | .get_vblank_counter = &r100_get_vblank_counter, |
| @@ -131,55 +108,38 @@ static struct radeon_asic r100_asic = { | |||
| 131 | /* | 108 | /* |
| 132 | * r300,r350,rv350,rv380 | 109 | * r300,r350,rv350,rv380 |
| 133 | */ | 110 | */ |
| 134 | int r300_init(struct radeon_device *rdev); | 111 | extern int r300_init(struct radeon_device *rdev); |
| 135 | void r300_errata(struct radeon_device *rdev); | 112 | extern void r300_fini(struct radeon_device *rdev); |
| 136 | void r300_vram_info(struct radeon_device *rdev); | 113 | extern int r300_suspend(struct radeon_device *rdev); |
| 137 | int r300_gpu_reset(struct radeon_device *rdev); | 114 | extern int r300_resume(struct radeon_device *rdev); |
| 138 | int r300_mc_init(struct radeon_device *rdev); | 115 | extern int r300_gpu_reset(struct radeon_device *rdev); |
| 139 | void r300_mc_fini(struct radeon_device *rdev); | 116 | extern void r300_ring_start(struct radeon_device *rdev); |
| 140 | void r300_ring_start(struct radeon_device *rdev); | 117 | extern void r300_fence_ring_emit(struct radeon_device *rdev, |
| 141 | void r300_fence_ring_emit(struct radeon_device *rdev, | 118 | struct radeon_fence *fence); |
| 142 | struct radeon_fence *fence); | 119 | extern int r300_cs_parse(struct radeon_cs_parser *p); |
| 143 | int r300_cs_parse(struct radeon_cs_parser *p); | 120 | extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); |
| 144 | int rv370_pcie_gart_init(struct radeon_device *rdev); | 121 | extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
| 145 | void rv370_pcie_gart_fini(struct radeon_device *rdev); | 122 | extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); |
| 146 | int rv370_pcie_gart_enable(struct radeon_device *rdev); | 123 | extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
| 147 | void rv370_pcie_gart_disable(struct radeon_device *rdev); | 124 | extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); |
| 148 | void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); | 125 | extern int r300_copy_dma(struct radeon_device *rdev, |
| 149 | int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 126 | uint64_t src_offset, |
| 150 | uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); | 127 | uint64_t dst_offset, |
| 151 | void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 128 | unsigned num_pages, |
| 152 | void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); | 129 | struct radeon_fence *fence); |
| 153 | int r300_copy_dma(struct radeon_device *rdev, | ||
| 154 | uint64_t src_offset, | ||
| 155 | uint64_t dst_offset, | ||
| 156 | unsigned num_pages, | ||
| 157 | struct radeon_fence *fence); | ||
| 158 | |||
| 159 | static struct radeon_asic r300_asic = { | 130 | static struct radeon_asic r300_asic = { |
| 160 | .init = &r300_init, | 131 | .init = &r300_init, |
| 161 | .errata = &r300_errata, | 132 | .fini = &r300_fini, |
| 162 | .vram_info = &r300_vram_info, | 133 | .suspend = &r300_suspend, |
| 134 | .resume = &r300_resume, | ||
| 163 | .vga_set_state = &r100_vga_set_state, | 135 | .vga_set_state = &r100_vga_set_state, |
| 164 | .gpu_reset = &r300_gpu_reset, | 136 | .gpu_reset = &r300_gpu_reset, |
| 165 | .mc_init = &r300_mc_init, | ||
| 166 | .mc_fini = &r300_mc_fini, | ||
| 167 | .wb_init = &r100_wb_init, | ||
| 168 | .wb_fini = &r100_wb_fini, | ||
| 169 | .gart_init = &r100_pci_gart_init, | ||
| 170 | .gart_fini = &r100_pci_gart_fini, | ||
| 171 | .gart_enable = &r100_pci_gart_enable, | ||
| 172 | .gart_disable = &r100_pci_gart_disable, | ||
| 173 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, | 137 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, |
| 174 | .gart_set_page = &r100_pci_gart_set_page, | 138 | .gart_set_page = &r100_pci_gart_set_page, |
| 175 | .cp_init = &r100_cp_init, | ||
| 176 | .cp_fini = &r100_cp_fini, | ||
| 177 | .cp_disable = &r100_cp_disable, | ||
| 178 | .cp_commit = &r100_cp_commit, | 139 | .cp_commit = &r100_cp_commit, |
| 179 | .ring_start = &r300_ring_start, | 140 | .ring_start = &r300_ring_start, |
| 180 | .ring_test = &r100_ring_test, | 141 | .ring_test = &r100_ring_test, |
| 181 | .ring_ib_execute = &r100_ring_ib_execute, | 142 | .ring_ib_execute = &r100_ring_ib_execute, |
| 182 | .ib_test = &r100_ib_test, | ||
| 183 | .irq_set = &r100_irq_set, | 143 | .irq_set = &r100_irq_set, |
| 184 | .irq_process = &r100_irq_process, | 144 | .irq_process = &r100_irq_process, |
| 185 | .get_vblank_counter = &r100_get_vblank_counter, | 145 | .get_vblank_counter = &r100_get_vblank_counter, |
| @@ -209,26 +169,14 @@ static struct radeon_asic r420_asic = { | |||
| 209 | .fini = &r420_fini, | 169 | .fini = &r420_fini, |
| 210 | .suspend = &r420_suspend, | 170 | .suspend = &r420_suspend, |
| 211 | .resume = &r420_resume, | 171 | .resume = &r420_resume, |
| 212 | .errata = NULL, | ||
| 213 | .vram_info = NULL, | ||
| 214 | .vga_set_state = &r100_vga_set_state, | 172 | .vga_set_state = &r100_vga_set_state, |
| 215 | .gpu_reset = &r300_gpu_reset, | 173 | .gpu_reset = &r300_gpu_reset, |
| 216 | .mc_init = NULL, | ||
| 217 | .mc_fini = NULL, | ||
| 218 | .wb_init = NULL, | ||
| 219 | .wb_fini = NULL, | ||
| 220 | .gart_enable = NULL, | ||
| 221 | .gart_disable = NULL, | ||
| 222 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | 174 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, |
| 223 | .gart_set_page = &rv370_pcie_gart_set_page, | 175 | .gart_set_page = &rv370_pcie_gart_set_page, |
| 224 | .cp_init = NULL, | ||
| 225 | .cp_fini = NULL, | ||
| 226 | .cp_disable = NULL, | ||
| 227 | .cp_commit = &r100_cp_commit, | 176 | .cp_commit = &r100_cp_commit, |
| 228 | .ring_start = &r300_ring_start, | 177 | .ring_start = &r300_ring_start, |
| 229 | .ring_test = &r100_ring_test, | 178 | .ring_test = &r100_ring_test, |
| 230 | .ring_ib_execute = &r100_ring_ib_execute, | 179 | .ring_ib_execute = &r100_ring_ib_execute, |
| 231 | .ib_test = NULL, | ||
| 232 | .irq_set = &r100_irq_set, | 180 | .irq_set = &r100_irq_set, |
| 233 | .irq_process = &r100_irq_process, | 181 | .irq_process = &r100_irq_process, |
| 234 | .get_vblank_counter = &r100_get_vblank_counter, | 182 | .get_vblank_counter = &r100_get_vblank_counter, |
| @@ -250,42 +198,27 @@ static struct radeon_asic r420_asic = { | |||
| 250 | /* | 198 | /* |
| 251 | * rs400,rs480 | 199 | * rs400,rs480 |
| 252 | */ | 200 | */ |
| 253 | void rs400_errata(struct radeon_device *rdev); | 201 | extern int rs400_init(struct radeon_device *rdev); |
| 254 | void rs400_vram_info(struct radeon_device *rdev); | 202 | extern void rs400_fini(struct radeon_device *rdev); |
| 255 | int rs400_mc_init(struct radeon_device *rdev); | 203 | extern int rs400_suspend(struct radeon_device *rdev); |
| 256 | void rs400_mc_fini(struct radeon_device *rdev); | 204 | extern int rs400_resume(struct radeon_device *rdev); |
| 257 | int rs400_gart_init(struct radeon_device *rdev); | ||
| 258 | void rs400_gart_fini(struct radeon_device *rdev); | ||
| 259 | int rs400_gart_enable(struct radeon_device *rdev); | ||
| 260 | void rs400_gart_disable(struct radeon_device *rdev); | ||
| 261 | void rs400_gart_tlb_flush(struct radeon_device *rdev); | 205 | void rs400_gart_tlb_flush(struct radeon_device *rdev); |
| 262 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 206 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
| 263 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 207 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
| 264 | void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 208 | void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
| 265 | static struct radeon_asic rs400_asic = { | 209 | static struct radeon_asic rs400_asic = { |
| 266 | .init = &r300_init, | 210 | .init = &rs400_init, |
| 267 | .errata = &rs400_errata, | 211 | .fini = &rs400_fini, |
| 268 | .vram_info = &rs400_vram_info, | 212 | .suspend = &rs400_suspend, |
| 213 | .resume = &rs400_resume, | ||
| 269 | .vga_set_state = &r100_vga_set_state, | 214 | .vga_set_state = &r100_vga_set_state, |
| 270 | .gpu_reset = &r300_gpu_reset, | 215 | .gpu_reset = &r300_gpu_reset, |
| 271 | .mc_init = &rs400_mc_init, | ||
| 272 | .mc_fini = &rs400_mc_fini, | ||
| 273 | .wb_init = &r100_wb_init, | ||
| 274 | .wb_fini = &r100_wb_fini, | ||
| 275 | .gart_init = &rs400_gart_init, | ||
| 276 | .gart_fini = &rs400_gart_fini, | ||
| 277 | .gart_enable = &rs400_gart_enable, | ||
| 278 | .gart_disable = &rs400_gart_disable, | ||
| 279 | .gart_tlb_flush = &rs400_gart_tlb_flush, | 216 | .gart_tlb_flush = &rs400_gart_tlb_flush, |
| 280 | .gart_set_page = &rs400_gart_set_page, | 217 | .gart_set_page = &rs400_gart_set_page, |
| 281 | .cp_init = &r100_cp_init, | ||
| 282 | .cp_fini = &r100_cp_fini, | ||
| 283 | .cp_disable = &r100_cp_disable, | ||
| 284 | .cp_commit = &r100_cp_commit, | 218 | .cp_commit = &r100_cp_commit, |
| 285 | .ring_start = &r300_ring_start, | 219 | .ring_start = &r300_ring_start, |
| 286 | .ring_test = &r100_ring_test, | 220 | .ring_test = &r100_ring_test, |
| 287 | .ring_ib_execute = &r100_ring_ib_execute, | 221 | .ring_ib_execute = &r100_ring_ib_execute, |
| 288 | .ib_test = &r100_ib_test, | ||
| 289 | .irq_set = &r100_irq_set, | 222 | .irq_set = &r100_irq_set, |
| 290 | .irq_process = &r100_irq_process, | 223 | .irq_process = &r100_irq_process, |
| 291 | .get_vblank_counter = &r100_get_vblank_counter, | 224 | .get_vblank_counter = &r100_get_vblank_counter, |
| @@ -307,18 +240,13 @@ static struct radeon_asic rs400_asic = { | |||
| 307 | /* | 240 | /* |
| 308 | * rs600. | 241 | * rs600. |
| 309 | */ | 242 | */ |
| 310 | int rs600_init(struct radeon_device *rdev); | 243 | extern int rs600_init(struct radeon_device *rdev); |
| 311 | void rs600_errata(struct radeon_device *rdev); | 244 | extern void rs600_fini(struct radeon_device *rdev); |
| 312 | void rs600_vram_info(struct radeon_device *rdev); | 245 | extern int rs600_suspend(struct radeon_device *rdev); |
| 313 | int rs600_mc_init(struct radeon_device *rdev); | 246 | extern int rs600_resume(struct radeon_device *rdev); |
| 314 | void rs600_mc_fini(struct radeon_device *rdev); | ||
| 315 | int rs600_irq_set(struct radeon_device *rdev); | 247 | int rs600_irq_set(struct radeon_device *rdev); |
| 316 | int rs600_irq_process(struct radeon_device *rdev); | 248 | int rs600_irq_process(struct radeon_device *rdev); |
| 317 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); | 249 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); |
| 318 | int rs600_gart_init(struct radeon_device *rdev); | ||
| 319 | void rs600_gart_fini(struct radeon_device *rdev); | ||
| 320 | int rs600_gart_enable(struct radeon_device *rdev); | ||
| 321 | void rs600_gart_disable(struct radeon_device *rdev); | ||
| 322 | void rs600_gart_tlb_flush(struct radeon_device *rdev); | 250 | void rs600_gart_tlb_flush(struct radeon_device *rdev); |
| 323 | int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 251 | int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
| 324 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 252 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
| @@ -326,28 +254,17 @@ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | |||
| 326 | void rs600_bandwidth_update(struct radeon_device *rdev); | 254 | void rs600_bandwidth_update(struct radeon_device *rdev); |
| 327 | static struct radeon_asic rs600_asic = { | 255 | static struct radeon_asic rs600_asic = { |
| 328 | .init = &rs600_init, | 256 | .init = &rs600_init, |
| 329 | .errata = &rs600_errata, | 257 | .fini = &rs600_fini, |
| 330 | .vram_info = &rs600_vram_info, | 258 | .suspend = &rs600_suspend, |
| 259 | .resume = &rs600_resume, | ||
| 331 | .vga_set_state = &r100_vga_set_state, | 260 | .vga_set_state = &r100_vga_set_state, |
| 332 | .gpu_reset = &r300_gpu_reset, | 261 | .gpu_reset = &r300_gpu_reset, |
| 333 | .mc_init = &rs600_mc_init, | ||
| 334 | .mc_fini = &rs600_mc_fini, | ||
| 335 | .wb_init = &r100_wb_init, | ||
| 336 | .wb_fini = &r100_wb_fini, | ||
| 337 | .gart_init = &rs600_gart_init, | ||
| 338 | .gart_fini = &rs600_gart_fini, | ||
| 339 | .gart_enable = &rs600_gart_enable, | ||
| 340 | .gart_disable = &rs600_gart_disable, | ||
| 341 | .gart_tlb_flush = &rs600_gart_tlb_flush, | 262 | .gart_tlb_flush = &rs600_gart_tlb_flush, |
| 342 | .gart_set_page = &rs600_gart_set_page, | 263 | .gart_set_page = &rs600_gart_set_page, |
| 343 | .cp_init = &r100_cp_init, | ||
| 344 | .cp_fini = &r100_cp_fini, | ||
| 345 | .cp_disable = &r100_cp_disable, | ||
| 346 | .cp_commit = &r100_cp_commit, | 264 | .cp_commit = &r100_cp_commit, |
| 347 | .ring_start = &r300_ring_start, | 265 | .ring_start = &r300_ring_start, |
| 348 | .ring_test = &r100_ring_test, | 266 | .ring_test = &r100_ring_test, |
| 349 | .ring_ib_execute = &r100_ring_ib_execute, | 267 | .ring_ib_execute = &r100_ring_ib_execute, |
| 350 | .ib_test = &r100_ib_test, | ||
| 351 | .irq_set = &rs600_irq_set, | 268 | .irq_set = &rs600_irq_set, |
| 352 | .irq_process = &rs600_irq_process, | 269 | .irq_process = &rs600_irq_process, |
| 353 | .get_vblank_counter = &rs600_get_vblank_counter, | 270 | .get_vblank_counter = &rs600_get_vblank_counter, |
| @@ -367,37 +284,26 @@ static struct radeon_asic rs600_asic = { | |||
| 367 | /* | 284 | /* |
| 368 | * rs690,rs740 | 285 | * rs690,rs740 |
| 369 | */ | 286 | */ |
| 370 | void rs690_errata(struct radeon_device *rdev); | 287 | int rs690_init(struct radeon_device *rdev); |
| 371 | void rs690_vram_info(struct radeon_device *rdev); | 288 | void rs690_fini(struct radeon_device *rdev); |
| 372 | int rs690_mc_init(struct radeon_device *rdev); | 289 | int rs690_resume(struct radeon_device *rdev); |
| 373 | void rs690_mc_fini(struct radeon_device *rdev); | 290 | int rs690_suspend(struct radeon_device *rdev); |
| 374 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 291 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
| 375 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 292 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
| 376 | void rs690_bandwidth_update(struct radeon_device *rdev); | 293 | void rs690_bandwidth_update(struct radeon_device *rdev); |
| 377 | static struct radeon_asic rs690_asic = { | 294 | static struct radeon_asic rs690_asic = { |
| 378 | .init = &rs600_init, | 295 | .init = &rs690_init, |
| 379 | .errata = &rs690_errata, | 296 | .fini = &rs690_fini, |
| 380 | .vram_info = &rs690_vram_info, | 297 | .suspend = &rs690_suspend, |
| 298 | .resume = &rs690_resume, | ||
| 381 | .vga_set_state = &r100_vga_set_state, | 299 | .vga_set_state = &r100_vga_set_state, |
| 382 | .gpu_reset = &r300_gpu_reset, | 300 | .gpu_reset = &r300_gpu_reset, |
| 383 | .mc_init = &rs690_mc_init, | ||
| 384 | .mc_fini = &rs690_mc_fini, | ||
| 385 | .wb_init = &r100_wb_init, | ||
| 386 | .wb_fini = &r100_wb_fini, | ||
| 387 | .gart_init = &rs400_gart_init, | ||
| 388 | .gart_fini = &rs400_gart_fini, | ||
| 389 | .gart_enable = &rs400_gart_enable, | ||
| 390 | .gart_disable = &rs400_gart_disable, | ||
| 391 | .gart_tlb_flush = &rs400_gart_tlb_flush, | 301 | .gart_tlb_flush = &rs400_gart_tlb_flush, |
| 392 | .gart_set_page = &rs400_gart_set_page, | 302 | .gart_set_page = &rs400_gart_set_page, |
| 393 | .cp_init = &r100_cp_init, | ||
| 394 | .cp_fini = &r100_cp_fini, | ||
| 395 | .cp_disable = &r100_cp_disable, | ||
| 396 | .cp_commit = &r100_cp_commit, | 303 | .cp_commit = &r100_cp_commit, |
| 397 | .ring_start = &r300_ring_start, | 304 | .ring_start = &r300_ring_start, |
| 398 | .ring_test = &r100_ring_test, | 305 | .ring_test = &r100_ring_test, |
| 399 | .ring_ib_execute = &r100_ring_ib_execute, | 306 | .ring_ib_execute = &r100_ring_ib_execute, |
| 400 | .ib_test = &r100_ib_test, | ||
| 401 | .irq_set = &rs600_irq_set, | 307 | .irq_set = &rs600_irq_set, |
| 402 | .irq_process = &rs600_irq_process, | 308 | .irq_process = &rs600_irq_process, |
| 403 | .get_vblank_counter = &rs600_get_vblank_counter, | 309 | .get_vblank_counter = &rs600_get_vblank_counter, |
| @@ -435,28 +341,14 @@ static struct radeon_asic rv515_asic = { | |||
| 435 | .fini = &rv515_fini, | 341 | .fini = &rv515_fini, |
| 436 | .suspend = &rv515_suspend, | 342 | .suspend = &rv515_suspend, |
| 437 | .resume = &rv515_resume, | 343 | .resume = &rv515_resume, |
| 438 | .errata = NULL, | ||
| 439 | .vram_info = NULL, | ||
| 440 | .vga_set_state = &r100_vga_set_state, | 344 | .vga_set_state = &r100_vga_set_state, |
| 441 | .gpu_reset = &rv515_gpu_reset, | 345 | .gpu_reset = &rv515_gpu_reset, |
| 442 | .mc_init = NULL, | ||
| 443 | .mc_fini = NULL, | ||
| 444 | .wb_init = NULL, | ||
| 445 | .wb_fini = NULL, | ||
| 446 | .gart_init = &rv370_pcie_gart_init, | ||
| 447 | .gart_fini = &rv370_pcie_gart_fini, | ||
| 448 | .gart_enable = NULL, | ||
| 449 | .gart_disable = NULL, | ||
| 450 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | 346 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, |
| 451 | .gart_set_page = &rv370_pcie_gart_set_page, | 347 | .gart_set_page = &rv370_pcie_gart_set_page, |
| 452 | .cp_init = NULL, | ||
| 453 | .cp_fini = NULL, | ||
| 454 | .cp_disable = NULL, | ||
| 455 | .cp_commit = &r100_cp_commit, | 348 | .cp_commit = &r100_cp_commit, |
| 456 | .ring_start = &rv515_ring_start, | 349 | .ring_start = &rv515_ring_start, |
| 457 | .ring_test = &r100_ring_test, | 350 | .ring_test = &r100_ring_test, |
| 458 | .ring_ib_execute = &r100_ring_ib_execute, | 351 | .ring_ib_execute = &r100_ring_ib_execute, |
| 459 | .ib_test = NULL, | ||
| 460 | .irq_set = &rs600_irq_set, | 352 | .irq_set = &rs600_irq_set, |
| 461 | .irq_process = &rs600_irq_process, | 353 | .irq_process = &rs600_irq_process, |
| 462 | .get_vblank_counter = &rs600_get_vblank_counter, | 354 | .get_vblank_counter = &rs600_get_vblank_counter, |
| @@ -485,28 +377,14 @@ static struct radeon_asic r520_asic = { | |||
| 485 | .fini = &rv515_fini, | 377 | .fini = &rv515_fini, |
| 486 | .suspend = &rv515_suspend, | 378 | .suspend = &rv515_suspend, |
| 487 | .resume = &r520_resume, | 379 | .resume = &r520_resume, |
| 488 | .errata = NULL, | ||
| 489 | .vram_info = NULL, | ||
| 490 | .vga_set_state = &r100_vga_set_state, | 380 | .vga_set_state = &r100_vga_set_state, |
| 491 | .gpu_reset = &rv515_gpu_reset, | 381 | .gpu_reset = &rv515_gpu_reset, |
| 492 | .mc_init = NULL, | ||
| 493 | .mc_fini = NULL, | ||
| 494 | .wb_init = NULL, | ||
| 495 | .wb_fini = NULL, | ||
| 496 | .gart_init = NULL, | ||
| 497 | .gart_fini = NULL, | ||
| 498 | .gart_enable = NULL, | ||
| 499 | .gart_disable = NULL, | ||
| 500 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | 382 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, |
| 501 | .gart_set_page = &rv370_pcie_gart_set_page, | 383 | .gart_set_page = &rv370_pcie_gart_set_page, |
| 502 | .cp_init = NULL, | ||
| 503 | .cp_fini = NULL, | ||
| 504 | .cp_disable = NULL, | ||
| 505 | .cp_commit = &r100_cp_commit, | 384 | .cp_commit = &r100_cp_commit, |
| 506 | .ring_start = &rv515_ring_start, | 385 | .ring_start = &rv515_ring_start, |
| 507 | .ring_test = &r100_ring_test, | 386 | .ring_test = &r100_ring_test, |
| 508 | .ring_ib_execute = &r100_ring_ib_execute, | 387 | .ring_ib_execute = &r100_ring_ib_execute, |
| 509 | .ib_test = NULL, | ||
| 510 | .irq_set = &rs600_irq_set, | 388 | .irq_set = &rs600_irq_set, |
| 511 | .irq_process = &rs600_irq_process, | 389 | .irq_process = &rs600_irq_process, |
| 512 | .get_vblank_counter = &rs600_get_vblank_counter, | 390 | .get_vblank_counter = &rs600_get_vblank_counter, |
| @@ -554,37 +432,23 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg, | |||
| 554 | uint32_t offset, uint32_t obj_size); | 432 | uint32_t offset, uint32_t obj_size); |
| 555 | int r600_clear_surface_reg(struct radeon_device *rdev, int reg); | 433 | int r600_clear_surface_reg(struct radeon_device *rdev, int reg); |
| 556 | void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | 434 | void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
| 557 | int r600_ib_test(struct radeon_device *rdev); | ||
| 558 | int r600_ring_test(struct radeon_device *rdev); | 435 | int r600_ring_test(struct radeon_device *rdev); |
| 559 | int r600_copy_blit(struct radeon_device *rdev, | 436 | int r600_copy_blit(struct radeon_device *rdev, |
| 560 | uint64_t src_offset, uint64_t dst_offset, | 437 | uint64_t src_offset, uint64_t dst_offset, |
| 561 | unsigned num_pages, struct radeon_fence *fence); | 438 | unsigned num_pages, struct radeon_fence *fence); |
| 562 | 439 | ||
| 563 | static struct radeon_asic r600_asic = { | 440 | static struct radeon_asic r600_asic = { |
| 564 | .errata = NULL, | ||
| 565 | .init = &r600_init, | 441 | .init = &r600_init, |
| 566 | .fini = &r600_fini, | 442 | .fini = &r600_fini, |
| 567 | .suspend = &r600_suspend, | 443 | .suspend = &r600_suspend, |
| 568 | .resume = &r600_resume, | 444 | .resume = &r600_resume, |
| 569 | .cp_commit = &r600_cp_commit, | 445 | .cp_commit = &r600_cp_commit, |
| 570 | .vram_info = NULL, | ||
| 571 | .vga_set_state = &r600_vga_set_state, | 446 | .vga_set_state = &r600_vga_set_state, |
| 572 | .gpu_reset = &r600_gpu_reset, | 447 | .gpu_reset = &r600_gpu_reset, |
| 573 | .mc_init = NULL, | ||
| 574 | .mc_fini = NULL, | ||
| 575 | .wb_init = &r600_wb_init, | ||
| 576 | .wb_fini = &r600_wb_fini, | ||
| 577 | .gart_enable = NULL, | ||
| 578 | .gart_disable = NULL, | ||
| 579 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, | 448 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, |
| 580 | .gart_set_page = &rs600_gart_set_page, | 449 | .gart_set_page = &rs600_gart_set_page, |
| 581 | .cp_init = NULL, | ||
| 582 | .cp_fini = NULL, | ||
| 583 | .cp_disable = NULL, | ||
| 584 | .ring_start = NULL, | ||
| 585 | .ring_test = &r600_ring_test, | 450 | .ring_test = &r600_ring_test, |
| 586 | .ring_ib_execute = &r600_ring_ib_execute, | 451 | .ring_ib_execute = &r600_ring_ib_execute, |
| 587 | .ib_test = &r600_ib_test, | ||
| 588 | .irq_set = &r600_irq_set, | 452 | .irq_set = &r600_irq_set, |
| 589 | .irq_process = &r600_irq_process, | 453 | .irq_process = &r600_irq_process, |
| 590 | .fence_ring_emit = &r600_fence_ring_emit, | 454 | .fence_ring_emit = &r600_fence_ring_emit, |
| @@ -611,30 +475,17 @@ int rv770_resume(struct radeon_device *rdev); | |||
| 611 | int rv770_gpu_reset(struct radeon_device *rdev); | 475 | int rv770_gpu_reset(struct radeon_device *rdev); |
| 612 | 476 | ||
| 613 | static struct radeon_asic rv770_asic = { | 477 | static struct radeon_asic rv770_asic = { |
| 614 | .errata = NULL, | ||
| 615 | .init = &rv770_init, | 478 | .init = &rv770_init, |
| 616 | .fini = &rv770_fini, | 479 | .fini = &rv770_fini, |
| 617 | .suspend = &rv770_suspend, | 480 | .suspend = &rv770_suspend, |
| 618 | .resume = &rv770_resume, | 481 | .resume = &rv770_resume, |
| 619 | .cp_commit = &r600_cp_commit, | 482 | .cp_commit = &r600_cp_commit, |
| 620 | .vram_info = NULL, | ||
| 621 | .gpu_reset = &rv770_gpu_reset, | 483 | .gpu_reset = &rv770_gpu_reset, |
| 622 | .vga_set_state = &r600_vga_set_state, | 484 | .vga_set_state = &r600_vga_set_state, |
| 623 | .mc_init = NULL, | ||
| 624 | .mc_fini = NULL, | ||
| 625 | .wb_init = &r600_wb_init, | ||
| 626 | .wb_fini = &r600_wb_fini, | ||
| 627 | .gart_enable = NULL, | ||
| 628 | .gart_disable = NULL, | ||
| 629 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, | 485 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, |
| 630 | .gart_set_page = &rs600_gart_set_page, | 486 | .gart_set_page = &rs600_gart_set_page, |
| 631 | .cp_init = NULL, | ||
| 632 | .cp_fini = NULL, | ||
| 633 | .cp_disable = NULL, | ||
| 634 | .ring_start = NULL, | ||
| 635 | .ring_test = &r600_ring_test, | 487 | .ring_test = &r600_ring_test, |
| 636 | .ring_ib_execute = &r600_ring_ib_execute, | 488 | .ring_ib_execute = &r600_ring_ib_execute, |
| 637 | .ib_test = &r600_ib_test, | ||
| 638 | .irq_set = &r600_irq_set, | 489 | .irq_set = &r600_irq_set, |
| 639 | .irq_process = &r600_irq_process, | 490 | .irq_process = &r600_irq_process, |
| 640 | .fence_ring_emit = &r600_fence_ring_emit, | 491 | .fence_ring_emit = &r600_fence_ring_emit, |
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 96e37a6e7ce4..34a9b9119518 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c | |||
| @@ -33,12 +33,50 @@ | |||
| 33 | /* | 33 | /* |
| 34 | * BIOS. | 34 | * BIOS. |
| 35 | */ | 35 | */ |
| 36 | |||
| 37 | /* If you boot an IGP board with a discrete card as the primary, | ||
| 38 | * the IGP rom is not accessible via the rom bar as the IGP rom is | ||
| 39 | * part of the system bios. On boot, the system bios puts a | ||
| 40 | * copy of the igp rom at the start of vram if a discrete card is | ||
| 41 | * present. | ||
| 42 | */ | ||
| 43 | static bool igp_read_bios_from_vram(struct radeon_device *rdev) | ||
| 44 | { | ||
| 45 | uint8_t __iomem *bios; | ||
| 46 | resource_size_t vram_base; | ||
| 47 | resource_size_t size = 256 * 1024; /* ??? */ | ||
| 48 | |||
| 49 | rdev->bios = NULL; | ||
| 50 | vram_base = drm_get_resource_start(rdev->ddev, 0); | ||
| 51 | bios = ioremap(vram_base, size); | ||
| 52 | if (!bios) { | ||
| 53 | DRM_ERROR("Unable to mmap vram\n"); | ||
| 54 | return false; | ||
| 55 | } | ||
| 56 | |||
| 57 | if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { | ||
| 58 | iounmap(bios); | ||
| 59 | DRM_ERROR("bad rom signature\n"); | ||
| 60 | return false; | ||
| 61 | } | ||
| 62 | rdev->bios = kmalloc(size, GFP_KERNEL); | ||
| 63 | if (rdev->bios == NULL) { | ||
| 64 | iounmap(bios); | ||
| 65 | DRM_ERROR("kmalloc failed\n"); | ||
| 66 | return false; | ||
| 67 | } | ||
| 68 | memcpy(rdev->bios, bios, size); | ||
| 69 | iounmap(bios); | ||
| 70 | return true; | ||
| 71 | } | ||
| 72 | |||
| 36 | static bool radeon_read_bios(struct radeon_device *rdev) | 73 | static bool radeon_read_bios(struct radeon_device *rdev) |
| 37 | { | 74 | { |
| 38 | uint8_t __iomem *bios; | 75 | uint8_t __iomem *bios; |
| 39 | size_t size; | 76 | size_t size; |
| 40 | 77 | ||
| 41 | rdev->bios = NULL; | 78 | rdev->bios = NULL; |
| 79 | /* XXX: some cards may return 0 for rom size? ddx has a workaround */ | ||
| 42 | bios = pci_map_rom(rdev->pdev, &size); | 80 | bios = pci_map_rom(rdev->pdev, &size); |
| 43 | if (!bios) { | 81 | if (!bios) { |
| 44 | return false; | 82 | return false; |
| @@ -341,7 +379,9 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev) | |||
| 341 | 379 | ||
| 342 | static bool radeon_read_disabled_bios(struct radeon_device *rdev) | 380 | static bool radeon_read_disabled_bios(struct radeon_device *rdev) |
| 343 | { | 381 | { |
| 344 | if (rdev->family >= CHIP_RV770) | 382 | if (rdev->flags & RADEON_IS_IGP) |
| 383 | return igp_read_bios_from_vram(rdev); | ||
| 384 | else if (rdev->family >= CHIP_RV770) | ||
| 345 | return r700_read_disabled_bios(rdev); | 385 | return r700_read_disabled_bios(rdev); |
| 346 | else if (rdev->family >= CHIP_R600) | 386 | else if (rdev->family >= CHIP_R600) |
| 347 | return r600_read_disabled_bios(rdev); | 387 | return r600_read_disabled_bios(rdev); |
| @@ -356,7 +396,12 @@ bool radeon_get_bios(struct radeon_device *rdev) | |||
| 356 | bool r; | 396 | bool r; |
| 357 | uint16_t tmp; | 397 | uint16_t tmp; |
| 358 | 398 | ||
| 359 | r = radeon_read_bios(rdev); | 399 | if (rdev->flags & RADEON_IS_IGP) { |
| 400 | r = igp_read_bios_from_vram(rdev); | ||
| 401 | if (r == false) | ||
| 402 | r = radeon_read_bios(rdev); | ||
| 403 | } else | ||
| 404 | r = radeon_read_bios(rdev); | ||
| 360 | if (r == false) { | 405 | if (r == false) { |
| 361 | r = radeon_read_disabled_bios(rdev); | 406 | r = radeon_read_disabled_bios(rdev); |
| 362 | } | 407 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index 152eef13197a..f5c32a766b10 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c | |||
| @@ -411,7 +411,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) | |||
| 411 | R300_PIXCLK_TRANS_ALWAYS_ONb | | 411 | R300_PIXCLK_TRANS_ALWAYS_ONb | |
| 412 | R300_PIXCLK_TVO_ALWAYS_ONb | | 412 | R300_PIXCLK_TVO_ALWAYS_ONb | |
| 413 | R300_P2G2CLK_ALWAYS_ONb | | 413 | R300_P2G2CLK_ALWAYS_ONb | |
| 414 | R300_P2G2CLK_ALWAYS_ONb); | 414 | R300_P2G2CLK_DAC_ALWAYS_ONb); |
| 415 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); | 415 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); |
| 416 | } else if (rdev->family >= CHIP_RV350) { | 416 | } else if (rdev->family >= CHIP_RV350) { |
| 417 | tmp = RREG32_PLL(R300_SCLK_CNTL2); | 417 | tmp = RREG32_PLL(R300_SCLK_CNTL2); |
| @@ -464,7 +464,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) | |||
| 464 | R300_PIXCLK_TRANS_ALWAYS_ONb | | 464 | R300_PIXCLK_TRANS_ALWAYS_ONb | |
| 465 | R300_PIXCLK_TVO_ALWAYS_ONb | | 465 | R300_PIXCLK_TVO_ALWAYS_ONb | |
| 466 | R300_P2G2CLK_ALWAYS_ONb | | 466 | R300_P2G2CLK_ALWAYS_ONb | |
| 467 | R300_P2G2CLK_ALWAYS_ONb); | 467 | R300_P2G2CLK_DAC_ALWAYS_ONb); |
| 468 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); | 468 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); |
| 469 | 469 | ||
| 470 | tmp = RREG32_PLL(RADEON_MCLK_MISC); | 470 | tmp = RREG32_PLL(RADEON_MCLK_MISC); |
| @@ -654,7 +654,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) | |||
| 654 | R300_PIXCLK_TRANS_ALWAYS_ONb | | 654 | R300_PIXCLK_TRANS_ALWAYS_ONb | |
| 655 | R300_PIXCLK_TVO_ALWAYS_ONb | | 655 | R300_PIXCLK_TVO_ALWAYS_ONb | |
| 656 | R300_P2G2CLK_ALWAYS_ONb | | 656 | R300_P2G2CLK_ALWAYS_ONb | |
| 657 | R300_P2G2CLK_ALWAYS_ONb | | 657 | R300_P2G2CLK_DAC_ALWAYS_ONb | |
| 658 | R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF); | 658 | R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF); |
| 659 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); | 659 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); |
| 660 | } else if (rdev->family >= CHIP_RV350) { | 660 | } else if (rdev->family >= CHIP_RV350) { |
| @@ -705,7 +705,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) | |||
| 705 | R300_PIXCLK_TRANS_ALWAYS_ONb | | 705 | R300_PIXCLK_TRANS_ALWAYS_ONb | |
| 706 | R300_PIXCLK_TVO_ALWAYS_ONb | | 706 | R300_PIXCLK_TVO_ALWAYS_ONb | |
| 707 | R300_P2G2CLK_ALWAYS_ONb | | 707 | R300_P2G2CLK_ALWAYS_ONb | |
| 708 | R300_P2G2CLK_ALWAYS_ONb | | 708 | R300_P2G2CLK_DAC_ALWAYS_ONb | |
| 709 | R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF); | 709 | R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF); |
| 710 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); | 710 | WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); |
| 711 | } else { | 711 | } else { |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index ec835d56d30a..3d667031de6e 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -322,10 +322,6 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
| 322 | case CHIP_RV380: | 322 | case CHIP_RV380: |
| 323 | rdev->asic = &r300_asic; | 323 | rdev->asic = &r300_asic; |
| 324 | if (rdev->flags & RADEON_IS_PCIE) { | 324 | if (rdev->flags & RADEON_IS_PCIE) { |
| 325 | rdev->asic->gart_init = &rv370_pcie_gart_init; | ||
| 326 | rdev->asic->gart_fini = &rv370_pcie_gart_fini; | ||
| 327 | rdev->asic->gart_enable = &rv370_pcie_gart_enable; | ||
| 328 | rdev->asic->gart_disable = &rv370_pcie_gart_disable; | ||
| 329 | rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; | 325 | rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; |
| 330 | rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; | 326 | rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; |
| 331 | } | 327 | } |
| @@ -485,7 +481,6 @@ void radeon_combios_fini(struct radeon_device *rdev) | |||
| 485 | static unsigned int radeon_vga_set_decode(void *cookie, bool state) | 481 | static unsigned int radeon_vga_set_decode(void *cookie, bool state) |
| 486 | { | 482 | { |
| 487 | struct radeon_device *rdev = cookie; | 483 | struct radeon_device *rdev = cookie; |
| 488 | |||
| 489 | radeon_vga_set_state(rdev, state); | 484 | radeon_vga_set_state(rdev, state); |
| 490 | if (state) | 485 | if (state) |
| 491 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | | 486 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | |
| @@ -493,6 +488,29 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state) | |||
| 493 | else | 488 | else |
| 494 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | 489 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
| 495 | } | 490 | } |
| 491 | |||
| 492 | void radeon_agp_disable(struct radeon_device *rdev) | ||
| 493 | { | ||
| 494 | rdev->flags &= ~RADEON_IS_AGP; | ||
| 495 | if (rdev->family >= CHIP_R600) { | ||
| 496 | DRM_INFO("Forcing AGP to PCIE mode\n"); | ||
| 497 | rdev->flags |= RADEON_IS_PCIE; | ||
| 498 | } else if (rdev->family >= CHIP_RV515 || | ||
| 499 | rdev->family == CHIP_RV380 || | ||
| 500 | rdev->family == CHIP_RV410 || | ||
| 501 | rdev->family == CHIP_R423) { | ||
| 502 | DRM_INFO("Forcing AGP to PCIE mode\n"); | ||
| 503 | rdev->flags |= RADEON_IS_PCIE; | ||
| 504 | rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; | ||
| 505 | rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; | ||
| 506 | } else { | ||
| 507 | DRM_INFO("Forcing AGP to PCI mode\n"); | ||
| 508 | rdev->flags |= RADEON_IS_PCI; | ||
| 509 | rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; | ||
| 510 | rdev->asic->gart_set_page = &r100_pci_gart_set_page; | ||
| 511 | } | ||
| 512 | } | ||
| 513 | |||
| 496 | /* | 514 | /* |
| 497 | * Radeon device. | 515 | * Radeon device. |
| 498 | */ | 516 | */ |
| @@ -531,32 +549,7 @@ int radeon_device_init(struct radeon_device *rdev, | |||
| 531 | } | 549 | } |
| 532 | 550 | ||
| 533 | if (radeon_agpmode == -1) { | 551 | if (radeon_agpmode == -1) { |
| 534 | rdev->flags &= ~RADEON_IS_AGP; | 552 | radeon_agp_disable(rdev); |
| 535 | if (rdev->family >= CHIP_R600) { | ||
| 536 | DRM_INFO("Forcing AGP to PCIE mode\n"); | ||
| 537 | rdev->flags |= RADEON_IS_PCIE; | ||
| 538 | } else if (rdev->family >= CHIP_RV515 || | ||
| 539 | rdev->family == CHIP_RV380 || | ||
| 540 | rdev->family == CHIP_RV410 || | ||
| 541 | rdev->family == CHIP_R423) { | ||
| 542 | DRM_INFO("Forcing AGP to PCIE mode\n"); | ||
| 543 | rdev->flags |= RADEON_IS_PCIE; | ||
| 544 | rdev->asic->gart_init = &rv370_pcie_gart_init; | ||
| 545 | rdev->asic->gart_fini = &rv370_pcie_gart_fini; | ||
| 546 | rdev->asic->gart_enable = &rv370_pcie_gart_enable; | ||
| 547 | rdev->asic->gart_disable = &rv370_pcie_gart_disable; | ||
| 548 | rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; | ||
| 549 | rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; | ||
| 550 | } else { | ||
| 551 | DRM_INFO("Forcing AGP to PCI mode\n"); | ||
| 552 | rdev->flags |= RADEON_IS_PCI; | ||
| 553 | rdev->asic->gart_init = &r100_pci_gart_init; | ||
| 554 | rdev->asic->gart_fini = &r100_pci_gart_fini; | ||
| 555 | rdev->asic->gart_enable = &r100_pci_gart_enable; | ||
| 556 | rdev->asic->gart_disable = &r100_pci_gart_disable; | ||
| 557 | rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; | ||
| 558 | rdev->asic->gart_set_page = &r100_pci_gart_set_page; | ||
| 559 | } | ||
| 560 | } | 553 | } |
| 561 | 554 | ||
| 562 | /* set DMA mask + need_dma32 flags. | 555 | /* set DMA mask + need_dma32 flags. |
| @@ -588,111 +581,27 @@ int radeon_device_init(struct radeon_device *rdev, | |||
| 588 | DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base); | 581 | DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base); |
| 589 | DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size); | 582 | DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size); |
| 590 | 583 | ||
| 591 | rdev->new_init_path = false; | ||
| 592 | r = radeon_init(rdev); | ||
| 593 | if (r) { | ||
| 594 | return r; | ||
| 595 | } | ||
| 596 | |||
| 597 | /* if we have > 1 VGA cards, then disable the radeon VGA resources */ | 584 | /* if we have > 1 VGA cards, then disable the radeon VGA resources */ |
| 598 | r = vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); | 585 | r = vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); |
| 599 | if (r) { | 586 | if (r) { |
| 600 | return -EINVAL; | 587 | return -EINVAL; |
| 601 | } | 588 | } |
| 602 | 589 | ||
| 603 | if (!rdev->new_init_path) { | 590 | r = radeon_init(rdev); |
| 604 | /* Setup errata flags */ | 591 | if (r) |
| 605 | radeon_errata(rdev); | 592 | return r; |
| 606 | /* Initialize scratch registers */ | ||
| 607 | radeon_scratch_init(rdev); | ||
| 608 | /* Initialize surface registers */ | ||
| 609 | radeon_surface_init(rdev); | ||
| 610 | |||
| 611 | /* BIOS*/ | ||
| 612 | if (!radeon_get_bios(rdev)) { | ||
| 613 | if (ASIC_IS_AVIVO(rdev)) | ||
| 614 | return -EINVAL; | ||
| 615 | } | ||
| 616 | if (rdev->is_atom_bios) { | ||
| 617 | r = radeon_atombios_init(rdev); | ||
| 618 | if (r) { | ||
| 619 | return r; | ||
| 620 | } | ||
| 621 | } else { | ||
| 622 | r = radeon_combios_init(rdev); | ||
| 623 | if (r) { | ||
| 624 | return r; | ||
| 625 | } | ||
| 626 | } | ||
| 627 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
| 628 | if (radeon_gpu_reset(rdev)) { | ||
| 629 | /* FIXME: what do we want to do here ? */ | ||
| 630 | } | ||
| 631 | /* check if cards are posted or not */ | ||
| 632 | if (!radeon_card_posted(rdev) && rdev->bios) { | ||
| 633 | DRM_INFO("GPU not posted. posting now...\n"); | ||
| 634 | if (rdev->is_atom_bios) { | ||
| 635 | atom_asic_init(rdev->mode_info.atom_context); | ||
| 636 | } else { | ||
| 637 | radeon_combios_asic_init(rdev->ddev); | ||
| 638 | } | ||
| 639 | } | ||
| 640 | /* Get clock & vram information */ | ||
| 641 | radeon_get_clock_info(rdev->ddev); | ||
| 642 | radeon_vram_info(rdev); | ||
| 643 | /* Initialize clocks */ | ||
| 644 | r = radeon_clocks_init(rdev); | ||
| 645 | if (r) { | ||
| 646 | return r; | ||
| 647 | } | ||
| 648 | 593 | ||
| 649 | /* Initialize memory controller (also test AGP) */ | 594 | if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) { |
| 650 | r = radeon_mc_init(rdev); | 595 | /* Acceleration not working on AGP card try again |
| 651 | if (r) { | 596 | * with fallback to PCI or PCIE GART |
| 652 | return r; | 597 | */ |
| 653 | } | 598 | radeon_gpu_reset(rdev); |
| 654 | /* Fence driver */ | 599 | radeon_fini(rdev); |
| 655 | r = radeon_fence_driver_init(rdev); | 600 | radeon_agp_disable(rdev); |
| 656 | if (r) { | 601 | r = radeon_init(rdev); |
| 657 | return r; | ||
| 658 | } | ||
| 659 | r = radeon_irq_kms_init(rdev); | ||
| 660 | if (r) { | ||
| 661 | return r; | ||
| 662 | } | ||
| 663 | /* Memory manager */ | ||
| 664 | r = radeon_object_init(rdev); | ||
| 665 | if (r) { | ||
| 666 | return r; | ||
| 667 | } | ||
| 668 | r = radeon_gpu_gart_init(rdev); | ||
| 669 | if (r) | 602 | if (r) |
| 670 | return r; | 603 | return r; |
| 671 | /* Initialize GART (initialize after TTM so we can allocate | ||
| 672 | * memory through TTM but finalize after TTM) */ | ||
| 673 | r = radeon_gart_enable(rdev); | ||
| 674 | if (r) | ||
| 675 | return 0; | ||
| 676 | r = radeon_gem_init(rdev); | ||
| 677 | if (r) | ||
| 678 | return 0; | ||
| 679 | |||
| 680 | /* 1M ring buffer */ | ||
| 681 | r = radeon_cp_init(rdev, 1024 * 1024); | ||
| 682 | if (r) | ||
| 683 | return 0; | ||
| 684 | r = radeon_wb_init(rdev); | ||
| 685 | if (r) | ||
| 686 | DRM_ERROR("radeon: failled initializing WB (%d).\n", r); | ||
| 687 | r = radeon_ib_pool_init(rdev); | ||
| 688 | if (r) | ||
| 689 | return 0; | ||
| 690 | r = radeon_ib_test(rdev); | ||
| 691 | if (r) | ||
| 692 | return 0; | ||
| 693 | rdev->accel_working = true; | ||
| 694 | } | 604 | } |
| 695 | DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); | ||
| 696 | if (radeon_testing) { | 605 | if (radeon_testing) { |
| 697 | radeon_test_moves(rdev); | 606 | radeon_test_moves(rdev); |
| 698 | } | 607 | } |
| @@ -706,32 +615,8 @@ void radeon_device_fini(struct radeon_device *rdev) | |||
| 706 | { | 615 | { |
| 707 | DRM_INFO("radeon: finishing device.\n"); | 616 | DRM_INFO("radeon: finishing device.\n"); |
| 708 | rdev->shutdown = true; | 617 | rdev->shutdown = true; |
| 709 | /* Order matter so becarefull if you rearrange anythings */ | 618 | radeon_fini(rdev); |
| 710 | if (!rdev->new_init_path) { | 619 | vga_client_register(rdev->pdev, NULL, NULL, NULL); |
| 711 | radeon_ib_pool_fini(rdev); | ||
| 712 | radeon_cp_fini(rdev); | ||
| 713 | radeon_wb_fini(rdev); | ||
| 714 | radeon_gpu_gart_fini(rdev); | ||
| 715 | radeon_gem_fini(rdev); | ||
| 716 | radeon_mc_fini(rdev); | ||
| 717 | #if __OS_HAS_AGP | ||
| 718 | radeon_agp_fini(rdev); | ||
| 719 | #endif | ||
| 720 | radeon_irq_kms_fini(rdev); | ||
| 721 | vga_client_register(rdev->pdev, NULL, NULL, NULL); | ||
| 722 | radeon_fence_driver_fini(rdev); | ||
| 723 | radeon_clocks_fini(rdev); | ||
| 724 | radeon_object_fini(rdev); | ||
| 725 | if (rdev->is_atom_bios) { | ||
| 726 | radeon_atombios_fini(rdev); | ||
| 727 | } else { | ||
| 728 | radeon_combios_fini(rdev); | ||
| 729 | } | ||
| 730 | kfree(rdev->bios); | ||
| 731 | rdev->bios = NULL; | ||
| 732 | } else { | ||
| 733 | radeon_fini(rdev); | ||
| 734 | } | ||
| 735 | iounmap(rdev->rmmio); | 620 | iounmap(rdev->rmmio); |
| 736 | rdev->rmmio = NULL; | 621 | rdev->rmmio = NULL; |
| 737 | } | 622 | } |
| @@ -771,14 +656,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
| 771 | 656 | ||
| 772 | radeon_save_bios_scratch_regs(rdev); | 657 | radeon_save_bios_scratch_regs(rdev); |
| 773 | 658 | ||
| 774 | if (!rdev->new_init_path) { | 659 | radeon_suspend(rdev); |
| 775 | radeon_cp_disable(rdev); | ||
| 776 | radeon_gart_disable(rdev); | ||
| 777 | rdev->irq.sw_int = false; | ||
| 778 | radeon_irq_set(rdev); | ||
| 779 | } else { | ||
| 780 | radeon_suspend(rdev); | ||
| 781 | } | ||
| 782 | /* evict remaining vram memory */ | 660 | /* evict remaining vram memory */ |
| 783 | radeon_object_evict_vram(rdev); | 661 | radeon_object_evict_vram(rdev); |
| 784 | 662 | ||
| @@ -797,7 +675,6 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
| 797 | int radeon_resume_kms(struct drm_device *dev) | 675 | int radeon_resume_kms(struct drm_device *dev) |
| 798 | { | 676 | { |
| 799 | struct radeon_device *rdev = dev->dev_private; | 677 | struct radeon_device *rdev = dev->dev_private; |
| 800 | int r; | ||
| 801 | 678 | ||
| 802 | acquire_console_sem(); | 679 | acquire_console_sem(); |
| 803 | pci_set_power_state(dev->pdev, PCI_D0); | 680 | pci_set_power_state(dev->pdev, PCI_D0); |
| @@ -807,43 +684,7 @@ int radeon_resume_kms(struct drm_device *dev) | |||
| 807 | return -1; | 684 | return -1; |
| 808 | } | 685 | } |
| 809 | pci_set_master(dev->pdev); | 686 | pci_set_master(dev->pdev); |
| 810 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | 687 | radeon_resume(rdev); |
| 811 | if (!rdev->new_init_path) { | ||
| 812 | if (radeon_gpu_reset(rdev)) { | ||
| 813 | /* FIXME: what do we want to do here ? */ | ||
| 814 | } | ||
| 815 | /* post card */ | ||
| 816 | if (rdev->is_atom_bios) { | ||
| 817 | atom_asic_init(rdev->mode_info.atom_context); | ||
| 818 | } else { | ||
| 819 | radeon_combios_asic_init(rdev->ddev); | ||
| 820 | } | ||
| 821 | /* Initialize clocks */ | ||
| 822 | r = radeon_clocks_init(rdev); | ||
| 823 | if (r) { | ||
| 824 | release_console_sem(); | ||
| 825 | return r; | ||
| 826 | } | ||
| 827 | /* Enable IRQ */ | ||
| 828 | rdev->irq.sw_int = true; | ||
| 829 | radeon_irq_set(rdev); | ||
| 830 | /* Initialize GPU Memory Controller */ | ||
| 831 | r = radeon_mc_init(rdev); | ||
| 832 | if (r) { | ||
| 833 | goto out; | ||
| 834 | } | ||
| 835 | r = radeon_gart_enable(rdev); | ||
| 836 | if (r) { | ||
| 837 | goto out; | ||
| 838 | } | ||
| 839 | r = radeon_cp_init(rdev, rdev->cp.ring_size); | ||
| 840 | if (r) { | ||
| 841 | goto out; | ||
| 842 | } | ||
| 843 | } else { | ||
| 844 | radeon_resume(rdev); | ||
| 845 | } | ||
| 846 | out: | ||
| 847 | radeon_restore_bios_scratch_regs(rdev); | 688 | radeon_restore_bios_scratch_regs(rdev); |
| 848 | fb_set_suspend(rdev->fbdev_info, 0); | 689 | fb_set_suspend(rdev->fbdev_info, 0); |
| 849 | release_console_sem(); | 690 | release_console_sem(); |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 5d8141b13765..3655d91993a6 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
| @@ -106,24 +106,33 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc) | |||
| 106 | legacy_crtc_load_lut(crtc); | 106 | legacy_crtc_load_lut(crtc); |
| 107 | } | 107 | } |
| 108 | 108 | ||
| 109 | /** Sets the color ramps on behalf of RandR */ | 109 | /** Sets the color ramps on behalf of fbcon */ |
| 110 | void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | 110 | void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
| 111 | u16 blue, int regno) | 111 | u16 blue, int regno) |
| 112 | { | 112 | { |
| 113 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 113 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
| 114 | 114 | ||
| 115 | if (regno == 0) | ||
| 116 | DRM_DEBUG("gamma set %d\n", radeon_crtc->crtc_id); | ||
| 117 | radeon_crtc->lut_r[regno] = red >> 6; | 115 | radeon_crtc->lut_r[regno] = red >> 6; |
| 118 | radeon_crtc->lut_g[regno] = green >> 6; | 116 | radeon_crtc->lut_g[regno] = green >> 6; |
| 119 | radeon_crtc->lut_b[regno] = blue >> 6; | 117 | radeon_crtc->lut_b[regno] = blue >> 6; |
| 120 | } | 118 | } |
| 121 | 119 | ||
| 120 | /** Gets the color ramps on behalf of fbcon */ | ||
| 121 | void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
| 122 | u16 *blue, int regno) | ||
| 123 | { | ||
| 124 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
| 125 | |||
| 126 | *red = radeon_crtc->lut_r[regno] << 6; | ||
| 127 | *green = radeon_crtc->lut_g[regno] << 6; | ||
| 128 | *blue = radeon_crtc->lut_b[regno] << 6; | ||
| 129 | } | ||
| 130 | |||
| 122 | static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | 131 | static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, |
| 123 | u16 *blue, uint32_t size) | 132 | u16 *blue, uint32_t size) |
| 124 | { | 133 | { |
| 125 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 134 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
| 126 | int i, j; | 135 | int i; |
| 127 | 136 | ||
| 128 | if (size != 256) { | 137 | if (size != 256) { |
| 129 | return; | 138 | return; |
| @@ -132,23 +141,11 @@ static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | |||
| 132 | return; | 141 | return; |
| 133 | } | 142 | } |
| 134 | 143 | ||
| 135 | if (crtc->fb->depth == 16) { | 144 | /* userspace palettes are always correct as is */ |
| 136 | for (i = 0; i < 64; i++) { | 145 | for (i = 0; i < 256; i++) { |
| 137 | if (i <= 31) { | 146 | radeon_crtc->lut_r[i] = red[i] >> 6; |
| 138 | for (j = 0; j < 8; j++) { | 147 | radeon_crtc->lut_g[i] = green[i] >> 6; |
| 139 | radeon_crtc->lut_r[i * 8 + j] = red[i] >> 6; | 148 | radeon_crtc->lut_b[i] = blue[i] >> 6; |
| 140 | radeon_crtc->lut_b[i * 8 + j] = blue[i] >> 6; | ||
| 141 | } | ||
| 142 | } | ||
| 143 | for (j = 0; j < 4; j++) | ||
| 144 | radeon_crtc->lut_g[i * 4 + j] = green[i] >> 6; | ||
| 145 | } | ||
| 146 | } else { | ||
| 147 | for (i = 0; i < 256; i++) { | ||
| 148 | radeon_crtc->lut_r[i] = red[i] >> 6; | ||
| 149 | radeon_crtc->lut_g[i] = green[i] >> 6; | ||
| 150 | radeon_crtc->lut_b[i] = blue[i] >> 6; | ||
| 151 | } | ||
| 152 | } | 149 | } |
| 153 | 150 | ||
| 154 | radeon_crtc_load_lut(crtc); | 151 | radeon_crtc_load_lut(crtc); |
| @@ -724,7 +721,11 @@ int radeon_modeset_init(struct radeon_device *rdev) | |||
| 724 | if (ret) { | 721 | if (ret) { |
| 725 | return ret; | 722 | return ret; |
| 726 | } | 723 | } |
| 727 | /* allocate crtcs - TODO single crtc */ | 724 | |
| 725 | if (rdev->flags & RADEON_SINGLE_CRTC) | ||
| 726 | num_crtc = 1; | ||
| 727 | |||
| 728 | /* allocate crtcs */ | ||
| 728 | for (i = 0; i < num_crtc; i++) { | 729 | for (i = 0; i < num_crtc; i++) { |
| 729 | radeon_crtc_init(rdev->ddev, i); | 730 | radeon_crtc_init(rdev->ddev, i); |
| 730 | } | 731 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 621646752cd2..a65ab1a0dad2 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
| @@ -1345,6 +1345,7 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) | |||
| 1345 | void | 1345 | void |
| 1346 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) | 1346 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) |
| 1347 | { | 1347 | { |
| 1348 | struct radeon_device *rdev = dev->dev_private; | ||
| 1348 | struct drm_encoder *encoder; | 1349 | struct drm_encoder *encoder; |
| 1349 | struct radeon_encoder *radeon_encoder; | 1350 | struct radeon_encoder *radeon_encoder; |
| 1350 | 1351 | ||
| @@ -1364,7 +1365,10 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su | |||
| 1364 | return; | 1365 | return; |
| 1365 | 1366 | ||
| 1366 | encoder = &radeon_encoder->base; | 1367 | encoder = &radeon_encoder->base; |
| 1367 | encoder->possible_crtcs = 0x3; | 1368 | if (rdev->flags & RADEON_SINGLE_CRTC) |
| 1369 | encoder->possible_crtcs = 0x1; | ||
| 1370 | else | ||
| 1371 | encoder->possible_crtcs = 0x3; | ||
| 1368 | encoder->possible_clones = 0; | 1372 | encoder->possible_clones = 0; |
| 1369 | 1373 | ||
| 1370 | radeon_encoder->enc_priv = NULL; | 1374 | radeon_encoder->enc_priv = NULL; |
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 1ba704eedefb..b38c4c8e2c61 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
| @@ -55,6 +55,7 @@ static struct fb_ops radeonfb_ops = { | |||
| 55 | .fb_imageblit = cfb_imageblit, | 55 | .fb_imageblit = cfb_imageblit, |
| 56 | .fb_pan_display = drm_fb_helper_pan_display, | 56 | .fb_pan_display = drm_fb_helper_pan_display, |
| 57 | .fb_blank = drm_fb_helper_blank, | 57 | .fb_blank = drm_fb_helper_blank, |
| 58 | .fb_setcmap = drm_fb_helper_setcmap, | ||
| 58 | }; | 59 | }; |
| 59 | 60 | ||
| 60 | /** | 61 | /** |
| @@ -123,6 +124,7 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo | |||
| 123 | 124 | ||
| 124 | static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { | 125 | static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { |
| 125 | .gamma_set = radeon_crtc_fb_gamma_set, | 126 | .gamma_set = radeon_crtc_fb_gamma_set, |
| 127 | .gamma_get = radeon_crtc_fb_gamma_get, | ||
| 126 | }; | 128 | }; |
| 127 | 129 | ||
| 128 | int radeonfb_create(struct drm_device *dev, | 130 | int radeonfb_create(struct drm_device *dev, |
| @@ -146,9 +148,15 @@ int radeonfb_create(struct drm_device *dev, | |||
| 146 | unsigned long tmp; | 148 | unsigned long tmp; |
| 147 | bool fb_tiled = false; /* useful for testing */ | 149 | bool fb_tiled = false; /* useful for testing */ |
| 148 | u32 tiling_flags = 0; | 150 | u32 tiling_flags = 0; |
| 151 | int crtc_count; | ||
| 149 | 152 | ||
| 150 | mode_cmd.width = surface_width; | 153 | mode_cmd.width = surface_width; |
| 151 | mode_cmd.height = surface_height; | 154 | mode_cmd.height = surface_height; |
| 155 | |||
| 156 | /* avivo can't scanout real 24bpp */ | ||
| 157 | if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev)) | ||
| 158 | surface_bpp = 32; | ||
| 159 | |||
| 152 | mode_cmd.bpp = surface_bpp; | 160 | mode_cmd.bpp = surface_bpp; |
| 153 | /* need to align pitch with crtc limits */ | 161 | /* need to align pitch with crtc limits */ |
| 154 | mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); | 162 | mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); |
| @@ -217,7 +225,11 @@ int radeonfb_create(struct drm_device *dev, | |||
| 217 | rfbdev = info->par; | 225 | rfbdev = info->par; |
| 218 | rfbdev->helper.funcs = &radeon_fb_helper_funcs; | 226 | rfbdev->helper.funcs = &radeon_fb_helper_funcs; |
| 219 | rfbdev->helper.dev = dev; | 227 | rfbdev->helper.dev = dev; |
| 220 | ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, 2, | 228 | if (rdev->flags & RADEON_SINGLE_CRTC) |
| 229 | crtc_count = 1; | ||
| 230 | else | ||
| 231 | crtc_count = 2; | ||
| 232 | ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, crtc_count, | ||
| 221 | RADEONFB_CONN_LIMIT); | 233 | RADEONFB_CONN_LIMIT); |
| 222 | if (ret) | 234 | if (ret) |
| 223 | goto out_unref; | 235 | goto out_unref; |
| @@ -234,7 +246,7 @@ int radeonfb_create(struct drm_device *dev, | |||
| 234 | 246 | ||
| 235 | strcpy(info->fix.id, "radeondrmfb"); | 247 | strcpy(info->fix.id, "radeondrmfb"); |
| 236 | 248 | ||
| 237 | drm_fb_helper_fill_fix(info, fb->pitch); | 249 | drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); |
| 238 | 250 | ||
| 239 | info->flags = FBINFO_DEFAULT; | 251 | info->flags = FBINFO_DEFAULT; |
| 240 | info->fbops = &radeonfb_ops; | 252 | info->fbops = &radeonfb_ops; |
| @@ -309,7 +321,7 @@ int radeon_parse_options(char *options) | |||
| 309 | 321 | ||
| 310 | int radeonfb_probe(struct drm_device *dev) | 322 | int radeonfb_probe(struct drm_device *dev) |
| 311 | { | 323 | { |
| 312 | return drm_fb_helper_single_fb_probe(dev, &radeonfb_create); | 324 | return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create); |
| 313 | } | 325 | } |
| 314 | 326 | ||
| 315 | int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) | 327 | int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) |
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 1841145a7c4f..8e0a8759e428 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c | |||
| @@ -83,8 +83,12 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) | |||
| 83 | int radeon_irq_kms_init(struct radeon_device *rdev) | 83 | int radeon_irq_kms_init(struct radeon_device *rdev) |
| 84 | { | 84 | { |
| 85 | int r = 0; | 85 | int r = 0; |
| 86 | int num_crtc = 2; | ||
| 86 | 87 | ||
| 87 | r = drm_vblank_init(rdev->ddev, 2); | 88 | if (rdev->flags & RADEON_SINGLE_CRTC) |
| 89 | num_crtc = 1; | ||
| 90 | |||
| 91 | r = drm_vblank_init(rdev->ddev, num_crtc); | ||
| 88 | if (r) { | 92 | if (r) { |
| 89 | return r; | 93 | return r; |
| 90 | } | 94 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 2b997a15fb1f..36410f85d705 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
| @@ -1053,6 +1053,7 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = { | |||
| 1053 | .mode_set_base = radeon_crtc_set_base, | 1053 | .mode_set_base = radeon_crtc_set_base, |
| 1054 | .prepare = radeon_crtc_prepare, | 1054 | .prepare = radeon_crtc_prepare, |
| 1055 | .commit = radeon_crtc_commit, | 1055 | .commit = radeon_crtc_commit, |
| 1056 | .load_lut = radeon_crtc_load_lut, | ||
| 1056 | }; | 1057 | }; |
| 1057 | 1058 | ||
| 1058 | 1059 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index b1547f700d73..6ceb958fd194 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
| @@ -881,7 +881,7 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, | |||
| 881 | R420_TV_DAC_DACADJ_MASK | | 881 | R420_TV_DAC_DACADJ_MASK | |
| 882 | R420_TV_DAC_RDACPD | | 882 | R420_TV_DAC_RDACPD | |
| 883 | R420_TV_DAC_GDACPD | | 883 | R420_TV_DAC_GDACPD | |
| 884 | R420_TV_DAC_GDACPD | | 884 | R420_TV_DAC_BDACPD | |
| 885 | R420_TV_DAC_TVENABLE); | 885 | R420_TV_DAC_TVENABLE); |
| 886 | } else { | 886 | } else { |
| 887 | tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | | 887 | tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | |
| @@ -889,7 +889,7 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, | |||
| 889 | RADEON_TV_DAC_DACADJ_MASK | | 889 | RADEON_TV_DAC_DACADJ_MASK | |
| 890 | RADEON_TV_DAC_RDACPD | | 890 | RADEON_TV_DAC_RDACPD | |
| 891 | RADEON_TV_DAC_GDACPD | | 891 | RADEON_TV_DAC_GDACPD | |
| 892 | RADEON_TV_DAC_GDACPD); | 892 | RADEON_TV_DAC_BDACPD); |
| 893 | } | 893 | } |
| 894 | 894 | ||
| 895 | /* FIXME TV */ | 895 | /* FIXME TV */ |
| @@ -1318,7 +1318,10 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t | |||
| 1318 | return; | 1318 | return; |
| 1319 | 1319 | ||
| 1320 | encoder = &radeon_encoder->base; | 1320 | encoder = &radeon_encoder->base; |
| 1321 | encoder->possible_crtcs = 0x3; | 1321 | if (rdev->flags & RADEON_SINGLE_CRTC) |
| 1322 | encoder->possible_crtcs = 0x1; | ||
| 1323 | else | ||
| 1324 | encoder->possible_crtcs = 0x3; | ||
| 1322 | encoder->possible_clones = 0; | 1325 | encoder->possible_clones = 0; |
| 1323 | 1326 | ||
| 1324 | radeon_encoder->enc_priv = NULL; | 1327 | radeon_encoder->enc_priv = NULL; |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 570a58729daf..e61226817ccf 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
| @@ -407,6 +407,8 @@ extern void | |||
| 407 | radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on); | 407 | radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on); |
| 408 | extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | 408 | extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
| 409 | u16 blue, int regno); | 409 | u16 blue, int regno); |
| 410 | extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
| 411 | u16 *blue, int regno); | ||
| 410 | struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev, | 412 | struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev, |
| 411 | struct drm_mode_fb_cmd *mode_cmd, | 413 | struct drm_mode_fb_cmd *mode_cmd, |
| 412 | struct drm_gem_object *obj); | 414 | struct drm_gem_object *obj); |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 73af463b7a59..1f056dadc5c2 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
| @@ -400,11 +400,9 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj, | |||
| 400 | int radeon_object_list_reserve(struct list_head *head) | 400 | int radeon_object_list_reserve(struct list_head *head) |
| 401 | { | 401 | { |
| 402 | struct radeon_object_list *lobj; | 402 | struct radeon_object_list *lobj; |
| 403 | struct list_head *i; | ||
| 404 | int r; | 403 | int r; |
| 405 | 404 | ||
| 406 | list_for_each(i, head) { | 405 | list_for_each_entry(lobj, head, list){ |
| 407 | lobj = list_entry(i, struct radeon_object_list, list); | ||
| 408 | if (!lobj->robj->pin_count) { | 406 | if (!lobj->robj->pin_count) { |
| 409 | r = radeon_object_reserve(lobj->robj, true); | 407 | r = radeon_object_reserve(lobj->robj, true); |
| 410 | if (unlikely(r != 0)) { | 408 | if (unlikely(r != 0)) { |
| @@ -420,13 +418,10 @@ int radeon_object_list_reserve(struct list_head *head) | |||
| 420 | void radeon_object_list_unreserve(struct list_head *head) | 418 | void radeon_object_list_unreserve(struct list_head *head) |
| 421 | { | 419 | { |
| 422 | struct radeon_object_list *lobj; | 420 | struct radeon_object_list *lobj; |
| 423 | struct list_head *i; | ||
| 424 | 421 | ||
| 425 | list_for_each(i, head) { | 422 | list_for_each_entry(lobj, head, list) { |
| 426 | lobj = list_entry(i, struct radeon_object_list, list); | ||
| 427 | if (!lobj->robj->pin_count) { | 423 | if (!lobj->robj->pin_count) { |
| 428 | radeon_object_unreserve(lobj->robj); | 424 | radeon_object_unreserve(lobj->robj); |
| 429 | } else { | ||
| 430 | } | 425 | } |
| 431 | } | 426 | } |
| 432 | } | 427 | } |
| @@ -436,7 +431,6 @@ int radeon_object_list_validate(struct list_head *head, void *fence) | |||
| 436 | struct radeon_object_list *lobj; | 431 | struct radeon_object_list *lobj; |
| 437 | struct radeon_object *robj; | 432 | struct radeon_object *robj; |
| 438 | struct radeon_fence *old_fence = NULL; | 433 | struct radeon_fence *old_fence = NULL; |
| 439 | struct list_head *i; | ||
| 440 | int r; | 434 | int r; |
| 441 | 435 | ||
| 442 | r = radeon_object_list_reserve(head); | 436 | r = radeon_object_list_reserve(head); |
| @@ -444,8 +438,7 @@ int radeon_object_list_validate(struct list_head *head, void *fence) | |||
| 444 | radeon_object_list_unreserve(head); | 438 | radeon_object_list_unreserve(head); |
| 445 | return r; | 439 | return r; |
| 446 | } | 440 | } |
| 447 | list_for_each(i, head) { | 441 | list_for_each_entry(lobj, head, list) { |
| 448 | lobj = list_entry(i, struct radeon_object_list, list); | ||
| 449 | robj = lobj->robj; | 442 | robj = lobj->robj; |
| 450 | if (!robj->pin_count) { | 443 | if (!robj->pin_count) { |
| 451 | if (lobj->wdomain) { | 444 | if (lobj->wdomain) { |
| @@ -482,10 +475,8 @@ void radeon_object_list_unvalidate(struct list_head *head) | |||
| 482 | { | 475 | { |
| 483 | struct radeon_object_list *lobj; | 476 | struct radeon_object_list *lobj; |
| 484 | struct radeon_fence *old_fence = NULL; | 477 | struct radeon_fence *old_fence = NULL; |
| 485 | struct list_head *i; | ||
| 486 | 478 | ||
| 487 | list_for_each(i, head) { | 479 | list_for_each_entry(lobj, head, list) { |
| 488 | lobj = list_entry(i, struct radeon_object_list, list); | ||
| 489 | old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj; | 480 | old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj; |
| 490 | lobj->robj->tobj.sync_obj = NULL; | 481 | lobj->robj->tobj.sync_obj = NULL; |
| 491 | if (old_fence) { | 482 | if (old_fence) { |
diff --git a/drivers/gpu/drm/radeon/rs100d.h b/drivers/gpu/drm/radeon/rs100d.h new file mode 100644 index 000000000000..48a913a06cfd --- /dev/null +++ b/drivers/gpu/drm/radeon/rs100d.h | |||
| @@ -0,0 +1,40 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #ifndef __RS100D_H__ | ||
| 29 | #define __RS100D_H__ | ||
| 30 | |||
| 31 | /* Registers */ | ||
| 32 | #define R_00015C_NB_TOM 0x00015C | ||
| 33 | #define S_00015C_MC_FB_START(x) (((x) & 0xFFFF) << 0) | ||
| 34 | #define G_00015C_MC_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
| 35 | #define C_00015C_MC_FB_START 0xFFFF0000 | ||
| 36 | #define S_00015C_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) | ||
| 37 | #define G_00015C_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) | ||
| 38 | #define C_00015C_MC_FB_TOP 0x0000FFFF | ||
| 39 | |||
| 40 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index a3fbdad938c7..a769c296f6a6 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
| @@ -27,27 +27,12 @@ | |||
| 27 | */ | 27 | */ |
| 28 | #include <linux/seq_file.h> | 28 | #include <linux/seq_file.h> |
| 29 | #include <drm/drmP.h> | 29 | #include <drm/drmP.h> |
| 30 | #include "radeon_reg.h" | ||
| 31 | #include "radeon.h" | 30 | #include "radeon.h" |
| 31 | #include "rs400d.h" | ||
| 32 | 32 | ||
| 33 | /* rs400,rs480 depends on : */ | 33 | /* This files gather functions specifics to : rs400,rs480 */ |
| 34 | void r100_hdp_reset(struct radeon_device *rdev); | 34 | static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev); |
| 35 | void r100_mc_disable_clients(struct radeon_device *rdev); | ||
| 36 | int r300_mc_wait_for_idle(struct radeon_device *rdev); | ||
| 37 | void r420_pipes_init(struct radeon_device *rdev); | ||
| 38 | 35 | ||
| 39 | /* This files gather functions specifics to : | ||
| 40 | * rs400,rs480 | ||
| 41 | * | ||
| 42 | * Some of these functions might be used by newer ASICs. | ||
| 43 | */ | ||
| 44 | void rs400_gpu_init(struct radeon_device *rdev); | ||
| 45 | int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev); | ||
| 46 | |||
| 47 | |||
| 48 | /* | ||
| 49 | * GART functions. | ||
| 50 | */ | ||
| 51 | void rs400_gart_adjust_size(struct radeon_device *rdev) | 36 | void rs400_gart_adjust_size(struct radeon_device *rdev) |
| 52 | { | 37 | { |
| 53 | /* Check gart size */ | 38 | /* Check gart size */ |
| @@ -238,61 +223,6 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
| 238 | return 0; | 223 | return 0; |
| 239 | } | 224 | } |
| 240 | 225 | ||
| 241 | |||
| 242 | /* | ||
| 243 | * MC functions. | ||
| 244 | */ | ||
| 245 | int rs400_mc_init(struct radeon_device *rdev) | ||
| 246 | { | ||
| 247 | uint32_t tmp; | ||
| 248 | int r; | ||
| 249 | |||
| 250 | if (r100_debugfs_rbbm_init(rdev)) { | ||
| 251 | DRM_ERROR("Failed to register debugfs file for RBBM !\n"); | ||
| 252 | } | ||
| 253 | |||
| 254 | rs400_gpu_init(rdev); | ||
| 255 | rs400_gart_disable(rdev); | ||
| 256 | rdev->mc.gtt_location = rdev->mc.mc_vram_size; | ||
| 257 | rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); | ||
| 258 | rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); | ||
| 259 | r = radeon_mc_setup(rdev); | ||
| 260 | if (r) { | ||
| 261 | return r; | ||
| 262 | } | ||
| 263 | |||
| 264 | r100_mc_disable_clients(rdev); | ||
| 265 | if (r300_mc_wait_for_idle(rdev)) { | ||
| 266 | printk(KERN_WARNING "Failed to wait MC idle while " | ||
| 267 | "programming pipes. Bad things might happen.\n"); | ||
| 268 | } | ||
| 269 | |||
| 270 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; | ||
| 271 | tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); | ||
| 272 | tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); | ||
| 273 | WREG32(RADEON_MC_FB_LOCATION, tmp); | ||
| 274 | tmp = RREG32(RADEON_HOST_PATH_CNTL) | RADEON_HP_LIN_RD_CACHE_DIS; | ||
| 275 | WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE); | ||
| 276 | (void)RREG32(RADEON_HOST_PATH_CNTL); | ||
| 277 | WREG32(RADEON_HOST_PATH_CNTL, tmp); | ||
| 278 | (void)RREG32(RADEON_HOST_PATH_CNTL); | ||
| 279 | |||
| 280 | return 0; | ||
| 281 | } | ||
| 282 | |||
| 283 | void rs400_mc_fini(struct radeon_device *rdev) | ||
| 284 | { | ||
| 285 | } | ||
| 286 | |||
| 287 | |||
| 288 | /* | ||
| 289 | * Global GPU functions | ||
| 290 | */ | ||
| 291 | void rs400_errata(struct radeon_device *rdev) | ||
| 292 | { | ||
| 293 | rdev->pll_errata = 0; | ||
| 294 | } | ||
| 295 | |||
| 296 | void rs400_gpu_init(struct radeon_device *rdev) | 226 | void rs400_gpu_init(struct radeon_device *rdev) |
| 297 | { | 227 | { |
| 298 | /* FIXME: HDP same place on rs400 ? */ | 228 | /* FIXME: HDP same place on rs400 ? */ |
| @@ -305,10 +235,6 @@ void rs400_gpu_init(struct radeon_device *rdev) | |||
| 305 | } | 235 | } |
| 306 | } | 236 | } |
| 307 | 237 | ||
| 308 | |||
| 309 | /* | ||
| 310 | * VRAM info. | ||
| 311 | */ | ||
| 312 | void rs400_vram_info(struct radeon_device *rdev) | 238 | void rs400_vram_info(struct radeon_device *rdev) |
| 313 | { | 239 | { |
| 314 | rs400_gart_adjust_size(rdev); | 240 | rs400_gart_adjust_size(rdev); |
| @@ -319,10 +245,6 @@ void rs400_vram_info(struct radeon_device *rdev) | |||
| 319 | r100_vram_init_sizes(rdev); | 245 | r100_vram_init_sizes(rdev); |
| 320 | } | 246 | } |
| 321 | 247 | ||
| 322 | |||
| 323 | /* | ||
| 324 | * Indirect registers accessor | ||
| 325 | */ | ||
| 326 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) | 248 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
| 327 | { | 249 | { |
| 328 | uint32_t r; | 250 | uint32_t r; |
| @@ -340,10 +262,6 @@ void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | |||
| 340 | WREG32(RS480_NB_MC_INDEX, 0xff); | 262 | WREG32(RS480_NB_MC_INDEX, 0xff); |
| 341 | } | 263 | } |
| 342 | 264 | ||
| 343 | |||
| 344 | /* | ||
| 345 | * Debugfs info | ||
| 346 | */ | ||
| 347 | #if defined(CONFIG_DEBUG_FS) | 265 | #if defined(CONFIG_DEBUG_FS) |
| 348 | static int rs400_debugfs_gart_info(struct seq_file *m, void *data) | 266 | static int rs400_debugfs_gart_info(struct seq_file *m, void *data) |
| 349 | { | 267 | { |
| @@ -419,7 +337,7 @@ static struct drm_info_list rs400_gart_info_list[] = { | |||
| 419 | }; | 337 | }; |
| 420 | #endif | 338 | #endif |
| 421 | 339 | ||
| 422 | int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev) | 340 | static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev) |
| 423 | { | 341 | { |
| 424 | #if defined(CONFIG_DEBUG_FS) | 342 | #if defined(CONFIG_DEBUG_FS) |
| 425 | return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1); | 343 | return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1); |
| @@ -427,3 +345,188 @@ int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev) | |||
| 427 | return 0; | 345 | return 0; |
| 428 | #endif | 346 | #endif |
| 429 | } | 347 | } |
| 348 | |||
| 349 | static int rs400_mc_init(struct radeon_device *rdev) | ||
| 350 | { | ||
| 351 | int r; | ||
| 352 | u32 tmp; | ||
| 353 | |||
| 354 | /* Setup GPU memory space */ | ||
| 355 | tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM)); | ||
| 356 | rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16; | ||
| 357 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | ||
| 358 | r = radeon_mc_setup(rdev); | ||
| 359 | if (r) | ||
| 360 | return r; | ||
| 361 | return 0; | ||
| 362 | } | ||
| 363 | |||
| 364 | void rs400_mc_program(struct radeon_device *rdev) | ||
| 365 | { | ||
| 366 | struct r100_mc_save save; | ||
| 367 | |||
| 368 | /* Stops all mc clients */ | ||
| 369 | r100_mc_stop(rdev, &save); | ||
| 370 | |||
| 371 | /* Wait for mc idle */ | ||
| 372 | if (r300_mc_wait_for_idle(rdev)) | ||
| 373 | dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); | ||
| 374 | WREG32(R_000148_MC_FB_LOCATION, | ||
| 375 | S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | | ||
| 376 | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); | ||
| 377 | |||
| 378 | r100_mc_resume(rdev, &save); | ||
| 379 | } | ||
| 380 | |||
| 381 | static int rs400_startup(struct radeon_device *rdev) | ||
| 382 | { | ||
| 383 | int r; | ||
| 384 | |||
| 385 | rs400_mc_program(rdev); | ||
| 386 | /* Resume clock */ | ||
| 387 | r300_clock_startup(rdev); | ||
| 388 | /* Initialize GPU configuration (# pipes, ...) */ | ||
| 389 | rs400_gpu_init(rdev); | ||
| 390 | /* Initialize GART (initialize after TTM so we can allocate | ||
| 391 | * memory through TTM but finalize after TTM) */ | ||
| 392 | r = rs400_gart_enable(rdev); | ||
| 393 | if (r) | ||
| 394 | return r; | ||
| 395 | /* Enable IRQ */ | ||
| 396 | rdev->irq.sw_int = true; | ||
| 397 | r100_irq_set(rdev); | ||
| 398 | /* 1M ring buffer */ | ||
| 399 | r = r100_cp_init(rdev, 1024 * 1024); | ||
| 400 | if (r) { | ||
| 401 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | ||
| 402 | return r; | ||
| 403 | } | ||
| 404 | r = r100_wb_init(rdev); | ||
| 405 | if (r) | ||
| 406 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
| 407 | r = r100_ib_init(rdev); | ||
| 408 | if (r) { | ||
| 409 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | ||
| 410 | return r; | ||
| 411 | } | ||
| 412 | return 0; | ||
| 413 | } | ||
| 414 | |||
| 415 | int rs400_resume(struct radeon_device *rdev) | ||
| 416 | { | ||
| 417 | /* Make sur GART are not working */ | ||
| 418 | rs400_gart_disable(rdev); | ||
| 419 | /* Resume clock before doing reset */ | ||
| 420 | r300_clock_startup(rdev); | ||
| 421 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
| 422 | if (radeon_gpu_reset(rdev)) { | ||
| 423 | dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
| 424 | RREG32(R_000E40_RBBM_STATUS), | ||
| 425 | RREG32(R_0007C0_CP_STAT)); | ||
| 426 | } | ||
| 427 | /* post */ | ||
| 428 | radeon_combios_asic_init(rdev->ddev); | ||
| 429 | /* Resume clock after posting */ | ||
| 430 | r300_clock_startup(rdev); | ||
| 431 | return rs400_startup(rdev); | ||
| 432 | } | ||
| 433 | |||
| 434 | int rs400_suspend(struct radeon_device *rdev) | ||
| 435 | { | ||
| 436 | r100_cp_disable(rdev); | ||
| 437 | r100_wb_disable(rdev); | ||
| 438 | r100_irq_disable(rdev); | ||
| 439 | rs400_gart_disable(rdev); | ||
| 440 | return 0; | ||
| 441 | } | ||
| 442 | |||
| 443 | void rs400_fini(struct radeon_device *rdev) | ||
| 444 | { | ||
| 445 | rs400_suspend(rdev); | ||
| 446 | r100_cp_fini(rdev); | ||
| 447 | r100_wb_fini(rdev); | ||
| 448 | r100_ib_fini(rdev); | ||
| 449 | radeon_gem_fini(rdev); | ||
| 450 | rs400_gart_fini(rdev); | ||
| 451 | radeon_irq_kms_fini(rdev); | ||
| 452 | radeon_fence_driver_fini(rdev); | ||
| 453 | radeon_object_fini(rdev); | ||
| 454 | radeon_atombios_fini(rdev); | ||
| 455 | kfree(rdev->bios); | ||
| 456 | rdev->bios = NULL; | ||
| 457 | } | ||
| 458 | |||
| 459 | int rs400_init(struct radeon_device *rdev) | ||
| 460 | { | ||
| 461 | int r; | ||
| 462 | |||
| 463 | /* Disable VGA */ | ||
| 464 | r100_vga_render_disable(rdev); | ||
| 465 | /* Initialize scratch registers */ | ||
| 466 | radeon_scratch_init(rdev); | ||
| 467 | /* Initialize surface registers */ | ||
| 468 | radeon_surface_init(rdev); | ||
| 469 | /* TODO: disable VGA need to use VGA request */ | ||
| 470 | /* BIOS*/ | ||
| 471 | if (!radeon_get_bios(rdev)) { | ||
| 472 | if (ASIC_IS_AVIVO(rdev)) | ||
| 473 | return -EINVAL; | ||
| 474 | } | ||
| 475 | if (rdev->is_atom_bios) { | ||
| 476 | dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n"); | ||
| 477 | return -EINVAL; | ||
| 478 | } else { | ||
| 479 | r = radeon_combios_init(rdev); | ||
| 480 | if (r) | ||
| 481 | return r; | ||
| 482 | } | ||
| 483 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
| 484 | if (radeon_gpu_reset(rdev)) { | ||
| 485 | dev_warn(rdev->dev, | ||
| 486 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
| 487 | RREG32(R_000E40_RBBM_STATUS), | ||
| 488 | RREG32(R_0007C0_CP_STAT)); | ||
| 489 | } | ||
| 490 | /* check if cards are posted or not */ | ||
| 491 | if (!radeon_card_posted(rdev) && rdev->bios) { | ||
| 492 | DRM_INFO("GPU not posted. posting now...\n"); | ||
| 493 | radeon_combios_asic_init(rdev->ddev); | ||
| 494 | } | ||
| 495 | /* Initialize clocks */ | ||
| 496 | radeon_get_clock_info(rdev->ddev); | ||
| 497 | /* Get vram informations */ | ||
| 498 | rs400_vram_info(rdev); | ||
| 499 | /* Initialize memory controller (also test AGP) */ | ||
| 500 | r = rs400_mc_init(rdev); | ||
| 501 | if (r) | ||
| 502 | return r; | ||
| 503 | /* Fence driver */ | ||
| 504 | r = radeon_fence_driver_init(rdev); | ||
| 505 | if (r) | ||
| 506 | return r; | ||
| 507 | r = radeon_irq_kms_init(rdev); | ||
| 508 | if (r) | ||
| 509 | return r; | ||
| 510 | /* Memory manager */ | ||
| 511 | r = radeon_object_init(rdev); | ||
| 512 | if (r) | ||
| 513 | return r; | ||
| 514 | r = rs400_gart_init(rdev); | ||
| 515 | if (r) | ||
| 516 | return r; | ||
| 517 | r300_set_reg_safe(rdev); | ||
| 518 | rdev->accel_working = true; | ||
| 519 | r = rs400_startup(rdev); | ||
| 520 | if (r) { | ||
| 521 | /* Somethings want wront with the accel init stop accel */ | ||
| 522 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | ||
| 523 | rs400_suspend(rdev); | ||
| 524 | r100_cp_fini(rdev); | ||
| 525 | r100_wb_fini(rdev); | ||
| 526 | r100_ib_fini(rdev); | ||
| 527 | rs400_gart_fini(rdev); | ||
| 528 | radeon_irq_kms_fini(rdev); | ||
| 529 | rdev->accel_working = false; | ||
| 530 | } | ||
| 531 | return 0; | ||
| 532 | } | ||
diff --git a/drivers/gpu/drm/radeon/rs400d.h b/drivers/gpu/drm/radeon/rs400d.h new file mode 100644 index 000000000000..6d8bac58ced9 --- /dev/null +++ b/drivers/gpu/drm/radeon/rs400d.h | |||
| @@ -0,0 +1,160 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #ifndef __RS400D_H__ | ||
| 29 | #define __RS400D_H__ | ||
| 30 | |||
| 31 | /* Registers */ | ||
| 32 | #define R_000148_MC_FB_LOCATION 0x000148 | ||
| 33 | #define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0) | ||
| 34 | #define G_000148_MC_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
| 35 | #define C_000148_MC_FB_START 0xFFFF0000 | ||
| 36 | #define S_000148_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) | ||
| 37 | #define G_000148_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) | ||
| 38 | #define C_000148_MC_FB_TOP 0x0000FFFF | ||
| 39 | #define R_00015C_NB_TOM 0x00015C | ||
| 40 | #define S_00015C_MC_FB_START(x) (((x) & 0xFFFF) << 0) | ||
| 41 | #define G_00015C_MC_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
| 42 | #define C_00015C_MC_FB_START 0xFFFF0000 | ||
| 43 | #define S_00015C_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) | ||
| 44 | #define G_00015C_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) | ||
| 45 | #define C_00015C_MC_FB_TOP 0x0000FFFF | ||
| 46 | #define R_0007C0_CP_STAT 0x0007C0 | ||
| 47 | #define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) | ||
| 48 | #define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) | ||
| 49 | #define C_0007C0_MRU_BUSY 0xFFFFFFFE | ||
| 50 | #define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) | ||
| 51 | #define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) | ||
| 52 | #define C_0007C0_MWU_BUSY 0xFFFFFFFD | ||
| 53 | #define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) | ||
| 54 | #define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) | ||
| 55 | #define C_0007C0_RSIU_BUSY 0xFFFFFFFB | ||
| 56 | #define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) | ||
| 57 | #define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) | ||
| 58 | #define C_0007C0_RCIU_BUSY 0xFFFFFFF7 | ||
| 59 | #define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) | ||
| 60 | #define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) | ||
| 61 | #define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF | ||
| 62 | #define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) | ||
| 63 | #define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) | ||
| 64 | #define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF | ||
| 65 | #define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) | ||
| 66 | #define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) | ||
| 67 | #define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF | ||
| 68 | #define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) | ||
| 69 | #define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) | ||
| 70 | #define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF | ||
| 71 | #define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) | ||
| 72 | #define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) | ||
| 73 | #define C_0007C0_CSI_BUSY 0xFFFFDFFF | ||
| 74 | #define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14) | ||
| 75 | #define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1) | ||
| 76 | #define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF | ||
| 77 | #define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15) | ||
| 78 | #define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1) | ||
| 79 | #define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF | ||
| 80 | #define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) | ||
| 81 | #define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) | ||
| 82 | #define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF | ||
| 83 | #define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) | ||
| 84 | #define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) | ||
| 85 | #define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF | ||
| 86 | #define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) | ||
| 87 | #define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) | ||
| 88 | #define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF | ||
| 89 | #define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) | ||
| 90 | #define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) | ||
| 91 | #define C_0007C0_CP_BUSY 0x7FFFFFFF | ||
| 92 | #define R_000E40_RBBM_STATUS 0x000E40 | ||
| 93 | #define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) | ||
| 94 | #define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) | ||
| 95 | #define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 | ||
| 96 | #define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) | ||
| 97 | #define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) | ||
| 98 | #define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF | ||
| 99 | #define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) | ||
| 100 | #define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) | ||
| 101 | #define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF | ||
| 102 | #define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) | ||
| 103 | #define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) | ||
| 104 | #define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF | ||
| 105 | #define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) | ||
| 106 | #define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) | ||
| 107 | #define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF | ||
| 108 | #define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) | ||
| 109 | #define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) | ||
| 110 | #define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF | ||
| 111 | #define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) | ||
| 112 | #define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) | ||
| 113 | #define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF | ||
| 114 | #define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) | ||
| 115 | #define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) | ||
| 116 | #define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF | ||
| 117 | #define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) | ||
| 118 | #define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) | ||
| 119 | #define C_000E40_ENG_EV_BUSY 0xFFFF7FFF | ||
| 120 | #define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) | ||
| 121 | #define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) | ||
| 122 | #define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF | ||
| 123 | #define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) | ||
| 124 | #define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) | ||
| 125 | #define C_000E40_E2_BUSY 0xFFFDFFFF | ||
| 126 | #define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) | ||
| 127 | #define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) | ||
| 128 | #define C_000E40_RB2D_BUSY 0xFFFBFFFF | ||
| 129 | #define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) | ||
| 130 | #define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) | ||
| 131 | #define C_000E40_RB3D_BUSY 0xFFF7FFFF | ||
| 132 | #define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20) | ||
| 133 | #define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1) | ||
| 134 | #define C_000E40_VAP_BUSY 0xFFEFFFFF | ||
| 135 | #define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) | ||
| 136 | #define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) | ||
| 137 | #define C_000E40_RE_BUSY 0xFFDFFFFF | ||
| 138 | #define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) | ||
| 139 | #define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) | ||
| 140 | #define C_000E40_TAM_BUSY 0xFFBFFFFF | ||
| 141 | #define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) | ||
| 142 | #define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) | ||
| 143 | #define C_000E40_TDM_BUSY 0xFF7FFFFF | ||
| 144 | #define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) | ||
| 145 | #define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) | ||
| 146 | #define C_000E40_PB_BUSY 0xFEFFFFFF | ||
| 147 | #define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25) | ||
| 148 | #define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1) | ||
| 149 | #define C_000E40_TIM_BUSY 0xFDFFFFFF | ||
| 150 | #define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26) | ||
| 151 | #define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1) | ||
| 152 | #define C_000E40_GA_BUSY 0xFBFFFFFF | ||
| 153 | #define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27) | ||
| 154 | #define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1) | ||
| 155 | #define C_000E40_CBA2D_BUSY 0xF7FFFFFF | ||
| 156 | #define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) | ||
| 157 | #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) | ||
| 158 | #define C_000E40_GUI_ACTIVE 0x7FFFFFFF | ||
| 159 | |||
| 160 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 4a4fe1cb131c..10dfa78762da 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
| @@ -25,27 +25,26 @@ | |||
| 25 | * Alex Deucher | 25 | * Alex Deucher |
| 26 | * Jerome Glisse | 26 | * Jerome Glisse |
| 27 | */ | 27 | */ |
| 28 | /* RS600 / Radeon X1250/X1270 integrated GPU | ||
| 29 | * | ||
| 30 | * This file gather function specific to RS600 which is the IGP of | ||
| 31 | * the X1250/X1270 family supporting intel CPU (while RS690/RS740 | ||
| 32 | * is the X1250/X1270 supporting AMD CPU). The display engine are | ||
| 33 | * the avivo one, bios is an atombios, 3D block are the one of the | ||
| 34 | * R4XX family. The GART is different from the RS400 one and is very | ||
| 35 | * close to the one of the R600 family (R600 likely being an evolution | ||
| 36 | * of the RS600 GART block). | ||
| 37 | */ | ||
| 28 | #include "drmP.h" | 38 | #include "drmP.h" |
| 29 | #include "radeon_reg.h" | ||
| 30 | #include "radeon.h" | 39 | #include "radeon.h" |
| 40 | #include "atom.h" | ||
| 41 | #include "rs600d.h" | ||
| 31 | 42 | ||
| 32 | #include "rs600_reg_safe.h" | 43 | #include "rs600_reg_safe.h" |
| 33 | 44 | ||
| 34 | /* rs600 depends on : */ | ||
| 35 | void r100_hdp_reset(struct radeon_device *rdev); | ||
| 36 | int r100_gui_wait_for_idle(struct radeon_device *rdev); | ||
| 37 | int r300_mc_wait_for_idle(struct radeon_device *rdev); | ||
| 38 | void r420_pipes_init(struct radeon_device *rdev); | ||
| 39 | |||
| 40 | /* This files gather functions specifics to : | ||
| 41 | * rs600 | ||
| 42 | * | ||
| 43 | * Some of these functions might be used by newer ASICs. | ||
| 44 | */ | ||
| 45 | void rs600_gpu_init(struct radeon_device *rdev); | 45 | void rs600_gpu_init(struct radeon_device *rdev); |
| 46 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); | 46 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); |
| 47 | 47 | ||
| 48 | |||
| 49 | /* | 48 | /* |
| 50 | * GART. | 49 | * GART. |
| 51 | */ | 50 | */ |
| @@ -53,18 +52,18 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev) | |||
| 53 | { | 52 | { |
| 54 | uint32_t tmp; | 53 | uint32_t tmp; |
| 55 | 54 | ||
| 56 | tmp = RREG32_MC(RS600_MC_PT0_CNTL); | 55 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
| 57 | tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE); | 56 | tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE; |
| 58 | WREG32_MC(RS600_MC_PT0_CNTL, tmp); | 57 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); |
| 59 | 58 | ||
| 60 | tmp = RREG32_MC(RS600_MC_PT0_CNTL); | 59 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
| 61 | tmp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE; | 60 | tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1); |
| 62 | WREG32_MC(RS600_MC_PT0_CNTL, tmp); | 61 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); |
| 63 | 62 | ||
| 64 | tmp = RREG32_MC(RS600_MC_PT0_CNTL); | 63 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
| 65 | tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE); | 64 | tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE; |
| 66 | WREG32_MC(RS600_MC_PT0_CNTL, tmp); | 65 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); |
| 67 | tmp = RREG32_MC(RS600_MC_PT0_CNTL); | 66 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
| 68 | } | 67 | } |
| 69 | 68 | ||
| 70 | int rs600_gart_init(struct radeon_device *rdev) | 69 | int rs600_gart_init(struct radeon_device *rdev) |
| @@ -86,7 +85,7 @@ int rs600_gart_init(struct radeon_device *rdev) | |||
| 86 | 85 | ||
| 87 | int rs600_gart_enable(struct radeon_device *rdev) | 86 | int rs600_gart_enable(struct radeon_device *rdev) |
| 88 | { | 87 | { |
| 89 | uint32_t tmp; | 88 | u32 tmp; |
| 90 | int r, i; | 89 | int r, i; |
| 91 | 90 | ||
| 92 | if (rdev->gart.table.vram.robj == NULL) { | 91 | if (rdev->gart.table.vram.robj == NULL) { |
| @@ -96,46 +95,50 @@ int rs600_gart_enable(struct radeon_device *rdev) | |||
| 96 | r = radeon_gart_table_vram_pin(rdev); | 95 | r = radeon_gart_table_vram_pin(rdev); |
| 97 | if (r) | 96 | if (r) |
| 98 | return r; | 97 | return r; |
| 98 | /* Enable bus master */ | ||
| 99 | tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS; | ||
| 100 | WREG32(R_00004C_BUS_CNTL, tmp); | ||
| 99 | /* FIXME: setup default page */ | 101 | /* FIXME: setup default page */ |
| 100 | WREG32_MC(RS600_MC_PT0_CNTL, | 102 | WREG32_MC(R_000100_MC_PT0_CNTL, |
| 101 | (RS600_EFFECTIVE_L2_CACHE_SIZE(6) | | 103 | (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) | |
| 102 | RS600_EFFECTIVE_L2_QUEUE_SIZE(6))); | 104 | S_000100_EFFECTIVE_L2_QUEUE_SIZE(6))); |
| 103 | for (i = 0; i < 19; i++) { | 105 | for (i = 0; i < 19; i++) { |
| 104 | WREG32_MC(RS600_MC_PT0_CLIENT0_CNTL + i, | 106 | WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i, |
| 105 | (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE | | 107 | S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) | |
| 106 | RS600_SYSTEM_ACCESS_MODE_IN_SYS | | 108 | S_00016C_SYSTEM_ACCESS_MODE_MASK( |
| 107 | RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE | | 109 | V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) | |
| 108 | RS600_EFFECTIVE_L1_CACHE_SIZE(3) | | 110 | S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS( |
| 109 | RS600_ENABLE_FRAGMENT_PROCESSING | | 111 | V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) | |
| 110 | RS600_EFFECTIVE_L1_QUEUE_SIZE(3))); | 112 | S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) | |
| 113 | S_00016C_ENABLE_FRAGMENT_PROCESSING(1) | | ||
| 114 | S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1)); | ||
| 111 | } | 115 | } |
| 112 | 116 | ||
| 113 | /* System context map to GART space */ | 117 | /* System context map to GART space */ |
| 114 | WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_location); | 118 | WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start); |
| 115 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; | 119 | WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end); |
| 116 | WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, tmp); | ||
| 117 | 120 | ||
| 118 | /* enable first context */ | 121 | /* enable first context */ |
| 119 | WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_location); | 122 | WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start); |
| 120 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; | 123 | WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end); |
| 121 | WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR, tmp); | 124 | WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL, |
| 122 | WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL, | 125 | S_000102_ENABLE_PAGE_TABLE(1) | |
| 123 | (RS600_ENABLE_PAGE_TABLE | RS600_PAGE_TABLE_TYPE_FLAT)); | 126 | S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT)); |
| 124 | /* disable all other contexts */ | 127 | /* disable all other contexts */ |
| 125 | for (i = 1; i < 8; i++) { | 128 | for (i = 1; i < 8; i++) { |
| 126 | WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL + i, 0); | 129 | WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0); |
| 127 | } | 130 | } |
| 128 | 131 | ||
| 129 | /* setup the page table */ | 132 | /* setup the page table */ |
| 130 | WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, | 133 | WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, |
| 131 | rdev->gart.table_addr); | 134 | rdev->gart.table_addr); |
| 132 | WREG32_MC(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); | 135 | WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); |
| 133 | 136 | ||
| 134 | /* enable page tables */ | 137 | /* enable page tables */ |
| 135 | tmp = RREG32_MC(RS600_MC_PT0_CNTL); | 138 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
| 136 | WREG32_MC(RS600_MC_PT0_CNTL, (tmp | RS600_ENABLE_PT)); | 139 | WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1))); |
| 137 | tmp = RREG32_MC(RS600_MC_CNTL1); | 140 | tmp = RREG32_MC(R_000009_MC_CNTL1); |
| 138 | WREG32_MC(RS600_MC_CNTL1, (tmp | RS600_ENABLE_PAGE_TABLES)); | 141 | WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1))); |
| 139 | rs600_gart_tlb_flush(rdev); | 142 | rs600_gart_tlb_flush(rdev); |
| 140 | rdev->gart.ready = true; | 143 | rdev->gart.ready = true; |
| 141 | return 0; | 144 | return 0; |
| @@ -146,10 +149,9 @@ void rs600_gart_disable(struct radeon_device *rdev) | |||
| 146 | uint32_t tmp; | 149 | uint32_t tmp; |
| 147 | 150 | ||
| 148 | /* FIXME: disable out of gart access */ | 151 | /* FIXME: disable out of gart access */ |
| 149 | WREG32_MC(RS600_MC_PT0_CNTL, 0); | 152 | WREG32_MC(R_000100_MC_PT0_CNTL, 0); |
| 150 | tmp = RREG32_MC(RS600_MC_CNTL1); | 153 | tmp = RREG32_MC(R_000009_MC_CNTL1); |
| 151 | tmp &= ~RS600_ENABLE_PAGE_TABLES; | 154 | WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES); |
| 152 | WREG32_MC(RS600_MC_CNTL1, tmp); | ||
| 153 | if (rdev->gart.table.vram.robj) { | 155 | if (rdev->gart.table.vram.robj) { |
| 154 | radeon_object_kunmap(rdev->gart.table.vram.robj); | 156 | radeon_object_kunmap(rdev->gart.table.vram.robj); |
| 155 | radeon_object_unpin(rdev->gart.table.vram.robj); | 157 | radeon_object_unpin(rdev->gart.table.vram.robj); |
| @@ -183,129 +185,61 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
| 183 | return 0; | 185 | return 0; |
| 184 | } | 186 | } |
| 185 | 187 | ||
| 186 | |||
| 187 | /* | ||
| 188 | * MC. | ||
| 189 | */ | ||
| 190 | void rs600_mc_disable_clients(struct radeon_device *rdev) | ||
| 191 | { | ||
| 192 | unsigned tmp; | ||
| 193 | |||
| 194 | if (r100_gui_wait_for_idle(rdev)) { | ||
| 195 | printk(KERN_WARNING "Failed to wait GUI idle while " | ||
| 196 | "programming pipes. Bad things might happen.\n"); | ||
| 197 | } | ||
| 198 | |||
| 199 | rv515_vga_render_disable(rdev); | ||
| 200 | |||
| 201 | tmp = RREG32(AVIVO_D1VGA_CONTROL); | ||
| 202 | WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE); | ||
| 203 | tmp = RREG32(AVIVO_D2VGA_CONTROL); | ||
| 204 | WREG32(AVIVO_D2VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE); | ||
| 205 | |||
| 206 | tmp = RREG32(AVIVO_D1CRTC_CONTROL); | ||
| 207 | WREG32(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN); | ||
| 208 | tmp = RREG32(AVIVO_D2CRTC_CONTROL); | ||
| 209 | WREG32(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN); | ||
| 210 | |||
| 211 | /* make sure all previous write got through */ | ||
| 212 | tmp = RREG32(AVIVO_D2CRTC_CONTROL); | ||
| 213 | |||
| 214 | mdelay(1); | ||
| 215 | } | ||
| 216 | |||
| 217 | int rs600_mc_init(struct radeon_device *rdev) | ||
| 218 | { | ||
| 219 | uint32_t tmp; | ||
| 220 | int r; | ||
| 221 | |||
| 222 | if (r100_debugfs_rbbm_init(rdev)) { | ||
| 223 | DRM_ERROR("Failed to register debugfs file for RBBM !\n"); | ||
| 224 | } | ||
| 225 | |||
| 226 | rs600_gpu_init(rdev); | ||
| 227 | rs600_gart_disable(rdev); | ||
| 228 | |||
| 229 | /* Setup GPU memory space */ | ||
| 230 | rdev->mc.vram_location = 0xFFFFFFFFUL; | ||
| 231 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | ||
| 232 | r = radeon_mc_setup(rdev); | ||
| 233 | if (r) { | ||
| 234 | return r; | ||
| 235 | } | ||
| 236 | |||
| 237 | /* Program GPU memory space */ | ||
| 238 | /* Enable bus master */ | ||
| 239 | tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS; | ||
| 240 | WREG32(RADEON_BUS_CNTL, tmp); | ||
| 241 | /* FIXME: What does AGP means for such chipset ? */ | ||
| 242 | WREG32_MC(RS600_MC_AGP_LOCATION, 0x0FFFFFFF); | ||
| 243 | /* FIXME: are this AGP reg in indirect MC range ? */ | ||
| 244 | WREG32_MC(RS600_MC_AGP_BASE, 0); | ||
| 245 | WREG32_MC(RS600_MC_AGP_BASE_2, 0); | ||
| 246 | rs600_mc_disable_clients(rdev); | ||
| 247 | if (rs600_mc_wait_for_idle(rdev)) { | ||
| 248 | printk(KERN_WARNING "Failed to wait MC idle while " | ||
| 249 | "programming pipes. Bad things might happen.\n"); | ||
| 250 | } | ||
| 251 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; | ||
| 252 | tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16); | ||
| 253 | tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16); | ||
| 254 | WREG32_MC(RS600_MC_FB_LOCATION, tmp); | ||
| 255 | WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16); | ||
| 256 | return 0; | ||
| 257 | } | ||
| 258 | |||
| 259 | void rs600_mc_fini(struct radeon_device *rdev) | ||
| 260 | { | ||
| 261 | } | ||
| 262 | |||
| 263 | |||
| 264 | /* | ||
| 265 | * Interrupts | ||
| 266 | */ | ||
| 267 | int rs600_irq_set(struct radeon_device *rdev) | 188 | int rs600_irq_set(struct radeon_device *rdev) |
| 268 | { | 189 | { |
| 269 | uint32_t tmp = 0; | 190 | uint32_t tmp = 0; |
| 270 | uint32_t mode_int = 0; | 191 | uint32_t mode_int = 0; |
| 271 | 192 | ||
| 272 | if (rdev->irq.sw_int) { | 193 | if (rdev->irq.sw_int) { |
| 273 | tmp |= RADEON_SW_INT_ENABLE; | 194 | tmp |= S_000040_SW_INT_EN(1); |
| 274 | } | 195 | } |
| 275 | if (rdev->irq.crtc_vblank_int[0]) { | 196 | if (rdev->irq.crtc_vblank_int[0]) { |
| 276 | mode_int |= AVIVO_D1MODE_INT_MASK; | 197 | mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); |
| 277 | } | 198 | } |
| 278 | if (rdev->irq.crtc_vblank_int[1]) { | 199 | if (rdev->irq.crtc_vblank_int[1]) { |
| 279 | mode_int |= AVIVO_D2MODE_INT_MASK; | 200 | mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1); |
| 280 | } | 201 | } |
| 281 | WREG32(RADEON_GEN_INT_CNTL, tmp); | 202 | WREG32(R_000040_GEN_INT_CNTL, tmp); |
| 282 | WREG32(AVIVO_DxMODE_INT_MASK, mode_int); | 203 | WREG32(R_006540_DxMODE_INT_MASK, mode_int); |
| 283 | return 0; | 204 | return 0; |
| 284 | } | 205 | } |
| 285 | 206 | ||
| 286 | static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) | 207 | static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) |
| 287 | { | 208 | { |
| 288 | uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); | 209 | uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); |
| 289 | uint32_t irq_mask = RADEON_SW_INT_TEST; | 210 | uint32_t irq_mask = ~C_000044_SW_INT; |
| 290 | 211 | ||
| 291 | if (irqs & AVIVO_DISPLAY_INT_STATUS) { | 212 | if (G_000044_DISPLAY_INT_STAT(irqs)) { |
| 292 | *r500_disp_int = RREG32(AVIVO_DISP_INTERRUPT_STATUS); | 213 | *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); |
| 293 | if (*r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) { | 214 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) { |
| 294 | WREG32(AVIVO_D1MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK); | 215 | WREG32(R_006534_D1MODE_VBLANK_STATUS, |
| 216 | S_006534_D1MODE_VBLANK_ACK(1)); | ||
| 295 | } | 217 | } |
| 296 | if (*r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) { | 218 | if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) { |
| 297 | WREG32(AVIVO_D2MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK); | 219 | WREG32(R_006D34_D2MODE_VBLANK_STATUS, |
| 220 | S_006D34_D2MODE_VBLANK_ACK(1)); | ||
| 298 | } | 221 | } |
| 299 | } else { | 222 | } else { |
| 300 | *r500_disp_int = 0; | 223 | *r500_disp_int = 0; |
| 301 | } | 224 | } |
| 302 | 225 | ||
| 303 | if (irqs) { | 226 | if (irqs) { |
| 304 | WREG32(RADEON_GEN_INT_STATUS, irqs); | 227 | WREG32(R_000044_GEN_INT_STATUS, irqs); |
| 305 | } | 228 | } |
| 306 | return irqs & irq_mask; | 229 | return irqs & irq_mask; |
| 307 | } | 230 | } |
| 308 | 231 | ||
| 232 | void rs600_irq_disable(struct radeon_device *rdev) | ||
| 233 | { | ||
| 234 | u32 tmp; | ||
| 235 | |||
| 236 | WREG32(R_000040_GEN_INT_CNTL, 0); | ||
| 237 | WREG32(R_006540_DxMODE_INT_MASK, 0); | ||
| 238 | /* Wait and acknowledge irq */ | ||
| 239 | mdelay(1); | ||
| 240 | rs600_irq_ack(rdev, &tmp); | ||
| 241 | } | ||
| 242 | |||
| 309 | int rs600_irq_process(struct radeon_device *rdev) | 243 | int rs600_irq_process(struct radeon_device *rdev) |
| 310 | { | 244 | { |
| 311 | uint32_t status; | 245 | uint32_t status; |
| @@ -317,16 +251,13 @@ int rs600_irq_process(struct radeon_device *rdev) | |||
| 317 | } | 251 | } |
| 318 | while (status || r500_disp_int) { | 252 | while (status || r500_disp_int) { |
| 319 | /* SW interrupt */ | 253 | /* SW interrupt */ |
| 320 | if (status & RADEON_SW_INT_TEST) { | 254 | if (G_000040_SW_INT_EN(status)) |
| 321 | radeon_fence_process(rdev); | 255 | radeon_fence_process(rdev); |
| 322 | } | ||
| 323 | /* Vertical blank interrupts */ | 256 | /* Vertical blank interrupts */ |
| 324 | if (r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) { | 257 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) |
| 325 | drm_handle_vblank(rdev->ddev, 0); | 258 | drm_handle_vblank(rdev->ddev, 0); |
| 326 | } | 259 | if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) |
| 327 | if (r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) { | ||
| 328 | drm_handle_vblank(rdev->ddev, 1); | 260 | drm_handle_vblank(rdev->ddev, 1); |
| 329 | } | ||
| 330 | status = rs600_irq_ack(rdev, &r500_disp_int); | 261 | status = rs600_irq_ack(rdev, &r500_disp_int); |
| 331 | } | 262 | } |
| 332 | return IRQ_HANDLED; | 263 | return IRQ_HANDLED; |
| @@ -335,53 +266,34 @@ int rs600_irq_process(struct radeon_device *rdev) | |||
| 335 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc) | 266 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc) |
| 336 | { | 267 | { |
| 337 | if (crtc == 0) | 268 | if (crtc == 0) |
| 338 | return RREG32(AVIVO_D1CRTC_FRAME_COUNT); | 269 | return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT); |
| 339 | else | 270 | else |
| 340 | return RREG32(AVIVO_D2CRTC_FRAME_COUNT); | 271 | return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT); |
| 341 | } | 272 | } |
| 342 | 273 | ||
| 343 | |||
| 344 | /* | ||
| 345 | * Global GPU functions | ||
| 346 | */ | ||
| 347 | int rs600_mc_wait_for_idle(struct radeon_device *rdev) | 274 | int rs600_mc_wait_for_idle(struct radeon_device *rdev) |
| 348 | { | 275 | { |
| 349 | unsigned i; | 276 | unsigned i; |
| 350 | uint32_t tmp; | ||
| 351 | 277 | ||
| 352 | for (i = 0; i < rdev->usec_timeout; i++) { | 278 | for (i = 0; i < rdev->usec_timeout; i++) { |
| 353 | /* read MC_STATUS */ | 279 | if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS))) |
| 354 | tmp = RREG32_MC(RS600_MC_STATUS); | ||
| 355 | if (tmp & RS600_MC_STATUS_IDLE) { | ||
| 356 | return 0; | 280 | return 0; |
| 357 | } | 281 | udelay(1); |
| 358 | DRM_UDELAY(1); | ||
| 359 | } | 282 | } |
| 360 | return -1; | 283 | return -1; |
| 361 | } | 284 | } |
| 362 | 285 | ||
| 363 | void rs600_errata(struct radeon_device *rdev) | ||
| 364 | { | ||
| 365 | rdev->pll_errata = 0; | ||
| 366 | } | ||
| 367 | |||
| 368 | void rs600_gpu_init(struct radeon_device *rdev) | 286 | void rs600_gpu_init(struct radeon_device *rdev) |
| 369 | { | 287 | { |
| 370 | /* FIXME: HDP same place on rs600 ? */ | 288 | /* FIXME: HDP same place on rs600 ? */ |
| 371 | r100_hdp_reset(rdev); | 289 | r100_hdp_reset(rdev); |
| 372 | rv515_vga_render_disable(rdev); | ||
| 373 | /* FIXME: is this correct ? */ | 290 | /* FIXME: is this correct ? */ |
| 374 | r420_pipes_init(rdev); | 291 | r420_pipes_init(rdev); |
| 375 | if (rs600_mc_wait_for_idle(rdev)) { | 292 | /* Wait for mc idle */ |
| 376 | printk(KERN_WARNING "Failed to wait MC idle while " | 293 | if (rs600_mc_wait_for_idle(rdev)) |
| 377 | "programming pipes. Bad things might happen.\n"); | 294 | dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); |
| 378 | } | ||
| 379 | } | 295 | } |
| 380 | 296 | ||
| 381 | |||
| 382 | /* | ||
| 383 | * VRAM info. | ||
| 384 | */ | ||
| 385 | void rs600_vram_info(struct radeon_device *rdev) | 297 | void rs600_vram_info(struct radeon_device *rdev) |
| 386 | { | 298 | { |
| 387 | /* FIXME: to do or is these values sane ? */ | 299 | /* FIXME: to do or is these values sane ? */ |
| @@ -394,31 +306,206 @@ void rs600_bandwidth_update(struct radeon_device *rdev) | |||
| 394 | /* FIXME: implement, should this be like rs690 ? */ | 306 | /* FIXME: implement, should this be like rs690 ? */ |
| 395 | } | 307 | } |
| 396 | 308 | ||
| 397 | |||
| 398 | /* | ||
| 399 | * Indirect registers accessor | ||
| 400 | */ | ||
| 401 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) | 309 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
| 402 | { | 310 | { |
| 403 | uint32_t r; | 311 | WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | |
| 404 | 312 | S_000070_MC_IND_CITF_ARB0(1)); | |
| 405 | WREG32(RS600_MC_INDEX, | 313 | return RREG32(R_000074_MC_IND_DATA); |
| 406 | ((reg & RS600_MC_ADDR_MASK) | RS600_MC_IND_CITF_ARB0)); | ||
| 407 | r = RREG32(RS600_MC_DATA); | ||
| 408 | return r; | ||
| 409 | } | 314 | } |
| 410 | 315 | ||
| 411 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | 316 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
| 412 | { | 317 | { |
| 413 | WREG32(RS600_MC_INDEX, | 318 | WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | |
| 414 | RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 | | 319 | S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1)); |
| 415 | ((reg) & RS600_MC_ADDR_MASK)); | 320 | WREG32(R_000074_MC_IND_DATA, v); |
| 416 | WREG32(RS600_MC_DATA, v); | ||
| 417 | } | 321 | } |
| 418 | 322 | ||
| 419 | int rs600_init(struct radeon_device *rdev) | 323 | void rs600_debugfs(struct radeon_device *rdev) |
| 324 | { | ||
| 325 | if (r100_debugfs_rbbm_init(rdev)) | ||
| 326 | DRM_ERROR("Failed to register debugfs file for RBBM !\n"); | ||
| 327 | } | ||
| 328 | |||
| 329 | void rs600_set_safe_registers(struct radeon_device *rdev) | ||
| 420 | { | 330 | { |
| 421 | rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm; | 331 | rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm; |
| 422 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm); | 332 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm); |
| 333 | } | ||
| 334 | |||
| 335 | static void rs600_mc_program(struct radeon_device *rdev) | ||
| 336 | { | ||
| 337 | struct rv515_mc_save save; | ||
| 338 | |||
| 339 | /* Stops all mc clients */ | ||
| 340 | rv515_mc_stop(rdev, &save); | ||
| 341 | |||
| 342 | /* Wait for mc idle */ | ||
| 343 | if (rs600_mc_wait_for_idle(rdev)) | ||
| 344 | dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); | ||
| 345 | |||
| 346 | /* FIXME: What does AGP means for such chipset ? */ | ||
| 347 | WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF); | ||
| 348 | WREG32_MC(R_000006_AGP_BASE, 0); | ||
| 349 | WREG32_MC(R_000007_AGP_BASE_2, 0); | ||
| 350 | /* Program MC */ | ||
| 351 | WREG32_MC(R_000004_MC_FB_LOCATION, | ||
| 352 | S_000004_MC_FB_START(rdev->mc.vram_start >> 16) | | ||
| 353 | S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16)); | ||
| 354 | WREG32(R_000134_HDP_FB_LOCATION, | ||
| 355 | S_000134_HDP_FB_START(rdev->mc.vram_start >> 16)); | ||
| 356 | |||
| 357 | rv515_mc_resume(rdev, &save); | ||
| 358 | } | ||
| 359 | |||
| 360 | static int rs600_startup(struct radeon_device *rdev) | ||
| 361 | { | ||
| 362 | int r; | ||
| 363 | |||
| 364 | rs600_mc_program(rdev); | ||
| 365 | /* Resume clock */ | ||
| 366 | rv515_clock_startup(rdev); | ||
| 367 | /* Initialize GPU configuration (# pipes, ...) */ | ||
| 368 | rs600_gpu_init(rdev); | ||
| 369 | /* Initialize GART (initialize after TTM so we can allocate | ||
| 370 | * memory through TTM but finalize after TTM) */ | ||
| 371 | r = rs600_gart_enable(rdev); | ||
| 372 | if (r) | ||
| 373 | return r; | ||
| 374 | /* Enable IRQ */ | ||
| 375 | rdev->irq.sw_int = true; | ||
| 376 | rs600_irq_set(rdev); | ||
| 377 | /* 1M ring buffer */ | ||
| 378 | r = r100_cp_init(rdev, 1024 * 1024); | ||
| 379 | if (r) { | ||
| 380 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | ||
| 381 | return r; | ||
| 382 | } | ||
| 383 | r = r100_wb_init(rdev); | ||
| 384 | if (r) | ||
| 385 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
| 386 | r = r100_ib_init(rdev); | ||
| 387 | if (r) { | ||
| 388 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | ||
| 389 | return r; | ||
| 390 | } | ||
| 391 | return 0; | ||
| 392 | } | ||
| 393 | |||
| 394 | int rs600_resume(struct radeon_device *rdev) | ||
| 395 | { | ||
| 396 | /* Make sur GART are not working */ | ||
| 397 | rs600_gart_disable(rdev); | ||
| 398 | /* Resume clock before doing reset */ | ||
| 399 | rv515_clock_startup(rdev); | ||
| 400 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
| 401 | if (radeon_gpu_reset(rdev)) { | ||
| 402 | dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
| 403 | RREG32(R_000E40_RBBM_STATUS), | ||
| 404 | RREG32(R_0007C0_CP_STAT)); | ||
| 405 | } | ||
| 406 | /* post */ | ||
| 407 | atom_asic_init(rdev->mode_info.atom_context); | ||
| 408 | /* Resume clock after posting */ | ||
| 409 | rv515_clock_startup(rdev); | ||
| 410 | return rs600_startup(rdev); | ||
| 411 | } | ||
| 412 | |||
| 413 | int rs600_suspend(struct radeon_device *rdev) | ||
| 414 | { | ||
| 415 | r100_cp_disable(rdev); | ||
| 416 | r100_wb_disable(rdev); | ||
| 417 | rs600_irq_disable(rdev); | ||
| 418 | rs600_gart_disable(rdev); | ||
| 419 | return 0; | ||
| 420 | } | ||
| 421 | |||
| 422 | void rs600_fini(struct radeon_device *rdev) | ||
| 423 | { | ||
| 424 | rs600_suspend(rdev); | ||
| 425 | r100_cp_fini(rdev); | ||
| 426 | r100_wb_fini(rdev); | ||
| 427 | r100_ib_fini(rdev); | ||
| 428 | radeon_gem_fini(rdev); | ||
| 429 | rs600_gart_fini(rdev); | ||
| 430 | radeon_irq_kms_fini(rdev); | ||
| 431 | radeon_fence_driver_fini(rdev); | ||
| 432 | radeon_object_fini(rdev); | ||
| 433 | radeon_atombios_fini(rdev); | ||
| 434 | kfree(rdev->bios); | ||
| 435 | rdev->bios = NULL; | ||
| 436 | } | ||
| 437 | |||
| 438 | int rs600_init(struct radeon_device *rdev) | ||
| 439 | { | ||
| 440 | int r; | ||
| 441 | |||
| 442 | /* Disable VGA */ | ||
| 443 | rv515_vga_render_disable(rdev); | ||
| 444 | /* Initialize scratch registers */ | ||
| 445 | radeon_scratch_init(rdev); | ||
| 446 | /* Initialize surface registers */ | ||
| 447 | radeon_surface_init(rdev); | ||
| 448 | /* BIOS */ | ||
| 449 | if (!radeon_get_bios(rdev)) { | ||
| 450 | if (ASIC_IS_AVIVO(rdev)) | ||
| 451 | return -EINVAL; | ||
| 452 | } | ||
| 453 | if (rdev->is_atom_bios) { | ||
| 454 | r = radeon_atombios_init(rdev); | ||
| 455 | if (r) | ||
| 456 | return r; | ||
| 457 | } else { | ||
| 458 | dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n"); | ||
| 459 | return -EINVAL; | ||
| 460 | } | ||
| 461 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
| 462 | if (radeon_gpu_reset(rdev)) { | ||
| 463 | dev_warn(rdev->dev, | ||
| 464 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
| 465 | RREG32(R_000E40_RBBM_STATUS), | ||
| 466 | RREG32(R_0007C0_CP_STAT)); | ||
| 467 | } | ||
| 468 | /* check if cards are posted or not */ | ||
| 469 | if (!radeon_card_posted(rdev) && rdev->bios) { | ||
| 470 | DRM_INFO("GPU not posted. posting now...\n"); | ||
| 471 | atom_asic_init(rdev->mode_info.atom_context); | ||
| 472 | } | ||
| 473 | /* Initialize clocks */ | ||
| 474 | radeon_get_clock_info(rdev->ddev); | ||
| 475 | /* Get vram informations */ | ||
| 476 | rs600_vram_info(rdev); | ||
| 477 | /* Initialize memory controller (also test AGP) */ | ||
| 478 | r = r420_mc_init(rdev); | ||
| 479 | if (r) | ||
| 480 | return r; | ||
| 481 | rs600_debugfs(rdev); | ||
| 482 | /* Fence driver */ | ||
| 483 | r = radeon_fence_driver_init(rdev); | ||
| 484 | if (r) | ||
| 485 | return r; | ||
| 486 | r = radeon_irq_kms_init(rdev); | ||
| 487 | if (r) | ||
| 488 | return r; | ||
| 489 | /* Memory manager */ | ||
| 490 | r = radeon_object_init(rdev); | ||
| 491 | if (r) | ||
| 492 | return r; | ||
| 493 | r = rs600_gart_init(rdev); | ||
| 494 | if (r) | ||
| 495 | return r; | ||
| 496 | rs600_set_safe_registers(rdev); | ||
| 497 | rdev->accel_working = true; | ||
| 498 | r = rs600_startup(rdev); | ||
| 499 | if (r) { | ||
| 500 | /* Somethings want wront with the accel init stop accel */ | ||
| 501 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | ||
| 502 | rs600_suspend(rdev); | ||
| 503 | r100_cp_fini(rdev); | ||
| 504 | r100_wb_fini(rdev); | ||
| 505 | r100_ib_fini(rdev); | ||
| 506 | rs600_gart_fini(rdev); | ||
| 507 | radeon_irq_kms_fini(rdev); | ||
| 508 | rdev->accel_working = false; | ||
| 509 | } | ||
| 423 | return 0; | 510 | return 0; |
| 424 | } | 511 | } |
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h new file mode 100644 index 000000000000..81308924859a --- /dev/null +++ b/drivers/gpu/drm/radeon/rs600d.h | |||
| @@ -0,0 +1,470 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #ifndef __RS600D_H__ | ||
| 29 | #define __RS600D_H__ | ||
| 30 | |||
| 31 | /* Registers */ | ||
| 32 | #define R_000040_GEN_INT_CNTL 0x000040 | ||
| 33 | #define S_000040_DISPLAY_INT_STATUS(x) (((x) & 0x1) << 0) | ||
| 34 | #define G_000040_DISPLAY_INT_STATUS(x) (((x) >> 0) & 0x1) | ||
| 35 | #define C_000040_DISPLAY_INT_STATUS 0xFFFFFFFE | ||
| 36 | #define S_000040_DMA_VIPH0_INT_EN(x) (((x) & 0x1) << 12) | ||
| 37 | #define G_000040_DMA_VIPH0_INT_EN(x) (((x) >> 12) & 0x1) | ||
| 38 | #define C_000040_DMA_VIPH0_INT_EN 0xFFFFEFFF | ||
| 39 | #define S_000040_CRTC2_VSYNC(x) (((x) & 0x1) << 6) | ||
| 40 | #define G_000040_CRTC2_VSYNC(x) (((x) >> 6) & 0x1) | ||
| 41 | #define C_000040_CRTC2_VSYNC 0xFFFFFFBF | ||
| 42 | #define S_000040_SNAPSHOT2(x) (((x) & 0x1) << 7) | ||
| 43 | #define G_000040_SNAPSHOT2(x) (((x) >> 7) & 0x1) | ||
| 44 | #define C_000040_SNAPSHOT2 0xFFFFFF7F | ||
| 45 | #define S_000040_CRTC2_VBLANK(x) (((x) & 0x1) << 9) | ||
| 46 | #define G_000040_CRTC2_VBLANK(x) (((x) >> 9) & 0x1) | ||
| 47 | #define C_000040_CRTC2_VBLANK 0xFFFFFDFF | ||
| 48 | #define S_000040_FP2_DETECT(x) (((x) & 0x1) << 10) | ||
| 49 | #define G_000040_FP2_DETECT(x) (((x) >> 10) & 0x1) | ||
| 50 | #define C_000040_FP2_DETECT 0xFFFFFBFF | ||
| 51 | #define S_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) & 0x1) << 11) | ||
| 52 | #define G_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) >> 11) & 0x1) | ||
| 53 | #define C_000040_VSYNC_DIFF_OVER_LIMIT 0xFFFFF7FF | ||
| 54 | #define S_000040_DMA_VIPH1_INT_EN(x) (((x) & 0x1) << 13) | ||
| 55 | #define G_000040_DMA_VIPH1_INT_EN(x) (((x) >> 13) & 0x1) | ||
| 56 | #define C_000040_DMA_VIPH1_INT_EN 0xFFFFDFFF | ||
| 57 | #define S_000040_DMA_VIPH2_INT_EN(x) (((x) & 0x1) << 14) | ||
| 58 | #define G_000040_DMA_VIPH2_INT_EN(x) (((x) >> 14) & 0x1) | ||
| 59 | #define C_000040_DMA_VIPH2_INT_EN 0xFFFFBFFF | ||
| 60 | #define S_000040_DMA_VIPH3_INT_EN(x) (((x) & 0x1) << 15) | ||
| 61 | #define G_000040_DMA_VIPH3_INT_EN(x) (((x) >> 15) & 0x1) | ||
| 62 | #define C_000040_DMA_VIPH3_INT_EN 0xFFFF7FFF | ||
| 63 | #define S_000040_I2C_INT_EN(x) (((x) & 0x1) << 17) | ||
| 64 | #define G_000040_I2C_INT_EN(x) (((x) >> 17) & 0x1) | ||
| 65 | #define C_000040_I2C_INT_EN 0xFFFDFFFF | ||
| 66 | #define S_000040_GUI_IDLE(x) (((x) & 0x1) << 19) | ||
| 67 | #define G_000040_GUI_IDLE(x) (((x) >> 19) & 0x1) | ||
| 68 | #define C_000040_GUI_IDLE 0xFFF7FFFF | ||
| 69 | #define S_000040_VIPH_INT_EN(x) (((x) & 0x1) << 24) | ||
| 70 | #define G_000040_VIPH_INT_EN(x) (((x) >> 24) & 0x1) | ||
| 71 | #define C_000040_VIPH_INT_EN 0xFEFFFFFF | ||
| 72 | #define S_000040_SW_INT_EN(x) (((x) & 0x1) << 25) | ||
| 73 | #define G_000040_SW_INT_EN(x) (((x) >> 25) & 0x1) | ||
| 74 | #define C_000040_SW_INT_EN 0xFDFFFFFF | ||
| 75 | #define S_000040_GEYSERVILLE(x) (((x) & 0x1) << 27) | ||
| 76 | #define G_000040_GEYSERVILLE(x) (((x) >> 27) & 0x1) | ||
| 77 | #define C_000040_GEYSERVILLE 0xF7FFFFFF | ||
| 78 | #define S_000040_HDCP_AUTHORIZED_INT(x) (((x) & 0x1) << 28) | ||
| 79 | #define G_000040_HDCP_AUTHORIZED_INT(x) (((x) >> 28) & 0x1) | ||
| 80 | #define C_000040_HDCP_AUTHORIZED_INT 0xEFFFFFFF | ||
| 81 | #define S_000040_DVI_I2C_INT(x) (((x) & 0x1) << 29) | ||
| 82 | #define G_000040_DVI_I2C_INT(x) (((x) >> 29) & 0x1) | ||
| 83 | #define C_000040_DVI_I2C_INT 0xDFFFFFFF | ||
| 84 | #define S_000040_GUIDMA(x) (((x) & 0x1) << 30) | ||
| 85 | #define G_000040_GUIDMA(x) (((x) >> 30) & 0x1) | ||
| 86 | #define C_000040_GUIDMA 0xBFFFFFFF | ||
| 87 | #define S_000040_VIDDMA(x) (((x) & 0x1) << 31) | ||
| 88 | #define G_000040_VIDDMA(x) (((x) >> 31) & 0x1) | ||
| 89 | #define C_000040_VIDDMA 0x7FFFFFFF | ||
| 90 | #define R_000044_GEN_INT_STATUS 0x000044 | ||
| 91 | #define S_000044_DISPLAY_INT_STAT(x) (((x) & 0x1) << 0) | ||
| 92 | #define G_000044_DISPLAY_INT_STAT(x) (((x) >> 0) & 0x1) | ||
| 93 | #define C_000044_DISPLAY_INT_STAT 0xFFFFFFFE | ||
| 94 | #define S_000044_VGA_INT_STAT(x) (((x) & 0x1) << 1) | ||
| 95 | #define G_000044_VGA_INT_STAT(x) (((x) >> 1) & 0x1) | ||
| 96 | #define C_000044_VGA_INT_STAT 0xFFFFFFFD | ||
| 97 | #define S_000044_CAP0_INT_ACTIVE(x) (((x) & 0x1) << 8) | ||
| 98 | #define G_000044_CAP0_INT_ACTIVE(x) (((x) >> 8) & 0x1) | ||
| 99 | #define C_000044_CAP0_INT_ACTIVE 0xFFFFFEFF | ||
| 100 | #define S_000044_DMA_VIPH0_INT(x) (((x) & 0x1) << 12) | ||
| 101 | #define G_000044_DMA_VIPH0_INT(x) (((x) >> 12) & 0x1) | ||
| 102 | #define C_000044_DMA_VIPH0_INT 0xFFFFEFFF | ||
| 103 | #define S_000044_DMA_VIPH1_INT(x) (((x) & 0x1) << 13) | ||
| 104 | #define G_000044_DMA_VIPH1_INT(x) (((x) >> 13) & 0x1) | ||
| 105 | #define C_000044_DMA_VIPH1_INT 0xFFFFDFFF | ||
| 106 | #define S_000044_DMA_VIPH2_INT(x) (((x) & 0x1) << 14) | ||
| 107 | #define G_000044_DMA_VIPH2_INT(x) (((x) >> 14) & 0x1) | ||
| 108 | #define C_000044_DMA_VIPH2_INT 0xFFFFBFFF | ||
| 109 | #define S_000044_DMA_VIPH3_INT(x) (((x) & 0x1) << 15) | ||
| 110 | #define G_000044_DMA_VIPH3_INT(x) (((x) >> 15) & 0x1) | ||
| 111 | #define C_000044_DMA_VIPH3_INT 0xFFFF7FFF | ||
| 112 | #define S_000044_MC_PROBE_FAULT_STAT(x) (((x) & 0x1) << 16) | ||
| 113 | #define G_000044_MC_PROBE_FAULT_STAT(x) (((x) >> 16) & 0x1) | ||
| 114 | #define C_000044_MC_PROBE_FAULT_STAT 0xFFFEFFFF | ||
| 115 | #define S_000044_I2C_INT(x) (((x) & 0x1) << 17) | ||
| 116 | #define G_000044_I2C_INT(x) (((x) >> 17) & 0x1) | ||
| 117 | #define C_000044_I2C_INT 0xFFFDFFFF | ||
| 118 | #define S_000044_SCRATCH_INT_STAT(x) (((x) & 0x1) << 18) | ||
| 119 | #define G_000044_SCRATCH_INT_STAT(x) (((x) >> 18) & 0x1) | ||
| 120 | #define C_000044_SCRATCH_INT_STAT 0xFFFBFFFF | ||
| 121 | #define S_000044_GUI_IDLE_STAT(x) (((x) & 0x1) << 19) | ||
| 122 | #define G_000044_GUI_IDLE_STAT(x) (((x) >> 19) & 0x1) | ||
| 123 | #define C_000044_GUI_IDLE_STAT 0xFFF7FFFF | ||
| 124 | #define S_000044_ATI_OVERDRIVE_INT_STAT(x) (((x) & 0x1) << 20) | ||
| 125 | #define G_000044_ATI_OVERDRIVE_INT_STAT(x) (((x) >> 20) & 0x1) | ||
| 126 | #define C_000044_ATI_OVERDRIVE_INT_STAT 0xFFEFFFFF | ||
| 127 | #define S_000044_MC_PROTECTION_FAULT_STAT(x) (((x) & 0x1) << 21) | ||
| 128 | #define G_000044_MC_PROTECTION_FAULT_STAT(x) (((x) >> 21) & 0x1) | ||
| 129 | #define C_000044_MC_PROTECTION_FAULT_STAT 0xFFDFFFFF | ||
| 130 | #define S_000044_RBBM_READ_INT_STAT(x) (((x) & 0x1) << 22) | ||
| 131 | #define G_000044_RBBM_READ_INT_STAT(x) (((x) >> 22) & 0x1) | ||
| 132 | #define C_000044_RBBM_READ_INT_STAT 0xFFBFFFFF | ||
| 133 | #define S_000044_CB_CONTEXT_SWITCH_STAT(x) (((x) & 0x1) << 23) | ||
| 134 | #define G_000044_CB_CONTEXT_SWITCH_STAT(x) (((x) >> 23) & 0x1) | ||
| 135 | #define C_000044_CB_CONTEXT_SWITCH_STAT 0xFF7FFFFF | ||
| 136 | #define S_000044_VIPH_INT(x) (((x) & 0x1) << 24) | ||
| 137 | #define G_000044_VIPH_INT(x) (((x) >> 24) & 0x1) | ||
| 138 | #define C_000044_VIPH_INT 0xFEFFFFFF | ||
| 139 | #define S_000044_SW_INT(x) (((x) & 0x1) << 25) | ||
| 140 | #define G_000044_SW_INT(x) (((x) >> 25) & 0x1) | ||
| 141 | #define C_000044_SW_INT 0xFDFFFFFF | ||
| 142 | #define S_000044_SW_INT_SET(x) (((x) & 0x1) << 26) | ||
| 143 | #define G_000044_SW_INT_SET(x) (((x) >> 26) & 0x1) | ||
| 144 | #define C_000044_SW_INT_SET 0xFBFFFFFF | ||
| 145 | #define S_000044_IDCT_INT_STAT(x) (((x) & 0x1) << 27) | ||
| 146 | #define G_000044_IDCT_INT_STAT(x) (((x) >> 27) & 0x1) | ||
| 147 | #define C_000044_IDCT_INT_STAT 0xF7FFFFFF | ||
| 148 | #define S_000044_GUIDMA_STAT(x) (((x) & 0x1) << 30) | ||
| 149 | #define G_000044_GUIDMA_STAT(x) (((x) >> 30) & 0x1) | ||
| 150 | #define C_000044_GUIDMA_STAT 0xBFFFFFFF | ||
| 151 | #define S_000044_VIDDMA_STAT(x) (((x) & 0x1) << 31) | ||
| 152 | #define G_000044_VIDDMA_STAT(x) (((x) >> 31) & 0x1) | ||
| 153 | #define C_000044_VIDDMA_STAT 0x7FFFFFFF | ||
| 154 | #define R_00004C_BUS_CNTL 0x00004C | ||
| 155 | #define S_00004C_BUS_MASTER_DIS(x) (((x) & 0x1) << 14) | ||
| 156 | #define G_00004C_BUS_MASTER_DIS(x) (((x) >> 14) & 0x1) | ||
| 157 | #define C_00004C_BUS_MASTER_DIS 0xFFFFBFFF | ||
| 158 | #define S_00004C_BUS_MSI_REARM(x) (((x) & 0x1) << 20) | ||
| 159 | #define G_00004C_BUS_MSI_REARM(x) (((x) >> 20) & 0x1) | ||
| 160 | #define C_00004C_BUS_MSI_REARM 0xFFEFFFFF | ||
| 161 | #define R_000070_MC_IND_INDEX 0x000070 | ||
| 162 | #define S_000070_MC_IND_ADDR(x) (((x) & 0xFFFF) << 0) | ||
| 163 | #define G_000070_MC_IND_ADDR(x) (((x) >> 0) & 0xFFFF) | ||
| 164 | #define C_000070_MC_IND_ADDR 0xFFFF0000 | ||
| 165 | #define S_000070_MC_IND_SEQ_RBS_0(x) (((x) & 0x1) << 16) | ||
| 166 | #define G_000070_MC_IND_SEQ_RBS_0(x) (((x) >> 16) & 0x1) | ||
| 167 | #define C_000070_MC_IND_SEQ_RBS_0 0xFFFEFFFF | ||
| 168 | #define S_000070_MC_IND_SEQ_RBS_1(x) (((x) & 0x1) << 17) | ||
| 169 | #define G_000070_MC_IND_SEQ_RBS_1(x) (((x) >> 17) & 0x1) | ||
| 170 | #define C_000070_MC_IND_SEQ_RBS_1 0xFFFDFFFF | ||
| 171 | #define S_000070_MC_IND_SEQ_RBS_2(x) (((x) & 0x1) << 18) | ||
| 172 | #define G_000070_MC_IND_SEQ_RBS_2(x) (((x) >> 18) & 0x1) | ||
| 173 | #define C_000070_MC_IND_SEQ_RBS_2 0xFFFBFFFF | ||
| 174 | #define S_000070_MC_IND_SEQ_RBS_3(x) (((x) & 0x1) << 19) | ||
| 175 | #define G_000070_MC_IND_SEQ_RBS_3(x) (((x) >> 19) & 0x1) | ||
| 176 | #define C_000070_MC_IND_SEQ_RBS_3 0xFFF7FFFF | ||
| 177 | #define S_000070_MC_IND_AIC_RBS(x) (((x) & 0x1) << 20) | ||
| 178 | #define G_000070_MC_IND_AIC_RBS(x) (((x) >> 20) & 0x1) | ||
| 179 | #define C_000070_MC_IND_AIC_RBS 0xFFEFFFFF | ||
| 180 | #define S_000070_MC_IND_CITF_ARB0(x) (((x) & 0x1) << 21) | ||
| 181 | #define G_000070_MC_IND_CITF_ARB0(x) (((x) >> 21) & 0x1) | ||
| 182 | #define C_000070_MC_IND_CITF_ARB0 0xFFDFFFFF | ||
| 183 | #define S_000070_MC_IND_CITF_ARB1(x) (((x) & 0x1) << 22) | ||
| 184 | #define G_000070_MC_IND_CITF_ARB1(x) (((x) >> 22) & 0x1) | ||
| 185 | #define C_000070_MC_IND_CITF_ARB1 0xFFBFFFFF | ||
| 186 | #define S_000070_MC_IND_WR_EN(x) (((x) & 0x1) << 23) | ||
| 187 | #define G_000070_MC_IND_WR_EN(x) (((x) >> 23) & 0x1) | ||
| 188 | #define C_000070_MC_IND_WR_EN 0xFF7FFFFF | ||
| 189 | #define S_000070_MC_IND_RD_INV(x) (((x) & 0x1) << 24) | ||
| 190 | #define G_000070_MC_IND_RD_INV(x) (((x) >> 24) & 0x1) | ||
| 191 | #define C_000070_MC_IND_RD_INV 0xFEFFFFFF | ||
| 192 | #define R_000074_MC_IND_DATA 0x000074 | ||
| 193 | #define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0) | ||
| 194 | #define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF) | ||
| 195 | #define C_000074_MC_IND_DATA 0x00000000 | ||
| 196 | #define R_000134_HDP_FB_LOCATION 0x000134 | ||
| 197 | #define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0) | ||
| 198 | #define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
| 199 | #define C_000134_HDP_FB_START 0xFFFF0000 | ||
| 200 | #define R_0007C0_CP_STAT 0x0007C0 | ||
| 201 | #define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) | ||
| 202 | #define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) | ||
| 203 | #define C_0007C0_MRU_BUSY 0xFFFFFFFE | ||
| 204 | #define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) | ||
| 205 | #define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) | ||
| 206 | #define C_0007C0_MWU_BUSY 0xFFFFFFFD | ||
| 207 | #define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) | ||
| 208 | #define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) | ||
| 209 | #define C_0007C0_RSIU_BUSY 0xFFFFFFFB | ||
| 210 | #define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) | ||
| 211 | #define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) | ||
| 212 | #define C_0007C0_RCIU_BUSY 0xFFFFFFF7 | ||
| 213 | #define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) | ||
| 214 | #define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) | ||
| 215 | #define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF | ||
| 216 | #define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) | ||
| 217 | #define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) | ||
| 218 | #define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF | ||
| 219 | #define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) | ||
| 220 | #define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) | ||
| 221 | #define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF | ||
| 222 | #define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) | ||
| 223 | #define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) | ||
| 224 | #define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF | ||
| 225 | #define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) | ||
| 226 | #define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) | ||
| 227 | #define C_0007C0_CSI_BUSY 0xFFFFDFFF | ||
| 228 | #define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14) | ||
| 229 | #define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1) | ||
| 230 | #define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF | ||
| 231 | #define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15) | ||
| 232 | #define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1) | ||
| 233 | #define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF | ||
| 234 | #define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) | ||
| 235 | #define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) | ||
| 236 | #define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF | ||
| 237 | #define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) | ||
| 238 | #define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) | ||
| 239 | #define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF | ||
| 240 | #define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) | ||
| 241 | #define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) | ||
| 242 | #define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF | ||
| 243 | #define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) | ||
| 244 | #define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) | ||
| 245 | #define C_0007C0_CP_BUSY 0x7FFFFFFF | ||
| 246 | #define R_000E40_RBBM_STATUS 0x000E40 | ||
| 247 | #define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) | ||
| 248 | #define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) | ||
| 249 | #define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 | ||
| 250 | #define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) | ||
| 251 | #define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) | ||
| 252 | #define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF | ||
| 253 | #define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) | ||
| 254 | #define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) | ||
| 255 | #define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF | ||
| 256 | #define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) | ||
| 257 | #define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) | ||
| 258 | #define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF | ||
| 259 | #define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) | ||
| 260 | #define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) | ||
| 261 | #define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF | ||
| 262 | #define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) | ||
| 263 | #define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) | ||
| 264 | #define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF | ||
| 265 | #define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) | ||
| 266 | #define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) | ||
| 267 | #define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF | ||
| 268 | #define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) | ||
| 269 | #define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) | ||
| 270 | #define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF | ||
| 271 | #define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) | ||
| 272 | #define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) | ||
| 273 | #define C_000E40_ENG_EV_BUSY 0xFFFF7FFF | ||
| 274 | #define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) | ||
| 275 | #define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) | ||
| 276 | #define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF | ||
| 277 | #define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) | ||
| 278 | #define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) | ||
| 279 | #define C_000E40_E2_BUSY 0xFFFDFFFF | ||
| 280 | #define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) | ||
| 281 | #define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) | ||
| 282 | #define C_000E40_RB2D_BUSY 0xFFFBFFFF | ||
| 283 | #define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) | ||
| 284 | #define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) | ||
| 285 | #define C_000E40_RB3D_BUSY 0xFFF7FFFF | ||
| 286 | #define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20) | ||
| 287 | #define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1) | ||
| 288 | #define C_000E40_VAP_BUSY 0xFFEFFFFF | ||
| 289 | #define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) | ||
| 290 | #define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) | ||
| 291 | #define C_000E40_RE_BUSY 0xFFDFFFFF | ||
| 292 | #define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) | ||
| 293 | #define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) | ||
| 294 | #define C_000E40_TAM_BUSY 0xFFBFFFFF | ||
| 295 | #define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) | ||
| 296 | #define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) | ||
| 297 | #define C_000E40_TDM_BUSY 0xFF7FFFFF | ||
| 298 | #define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) | ||
| 299 | #define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) | ||
| 300 | #define C_000E40_PB_BUSY 0xFEFFFFFF | ||
| 301 | #define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25) | ||
| 302 | #define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1) | ||
| 303 | #define C_000E40_TIM_BUSY 0xFDFFFFFF | ||
| 304 | #define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26) | ||
| 305 | #define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1) | ||
| 306 | #define C_000E40_GA_BUSY 0xFBFFFFFF | ||
| 307 | #define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27) | ||
| 308 | #define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1) | ||
| 309 | #define C_000E40_CBA2D_BUSY 0xF7FFFFFF | ||
| 310 | #define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) | ||
| 311 | #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) | ||
| 312 | #define C_000E40_GUI_ACTIVE 0x7FFFFFFF | ||
| 313 | #define R_0060A4_D1CRTC_STATUS_FRAME_COUNT 0x0060A4 | ||
| 314 | #define S_0060A4_D1CRTC_FRAME_COUNT(x) (((x) & 0xFFFFFF) << 0) | ||
| 315 | #define G_0060A4_D1CRTC_FRAME_COUNT(x) (((x) >> 0) & 0xFFFFFF) | ||
| 316 | #define C_0060A4_D1CRTC_FRAME_COUNT 0xFF000000 | ||
| 317 | #define R_006534_D1MODE_VBLANK_STATUS 0x006534 | ||
| 318 | #define S_006534_D1MODE_VBLANK_OCCURRED(x) (((x) & 0x1) << 0) | ||
| 319 | #define G_006534_D1MODE_VBLANK_OCCURRED(x) (((x) >> 0) & 0x1) | ||
| 320 | #define C_006534_D1MODE_VBLANK_OCCURRED 0xFFFFFFFE | ||
| 321 | #define S_006534_D1MODE_VBLANK_ACK(x) (((x) & 0x1) << 4) | ||
| 322 | #define G_006534_D1MODE_VBLANK_ACK(x) (((x) >> 4) & 0x1) | ||
| 323 | #define C_006534_D1MODE_VBLANK_ACK 0xFFFFFFEF | ||
| 324 | #define S_006534_D1MODE_VBLANK_STAT(x) (((x) & 0x1) << 12) | ||
| 325 | #define G_006534_D1MODE_VBLANK_STAT(x) (((x) >> 12) & 0x1) | ||
| 326 | #define C_006534_D1MODE_VBLANK_STAT 0xFFFFEFFF | ||
| 327 | #define S_006534_D1MODE_VBLANK_INTERRUPT(x) (((x) & 0x1) << 16) | ||
| 328 | #define G_006534_D1MODE_VBLANK_INTERRUPT(x) (((x) >> 16) & 0x1) | ||
| 329 | #define C_006534_D1MODE_VBLANK_INTERRUPT 0xFFFEFFFF | ||
| 330 | #define R_006540_DxMODE_INT_MASK 0x006540 | ||
| 331 | #define S_006540_D1MODE_VBLANK_INT_MASK(x) (((x) & 0x1) << 0) | ||
| 332 | #define G_006540_D1MODE_VBLANK_INT_MASK(x) (((x) >> 0) & 0x1) | ||
| 333 | #define C_006540_D1MODE_VBLANK_INT_MASK 0xFFFFFFFE | ||
| 334 | #define S_006540_D1MODE_VLINE_INT_MASK(x) (((x) & 0x1) << 4) | ||
| 335 | #define G_006540_D1MODE_VLINE_INT_MASK(x) (((x) >> 4) & 0x1) | ||
| 336 | #define C_006540_D1MODE_VLINE_INT_MASK 0xFFFFFFEF | ||
| 337 | #define S_006540_D2MODE_VBLANK_INT_MASK(x) (((x) & 0x1) << 8) | ||
| 338 | #define G_006540_D2MODE_VBLANK_INT_MASK(x) (((x) >> 8) & 0x1) | ||
| 339 | #define C_006540_D2MODE_VBLANK_INT_MASK 0xFFFFFEFF | ||
| 340 | #define S_006540_D2MODE_VLINE_INT_MASK(x) (((x) & 0x1) << 12) | ||
| 341 | #define G_006540_D2MODE_VLINE_INT_MASK(x) (((x) >> 12) & 0x1) | ||
| 342 | #define C_006540_D2MODE_VLINE_INT_MASK 0xFFFFEFFF | ||
| 343 | #define S_006540_D1MODE_VBLANK_CP_SEL(x) (((x) & 0x1) << 30) | ||
| 344 | #define G_006540_D1MODE_VBLANK_CP_SEL(x) (((x) >> 30) & 0x1) | ||
| 345 | #define C_006540_D1MODE_VBLANK_CP_SEL 0xBFFFFFFF | ||
| 346 | #define S_006540_D2MODE_VBLANK_CP_SEL(x) (((x) & 0x1) << 31) | ||
| 347 | #define G_006540_D2MODE_VBLANK_CP_SEL(x) (((x) >> 31) & 0x1) | ||
| 348 | #define C_006540_D2MODE_VBLANK_CP_SEL 0x7FFFFFFF | ||
| 349 | #define R_0068A4_D2CRTC_STATUS_FRAME_COUNT 0x0068A4 | ||
| 350 | #define S_0068A4_D2CRTC_FRAME_COUNT(x) (((x) & 0xFFFFFF) << 0) | ||
| 351 | #define G_0068A4_D2CRTC_FRAME_COUNT(x) (((x) >> 0) & 0xFFFFFF) | ||
| 352 | #define C_0068A4_D2CRTC_FRAME_COUNT 0xFF000000 | ||
| 353 | #define R_006D34_D2MODE_VBLANK_STATUS 0x006D34 | ||
| 354 | #define S_006D34_D2MODE_VBLANK_OCCURRED(x) (((x) & 0x1) << 0) | ||
| 355 | #define G_006D34_D2MODE_VBLANK_OCCURRED(x) (((x) >> 0) & 0x1) | ||
| 356 | #define C_006D34_D2MODE_VBLANK_OCCURRED 0xFFFFFFFE | ||
| 357 | #define S_006D34_D2MODE_VBLANK_ACK(x) (((x) & 0x1) << 4) | ||
| 358 | #define G_006D34_D2MODE_VBLANK_ACK(x) (((x) >> 4) & 0x1) | ||
| 359 | #define C_006D34_D2MODE_VBLANK_ACK 0xFFFFFFEF | ||
| 360 | #define S_006D34_D2MODE_VBLANK_STAT(x) (((x) & 0x1) << 12) | ||
| 361 | #define G_006D34_D2MODE_VBLANK_STAT(x) (((x) >> 12) & 0x1) | ||
| 362 | #define C_006D34_D2MODE_VBLANK_STAT 0xFFFFEFFF | ||
| 363 | #define S_006D34_D2MODE_VBLANK_INTERRUPT(x) (((x) & 0x1) << 16) | ||
| 364 | #define G_006D34_D2MODE_VBLANK_INTERRUPT(x) (((x) >> 16) & 0x1) | ||
| 365 | #define C_006D34_D2MODE_VBLANK_INTERRUPT 0xFFFEFFFF | ||
| 366 | #define R_007EDC_DISP_INTERRUPT_STATUS 0x007EDC | ||
| 367 | #define S_007EDC_LB_D1_VBLANK_INTERRUPT(x) (((x) & 0x1) << 4) | ||
| 368 | #define G_007EDC_LB_D1_VBLANK_INTERRUPT(x) (((x) >> 4) & 0x1) | ||
| 369 | #define C_007EDC_LB_D1_VBLANK_INTERRUPT 0xFFFFFFEF | ||
| 370 | #define S_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) & 0x1) << 5) | ||
| 371 | #define G_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) >> 5) & 0x1) | ||
| 372 | #define C_007EDC_LB_D2_VBLANK_INTERRUPT 0xFFFFFFDF | ||
| 373 | |||
| 374 | |||
| 375 | /* MC registers */ | ||
| 376 | #define R_000000_MC_STATUS 0x000000 | ||
| 377 | #define S_000000_MC_IDLE(x) (((x) & 0x1) << 0) | ||
| 378 | #define G_000000_MC_IDLE(x) (((x) >> 0) & 0x1) | ||
| 379 | #define C_000000_MC_IDLE 0xFFFFFFFE | ||
| 380 | #define R_000004_MC_FB_LOCATION 0x000004 | ||
| 381 | #define S_000004_MC_FB_START(x) (((x) & 0xFFFF) << 0) | ||
| 382 | #define G_000004_MC_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
| 383 | #define C_000004_MC_FB_START 0xFFFF0000 | ||
| 384 | #define S_000004_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) | ||
| 385 | #define G_000004_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) | ||
| 386 | #define C_000004_MC_FB_TOP 0x0000FFFF | ||
| 387 | #define R_000005_MC_AGP_LOCATION 0x000005 | ||
| 388 | #define S_000005_MC_AGP_START(x) (((x) & 0xFFFF) << 0) | ||
| 389 | #define G_000005_MC_AGP_START(x) (((x) >> 0) & 0xFFFF) | ||
| 390 | #define C_000005_MC_AGP_START 0xFFFF0000 | ||
| 391 | #define S_000005_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16) | ||
| 392 | #define G_000005_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF) | ||
| 393 | #define C_000005_MC_AGP_TOP 0x0000FFFF | ||
| 394 | #define R_000006_AGP_BASE 0x000006 | ||
| 395 | #define S_000006_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) | ||
| 396 | #define G_000006_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) | ||
| 397 | #define C_000006_AGP_BASE_ADDR 0x00000000 | ||
| 398 | #define R_000007_AGP_BASE_2 0x000007 | ||
| 399 | #define S_000007_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0) | ||
| 400 | #define G_000007_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF) | ||
| 401 | #define C_000007_AGP_BASE_ADDR_2 0xFFFFFFF0 | ||
| 402 | #define R_000009_MC_CNTL1 0x000009 | ||
| 403 | #define S_000009_ENABLE_PAGE_TABLES(x) (((x) & 0x1) << 26) | ||
| 404 | #define G_000009_ENABLE_PAGE_TABLES(x) (((x) >> 26) & 0x1) | ||
| 405 | #define C_000009_ENABLE_PAGE_TABLES 0xFBFFFFFF | ||
| 406 | /* FIXME don't know the various field size need feedback from AMD */ | ||
| 407 | #define R_000100_MC_PT0_CNTL 0x000100 | ||
| 408 | #define S_000100_ENABLE_PT(x) (((x) & 0x1) << 0) | ||
| 409 | #define G_000100_ENABLE_PT(x) (((x) >> 0) & 0x1) | ||
| 410 | #define C_000100_ENABLE_PT 0xFFFFFFFE | ||
| 411 | #define S_000100_EFFECTIVE_L2_CACHE_SIZE(x) (((x) & 0x7) << 15) | ||
| 412 | #define G_000100_EFFECTIVE_L2_CACHE_SIZE(x) (((x) >> 15) & 0x7) | ||
| 413 | #define C_000100_EFFECTIVE_L2_CACHE_SIZE 0xFFFC7FFF | ||
| 414 | #define S_000100_EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 0x7) << 21) | ||
| 415 | #define G_000100_EFFECTIVE_L2_QUEUE_SIZE(x) (((x) >> 21) & 0x7) | ||
| 416 | #define C_000100_EFFECTIVE_L2_QUEUE_SIZE 0xFF1FFFFF | ||
| 417 | #define S_000100_INVALIDATE_ALL_L1_TLBS(x) (((x) & 0x1) << 28) | ||
| 418 | #define G_000100_INVALIDATE_ALL_L1_TLBS(x) (((x) >> 28) & 0x1) | ||
| 419 | #define C_000100_INVALIDATE_ALL_L1_TLBS 0xEFFFFFFF | ||
| 420 | #define S_000100_INVALIDATE_L2_CACHE(x) (((x) & 0x1) << 29) | ||
| 421 | #define G_000100_INVALIDATE_L2_CACHE(x) (((x) >> 29) & 0x1) | ||
| 422 | #define C_000100_INVALIDATE_L2_CACHE 0xDFFFFFFF | ||
| 423 | #define R_000102_MC_PT0_CONTEXT0_CNTL 0x000102 | ||
| 424 | #define S_000102_ENABLE_PAGE_TABLE(x) (((x) & 0x1) << 0) | ||
| 425 | #define G_000102_ENABLE_PAGE_TABLE(x) (((x) >> 0) & 0x1) | ||
| 426 | #define C_000102_ENABLE_PAGE_TABLE 0xFFFFFFFE | ||
| 427 | #define S_000102_PAGE_TABLE_DEPTH(x) (((x) & 0x3) << 1) | ||
| 428 | #define G_000102_PAGE_TABLE_DEPTH(x) (((x) >> 1) & 0x3) | ||
| 429 | #define C_000102_PAGE_TABLE_DEPTH 0xFFFFFFF9 | ||
| 430 | #define V_000102_PAGE_TABLE_FLAT 0 | ||
| 431 | /* R600 documentation suggest that this should be a number of pages */ | ||
| 432 | #define R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR 0x000112 | ||
| 433 | #define R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR 0x000114 | ||
| 434 | #define R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x00011C | ||
| 435 | #define R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR 0x00012C | ||
| 436 | #define R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR 0x00013C | ||
| 437 | #define R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR 0x00014C | ||
| 438 | #define R_00016C_MC_PT0_CLIENT0_CNTL 0x00016C | ||
| 439 | #define S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 0) | ||
| 440 | #define G_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 0) & 0x1) | ||
| 441 | #define C_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE 0xFFFFFFFE | ||
| 442 | #define S_00016C_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 1) | ||
| 443 | #define G_00016C_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 1) & 0x1) | ||
| 444 | #define C_00016C_TRANSLATION_MODE_OVERRIDE 0xFFFFFFFD | ||
| 445 | #define S_00016C_SYSTEM_ACCESS_MODE_MASK(x) (((x) & 0x3) << 8) | ||
| 446 | #define G_00016C_SYSTEM_ACCESS_MODE_MASK(x) (((x) >> 8) & 0x3) | ||
| 447 | #define C_00016C_SYSTEM_ACCESS_MODE_MASK 0xFFFFFCFF | ||
| 448 | #define V_00016C_SYSTEM_ACCESS_MODE_PA_ONLY 0 | ||
| 449 | #define V_00016C_SYSTEM_ACCESS_MODE_USE_SYS_MAP 1 | ||
| 450 | #define V_00016C_SYSTEM_ACCESS_MODE_IN_SYS 2 | ||
| 451 | #define V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS 3 | ||
| 452 | #define S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x) (((x) & 0x1) << 10) | ||
| 453 | #define G_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x) (((x) >> 10) & 0x1) | ||
| 454 | #define C_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS 0xFFFFFBFF | ||
| 455 | #define V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH 0 | ||
| 456 | #define V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE 1 | ||
| 457 | #define S_00016C_EFFECTIVE_L1_CACHE_SIZE(x) (((x) & 0x7) << 11) | ||
| 458 | #define G_00016C_EFFECTIVE_L1_CACHE_SIZE(x) (((x) >> 11) & 0x7) | ||
| 459 | #define C_00016C_EFFECTIVE_L1_CACHE_SIZE 0xFFFFC7FF | ||
| 460 | #define S_00016C_ENABLE_FRAGMENT_PROCESSING(x) (((x) & 0x1) << 14) | ||
| 461 | #define G_00016C_ENABLE_FRAGMENT_PROCESSING(x) (((x) >> 14) & 0x1) | ||
| 462 | #define C_00016C_ENABLE_FRAGMENT_PROCESSING 0xFFFFBFFF | ||
| 463 | #define S_00016C_EFFECTIVE_L1_QUEUE_SIZE(x) (((x) & 0x7) << 15) | ||
| 464 | #define G_00016C_EFFECTIVE_L1_QUEUE_SIZE(x) (((x) >> 15) & 0x7) | ||
| 465 | #define C_00016C_EFFECTIVE_L1_QUEUE_SIZE 0xFFFC7FFF | ||
| 466 | #define S_00016C_INVALIDATE_L1_TLB(x) (((x) & 0x1) << 20) | ||
| 467 | #define G_00016C_INVALIDATE_L1_TLB(x) (((x) >> 20) & 0x1) | ||
| 468 | #define C_00016C_INVALIDATE_L1_TLB 0xFFEFFFFF | ||
| 469 | |||
| 470 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 7a0098ddf977..025e3225346c 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
| @@ -26,105 +26,29 @@ | |||
| 26 | * Jerome Glisse | 26 | * Jerome Glisse |
| 27 | */ | 27 | */ |
| 28 | #include "drmP.h" | 28 | #include "drmP.h" |
| 29 | #include "radeon_reg.h" | ||
| 30 | #include "radeon.h" | 29 | #include "radeon.h" |
| 31 | #include "rs690r.h" | ||
| 32 | #include "atom.h" | 30 | #include "atom.h" |
| 33 | #include "atom-bits.h" | 31 | #include "rs690d.h" |
| 34 | |||
| 35 | /* rs690,rs740 depends on : */ | ||
| 36 | void r100_hdp_reset(struct radeon_device *rdev); | ||
| 37 | int r300_mc_wait_for_idle(struct radeon_device *rdev); | ||
| 38 | void r420_pipes_init(struct radeon_device *rdev); | ||
| 39 | void rs400_gart_disable(struct radeon_device *rdev); | ||
| 40 | int rs400_gart_enable(struct radeon_device *rdev); | ||
| 41 | void rs400_gart_adjust_size(struct radeon_device *rdev); | ||
| 42 | void rs600_mc_disable_clients(struct radeon_device *rdev); | ||
| 43 | |||
| 44 | /* This files gather functions specifics to : | ||
| 45 | * rs690,rs740 | ||
| 46 | * | ||
| 47 | * Some of these functions might be used by newer ASICs. | ||
| 48 | */ | ||
| 49 | void rs690_gpu_init(struct radeon_device *rdev); | ||
| 50 | int rs690_mc_wait_for_idle(struct radeon_device *rdev); | ||
| 51 | |||
| 52 | |||
| 53 | /* | ||
| 54 | * MC functions. | ||
| 55 | */ | ||
| 56 | int rs690_mc_init(struct radeon_device *rdev) | ||
| 57 | { | ||
| 58 | uint32_t tmp; | ||
| 59 | int r; | ||
| 60 | |||
| 61 | if (r100_debugfs_rbbm_init(rdev)) { | ||
| 62 | DRM_ERROR("Failed to register debugfs file for RBBM !\n"); | ||
| 63 | } | ||
| 64 | |||
| 65 | rs690_gpu_init(rdev); | ||
| 66 | rs400_gart_disable(rdev); | ||
| 67 | |||
| 68 | /* Setup GPU memory space */ | ||
| 69 | rdev->mc.gtt_location = rdev->mc.mc_vram_size; | ||
| 70 | rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); | ||
| 71 | rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); | ||
| 72 | rdev->mc.vram_location = 0xFFFFFFFFUL; | ||
| 73 | r = radeon_mc_setup(rdev); | ||
| 74 | if (r) { | ||
| 75 | return r; | ||
| 76 | } | ||
| 77 | |||
| 78 | /* Program GPU memory space */ | ||
| 79 | rs600_mc_disable_clients(rdev); | ||
| 80 | if (rs690_mc_wait_for_idle(rdev)) { | ||
| 81 | printk(KERN_WARNING "Failed to wait MC idle while " | ||
| 82 | "programming pipes. Bad things might happen.\n"); | ||
| 83 | } | ||
| 84 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; | ||
| 85 | tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16); | ||
| 86 | tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16); | ||
| 87 | WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp); | ||
| 88 | /* FIXME: Does this reg exist on RS480,RS740 ? */ | ||
| 89 | WREG32(0x310, rdev->mc.vram_location); | ||
| 90 | WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16); | ||
| 91 | return 0; | ||
| 92 | } | ||
| 93 | |||
| 94 | void rs690_mc_fini(struct radeon_device *rdev) | ||
| 95 | { | ||
| 96 | } | ||
| 97 | |||
| 98 | 32 | ||
| 99 | /* | 33 | static int rs690_mc_wait_for_idle(struct radeon_device *rdev) |
| 100 | * Global GPU functions | ||
| 101 | */ | ||
| 102 | int rs690_mc_wait_for_idle(struct radeon_device *rdev) | ||
| 103 | { | 34 | { |
| 104 | unsigned i; | 35 | unsigned i; |
| 105 | uint32_t tmp; | 36 | uint32_t tmp; |
| 106 | 37 | ||
| 107 | for (i = 0; i < rdev->usec_timeout; i++) { | 38 | for (i = 0; i < rdev->usec_timeout; i++) { |
| 108 | /* read MC_STATUS */ | 39 | /* read MC_STATUS */ |
| 109 | tmp = RREG32_MC(RS690_MC_STATUS); | 40 | tmp = RREG32_MC(R_000090_MC_SYSTEM_STATUS); |
| 110 | if (tmp & RS690_MC_STATUS_IDLE) { | 41 | if (G_000090_MC_SYSTEM_IDLE(tmp)) |
| 111 | return 0; | 42 | return 0; |
| 112 | } | 43 | udelay(1); |
| 113 | DRM_UDELAY(1); | ||
| 114 | } | 44 | } |
| 115 | return -1; | 45 | return -1; |
| 116 | } | 46 | } |
| 117 | 47 | ||
| 118 | void rs690_errata(struct radeon_device *rdev) | 48 | static void rs690_gpu_init(struct radeon_device *rdev) |
| 119 | { | ||
| 120 | rdev->pll_errata = 0; | ||
| 121 | } | ||
| 122 | |||
| 123 | void rs690_gpu_init(struct radeon_device *rdev) | ||
| 124 | { | 49 | { |
| 125 | /* FIXME: HDP same place on rs690 ? */ | 50 | /* FIXME: HDP same place on rs690 ? */ |
| 126 | r100_hdp_reset(rdev); | 51 | r100_hdp_reset(rdev); |
| 127 | rv515_vga_render_disable(rdev); | ||
| 128 | /* FIXME: is this correct ? */ | 52 | /* FIXME: is this correct ? */ |
| 129 | r420_pipes_init(rdev); | 53 | r420_pipes_init(rdev); |
| 130 | if (rs690_mc_wait_for_idle(rdev)) { | 54 | if (rs690_mc_wait_for_idle(rdev)) { |
| @@ -133,10 +57,6 @@ void rs690_gpu_init(struct radeon_device *rdev) | |||
| 133 | } | 57 | } |
| 134 | } | 58 | } |
| 135 | 59 | ||
| 136 | |||
| 137 | /* | ||
| 138 | * VRAM info. | ||
| 139 | */ | ||
| 140 | void rs690_pm_info(struct radeon_device *rdev) | 60 | void rs690_pm_info(struct radeon_device *rdev) |
| 141 | { | 61 | { |
| 142 | int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); | 62 | int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); |
| @@ -250,39 +170,39 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev, | |||
| 250 | /* | 170 | /* |
| 251 | * Line Buffer Setup | 171 | * Line Buffer Setup |
| 252 | * There is a single line buffer shared by both display controllers. | 172 | * There is a single line buffer shared by both display controllers. |
| 253 | * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between | 173 | * R_006520_DC_LB_MEMORY_SPLIT controls how that line buffer is shared between |
| 254 | * the display controllers. The paritioning can either be done | 174 | * the display controllers. The paritioning can either be done |
| 255 | * manually or via one of four preset allocations specified in bits 1:0: | 175 | * manually or via one of four preset allocations specified in bits 1:0: |
| 256 | * 0 - line buffer is divided in half and shared between crtc | 176 | * 0 - line buffer is divided in half and shared between crtc |
| 257 | * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4 | 177 | * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4 |
| 258 | * 2 - D1 gets the whole buffer | 178 | * 2 - D1 gets the whole buffer |
| 259 | * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4 | 179 | * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4 |
| 260 | * Setting bit 2 of DC_LB_MEMORY_SPLIT controls switches to manual | 180 | * Setting bit 2 of R_006520_DC_LB_MEMORY_SPLIT controls switches to manual |
| 261 | * allocation mode. In manual allocation mode, D1 always starts at 0, | 181 | * allocation mode. In manual allocation mode, D1 always starts at 0, |
| 262 | * D1 end/2 is specified in bits 14:4; D2 allocation follows D1. | 182 | * D1 end/2 is specified in bits 14:4; D2 allocation follows D1. |
| 263 | */ | 183 | */ |
| 264 | tmp = RREG32(DC_LB_MEMORY_SPLIT) & ~DC_LB_MEMORY_SPLIT_MASK; | 184 | tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT) & C_006520_DC_LB_MEMORY_SPLIT; |
| 265 | tmp &= ~DC_LB_MEMORY_SPLIT_SHIFT_MODE; | 185 | tmp &= ~C_006520_DC_LB_MEMORY_SPLIT_MODE; |
| 266 | /* auto */ | 186 | /* auto */ |
| 267 | if (mode1 && mode2) { | 187 | if (mode1 && mode2) { |
| 268 | if (mode1->hdisplay > mode2->hdisplay) { | 188 | if (mode1->hdisplay > mode2->hdisplay) { |
| 269 | if (mode1->hdisplay > 2560) | 189 | if (mode1->hdisplay > 2560) |
| 270 | tmp |= DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; | 190 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; |
| 271 | else | 191 | else |
| 272 | tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | 192 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; |
| 273 | } else if (mode2->hdisplay > mode1->hdisplay) { | 193 | } else if (mode2->hdisplay > mode1->hdisplay) { |
| 274 | if (mode2->hdisplay > 2560) | 194 | if (mode2->hdisplay > 2560) |
| 275 | tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; | 195 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; |
| 276 | else | 196 | else |
| 277 | tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | 197 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; |
| 278 | } else | 198 | } else |
| 279 | tmp |= AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | 199 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; |
| 280 | } else if (mode1) { | 200 | } else if (mode1) { |
| 281 | tmp |= DC_LB_MEMORY_SPLIT_D1_ONLY; | 201 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY; |
| 282 | } else if (mode2) { | 202 | } else if (mode2) { |
| 283 | tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; | 203 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; |
| 284 | } | 204 | } |
| 285 | WREG32(DC_LB_MEMORY_SPLIT, tmp); | 205 | WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp); |
| 286 | } | 206 | } |
| 287 | 207 | ||
| 288 | struct rs690_watermark { | 208 | struct rs690_watermark { |
| @@ -487,28 +407,28 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
| 487 | * option. | 407 | * option. |
| 488 | */ | 408 | */ |
| 489 | if (rdev->disp_priority == 2) { | 409 | if (rdev->disp_priority == 2) { |
| 490 | tmp = RREG32_MC(MC_INIT_MISC_LAT_TIMER); | 410 | tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER); |
| 491 | tmp &= ~MC_DISP1R_INIT_LAT_MASK; | 411 | tmp &= C_000104_MC_DISP0R_INIT_LAT; |
| 492 | tmp &= ~MC_DISP0R_INIT_LAT_MASK; | 412 | tmp &= C_000104_MC_DISP1R_INIT_LAT; |
| 493 | if (mode1) | ||
| 494 | tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT); | ||
| 495 | if (mode0) | 413 | if (mode0) |
| 496 | tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT); | 414 | tmp |= S_000104_MC_DISP0R_INIT_LAT(1); |
| 497 | WREG32_MC(MC_INIT_MISC_LAT_TIMER, tmp); | 415 | if (mode1) |
| 416 | tmp |= S_000104_MC_DISP1R_INIT_LAT(1); | ||
| 417 | WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp); | ||
| 498 | } | 418 | } |
| 499 | rs690_line_buffer_adjust(rdev, mode0, mode1); | 419 | rs690_line_buffer_adjust(rdev, mode0, mode1); |
| 500 | 420 | ||
| 501 | if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) | 421 | if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) |
| 502 | WREG32(DCP_CONTROL, 0); | 422 | WREG32(R_006C9C_DCP_CONTROL, 0); |
| 503 | if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) | 423 | if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) |
| 504 | WREG32(DCP_CONTROL, 2); | 424 | WREG32(R_006C9C_DCP_CONTROL, 2); |
| 505 | 425 | ||
| 506 | rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); | 426 | rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); |
| 507 | rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); | 427 | rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); |
| 508 | 428 | ||
| 509 | tmp = (wm0.lb_request_fifo_depth - 1); | 429 | tmp = (wm0.lb_request_fifo_depth - 1); |
| 510 | tmp |= (wm1.lb_request_fifo_depth - 1) << 16; | 430 | tmp |= (wm1.lb_request_fifo_depth - 1) << 16; |
| 511 | WREG32(LB_MAX_REQ_OUTSTANDING, tmp); | 431 | WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp); |
| 512 | 432 | ||
| 513 | if (mode0 && mode1) { | 433 | if (mode0 && mode1) { |
| 514 | if (rfixed_trunc(wm0.dbpp) > 64) | 434 | if (rfixed_trunc(wm0.dbpp) > 64) |
| @@ -561,10 +481,10 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
| 561 | priority_mark12.full = 0; | 481 | priority_mark12.full = 0; |
| 562 | if (wm1.priority_mark_max.full > priority_mark12.full) | 482 | if (wm1.priority_mark_max.full > priority_mark12.full) |
| 563 | priority_mark12.full = wm1.priority_mark_max.full; | 483 | priority_mark12.full = wm1.priority_mark_max.full; |
| 564 | WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | 484 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); |
| 565 | WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | 485 | WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); |
| 566 | WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | 486 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); |
| 567 | WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | 487 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); |
| 568 | } else if (mode0) { | 488 | } else if (mode0) { |
| 569 | if (rfixed_trunc(wm0.dbpp) > 64) | 489 | if (rfixed_trunc(wm0.dbpp) > 64) |
| 570 | a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); | 490 | a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); |
| @@ -591,10 +511,12 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
| 591 | priority_mark02.full = 0; | 511 | priority_mark02.full = 0; |
| 592 | if (wm0.priority_mark_max.full > priority_mark02.full) | 512 | if (wm0.priority_mark_max.full > priority_mark02.full) |
| 593 | priority_mark02.full = wm0.priority_mark_max.full; | 513 | priority_mark02.full = wm0.priority_mark_max.full; |
| 594 | WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | 514 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); |
| 595 | WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | 515 | WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); |
| 596 | WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | 516 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, |
| 597 | WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | 517 | S_006D48_D2MODE_PRIORITY_A_OFF(1)); |
| 518 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, | ||
| 519 | S_006D4C_D2MODE_PRIORITY_B_OFF(1)); | ||
| 598 | } else { | 520 | } else { |
| 599 | if (rfixed_trunc(wm1.dbpp) > 64) | 521 | if (rfixed_trunc(wm1.dbpp) > 64) |
| 600 | a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); | 522 | a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); |
| @@ -621,30 +543,203 @@ void rs690_bandwidth_update(struct radeon_device *rdev) | |||
| 621 | priority_mark12.full = 0; | 543 | priority_mark12.full = 0; |
| 622 | if (wm1.priority_mark_max.full > priority_mark12.full) | 544 | if (wm1.priority_mark_max.full > priority_mark12.full) |
| 623 | priority_mark12.full = wm1.priority_mark_max.full; | 545 | priority_mark12.full = wm1.priority_mark_max.full; |
| 624 | WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | 546 | WREG32(R_006548_D1MODE_PRIORITY_A_CNT, |
| 625 | WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | 547 | S_006548_D1MODE_PRIORITY_A_OFF(1)); |
| 626 | WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | 548 | WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, |
| 627 | WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | 549 | S_00654C_D1MODE_PRIORITY_B_OFF(1)); |
| 550 | WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | ||
| 551 | WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | ||
| 628 | } | 552 | } |
| 629 | } | 553 | } |
| 630 | 554 | ||
| 631 | /* | ||
| 632 | * Indirect registers accessor | ||
| 633 | */ | ||
| 634 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) | 555 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
| 635 | { | 556 | { |
| 636 | uint32_t r; | 557 | uint32_t r; |
| 637 | 558 | ||
| 638 | WREG32(RS690_MC_INDEX, (reg & RS690_MC_INDEX_MASK)); | 559 | WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg)); |
| 639 | r = RREG32(RS690_MC_DATA); | 560 | r = RREG32(R_00007C_MC_DATA); |
| 640 | WREG32(RS690_MC_INDEX, RS690_MC_INDEX_MASK); | 561 | WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR); |
| 641 | return r; | 562 | return r; |
| 642 | } | 563 | } |
| 643 | 564 | ||
| 644 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | 565 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
| 645 | { | 566 | { |
| 646 | WREG32(RS690_MC_INDEX, | 567 | WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) | |
| 647 | RS690_MC_INDEX_WR_EN | ((reg) & RS690_MC_INDEX_MASK)); | 568 | S_000078_MC_IND_WR_EN(1)); |
| 648 | WREG32(RS690_MC_DATA, v); | 569 | WREG32(R_00007C_MC_DATA, v); |
| 649 | WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); | 570 | WREG32(R_000078_MC_INDEX, 0x7F); |
| 571 | } | ||
| 572 | |||
| 573 | void rs690_mc_program(struct radeon_device *rdev) | ||
| 574 | { | ||
| 575 | struct rv515_mc_save save; | ||
| 576 | |||
| 577 | /* Stops all mc clients */ | ||
| 578 | rv515_mc_stop(rdev, &save); | ||
| 579 | |||
| 580 | /* Wait for mc idle */ | ||
| 581 | if (rs690_mc_wait_for_idle(rdev)) | ||
| 582 | dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); | ||
| 583 | /* Program MC, should be a 32bits limited address space */ | ||
| 584 | WREG32_MC(R_000100_MCCFG_FB_LOCATION, | ||
| 585 | S_000100_MC_FB_START(rdev->mc.vram_start >> 16) | | ||
| 586 | S_000100_MC_FB_TOP(rdev->mc.vram_end >> 16)); | ||
| 587 | WREG32(R_000134_HDP_FB_LOCATION, | ||
| 588 | S_000134_HDP_FB_START(rdev->mc.vram_start >> 16)); | ||
| 589 | |||
| 590 | rv515_mc_resume(rdev, &save); | ||
| 591 | } | ||
| 592 | |||
| 593 | static int rs690_startup(struct radeon_device *rdev) | ||
| 594 | { | ||
| 595 | int r; | ||
| 596 | |||
| 597 | rs690_mc_program(rdev); | ||
| 598 | /* Resume clock */ | ||
| 599 | rv515_clock_startup(rdev); | ||
| 600 | /* Initialize GPU configuration (# pipes, ...) */ | ||
| 601 | rs690_gpu_init(rdev); | ||
| 602 | /* Initialize GART (initialize after TTM so we can allocate | ||
| 603 | * memory through TTM but finalize after TTM) */ | ||
| 604 | r = rs400_gart_enable(rdev); | ||
| 605 | if (r) | ||
| 606 | return r; | ||
| 607 | /* Enable IRQ */ | ||
| 608 | rdev->irq.sw_int = true; | ||
| 609 | rs600_irq_set(rdev); | ||
| 610 | /* 1M ring buffer */ | ||
| 611 | r = r100_cp_init(rdev, 1024 * 1024); | ||
| 612 | if (r) { | ||
| 613 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | ||
| 614 | return r; | ||
| 615 | } | ||
| 616 | r = r100_wb_init(rdev); | ||
| 617 | if (r) | ||
| 618 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | ||
| 619 | r = r100_ib_init(rdev); | ||
| 620 | if (r) { | ||
| 621 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | ||
| 622 | return r; | ||
| 623 | } | ||
| 624 | return 0; | ||
| 625 | } | ||
| 626 | |||
| 627 | int rs690_resume(struct radeon_device *rdev) | ||
| 628 | { | ||
| 629 | /* Make sur GART are not working */ | ||
| 630 | rs400_gart_disable(rdev); | ||
| 631 | /* Resume clock before doing reset */ | ||
| 632 | rv515_clock_startup(rdev); | ||
| 633 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
| 634 | if (radeon_gpu_reset(rdev)) { | ||
| 635 | dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
| 636 | RREG32(R_000E40_RBBM_STATUS), | ||
| 637 | RREG32(R_0007C0_CP_STAT)); | ||
| 638 | } | ||
| 639 | /* post */ | ||
| 640 | atom_asic_init(rdev->mode_info.atom_context); | ||
| 641 | /* Resume clock after posting */ | ||
| 642 | rv515_clock_startup(rdev); | ||
| 643 | return rs690_startup(rdev); | ||
| 644 | } | ||
| 645 | |||
| 646 | int rs690_suspend(struct radeon_device *rdev) | ||
| 647 | { | ||
| 648 | r100_cp_disable(rdev); | ||
| 649 | r100_wb_disable(rdev); | ||
| 650 | rs600_irq_disable(rdev); | ||
| 651 | rs400_gart_disable(rdev); | ||
| 652 | return 0; | ||
| 653 | } | ||
| 654 | |||
| 655 | void rs690_fini(struct radeon_device *rdev) | ||
| 656 | { | ||
| 657 | rs690_suspend(rdev); | ||
| 658 | r100_cp_fini(rdev); | ||
| 659 | r100_wb_fini(rdev); | ||
| 660 | r100_ib_fini(rdev); | ||
| 661 | radeon_gem_fini(rdev); | ||
| 662 | rs400_gart_fini(rdev); | ||
| 663 | radeon_irq_kms_fini(rdev); | ||
| 664 | radeon_fence_driver_fini(rdev); | ||
| 665 | radeon_object_fini(rdev); | ||
| 666 | radeon_atombios_fini(rdev); | ||
| 667 | kfree(rdev->bios); | ||
| 668 | rdev->bios = NULL; | ||
| 669 | } | ||
| 670 | |||
| 671 | int rs690_init(struct radeon_device *rdev) | ||
| 672 | { | ||
| 673 | int r; | ||
| 674 | |||
| 675 | /* Disable VGA */ | ||
| 676 | rv515_vga_render_disable(rdev); | ||
| 677 | /* Initialize scratch registers */ | ||
| 678 | radeon_scratch_init(rdev); | ||
| 679 | /* Initialize surface registers */ | ||
| 680 | radeon_surface_init(rdev); | ||
| 681 | /* TODO: disable VGA need to use VGA request */ | ||
| 682 | /* BIOS*/ | ||
| 683 | if (!radeon_get_bios(rdev)) { | ||
| 684 | if (ASIC_IS_AVIVO(rdev)) | ||
| 685 | return -EINVAL; | ||
| 686 | } | ||
| 687 | if (rdev->is_atom_bios) { | ||
| 688 | r = radeon_atombios_init(rdev); | ||
| 689 | if (r) | ||
| 690 | return r; | ||
| 691 | } else { | ||
| 692 | dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n"); | ||
| 693 | return -EINVAL; | ||
| 694 | } | ||
| 695 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
| 696 | if (radeon_gpu_reset(rdev)) { | ||
| 697 | dev_warn(rdev->dev, | ||
| 698 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
| 699 | RREG32(R_000E40_RBBM_STATUS), | ||
| 700 | RREG32(R_0007C0_CP_STAT)); | ||
| 701 | } | ||
| 702 | /* check if cards are posted or not */ | ||
| 703 | if (!radeon_card_posted(rdev) && rdev->bios) { | ||
| 704 | DRM_INFO("GPU not posted. posting now...\n"); | ||
| 705 | atom_asic_init(rdev->mode_info.atom_context); | ||
| 706 | } | ||
| 707 | /* Initialize clocks */ | ||
| 708 | radeon_get_clock_info(rdev->ddev); | ||
| 709 | /* Get vram informations */ | ||
| 710 | rs690_vram_info(rdev); | ||
| 711 | /* Initialize memory controller (also test AGP) */ | ||
| 712 | r = r420_mc_init(rdev); | ||
| 713 | if (r) | ||
| 714 | return r; | ||
| 715 | rv515_debugfs(rdev); | ||
| 716 | /* Fence driver */ | ||
| 717 | r = radeon_fence_driver_init(rdev); | ||
| 718 | if (r) | ||
| 719 | return r; | ||
| 720 | r = radeon_irq_kms_init(rdev); | ||
| 721 | if (r) | ||
| 722 | return r; | ||
| 723 | /* Memory manager */ | ||
| 724 | r = radeon_object_init(rdev); | ||
| 725 | if (r) | ||
| 726 | return r; | ||
| 727 | r = rs400_gart_init(rdev); | ||
| 728 | if (r) | ||
| 729 | return r; | ||
| 730 | rs600_set_safe_registers(rdev); | ||
| 731 | rdev->accel_working = true; | ||
| 732 | r = rs690_startup(rdev); | ||
| 733 | if (r) { | ||
| 734 | /* Somethings want wront with the accel init stop accel */ | ||
| 735 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | ||
| 736 | rs690_suspend(rdev); | ||
| 737 | r100_cp_fini(rdev); | ||
| 738 | r100_wb_fini(rdev); | ||
| 739 | r100_ib_fini(rdev); | ||
| 740 | rs400_gart_fini(rdev); | ||
| 741 | radeon_irq_kms_fini(rdev); | ||
| 742 | rdev->accel_working = false; | ||
| 743 | } | ||
| 744 | return 0; | ||
| 650 | } | 745 | } |
diff --git a/drivers/gpu/drm/radeon/rs690d.h b/drivers/gpu/drm/radeon/rs690d.h new file mode 100644 index 000000000000..62d31e7a897f --- /dev/null +++ b/drivers/gpu/drm/radeon/rs690d.h | |||
| @@ -0,0 +1,307 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #ifndef __RS690D_H__ | ||
| 29 | #define __RS690D_H__ | ||
| 30 | |||
| 31 | /* Registers */ | ||
| 32 | #define R_000078_MC_INDEX 0x000078 | ||
| 33 | #define S_000078_MC_IND_ADDR(x) (((x) & 0x1FF) << 0) | ||
| 34 | #define G_000078_MC_IND_ADDR(x) (((x) >> 0) & 0x1FF) | ||
| 35 | #define C_000078_MC_IND_ADDR 0xFFFFFE00 | ||
| 36 | #define S_000078_MC_IND_WR_EN(x) (((x) & 0x1) << 9) | ||
| 37 | #define G_000078_MC_IND_WR_EN(x) (((x) >> 9) & 0x1) | ||
| 38 | #define C_000078_MC_IND_WR_EN 0xFFFFFDFF | ||
| 39 | #define R_00007C_MC_DATA 0x00007C | ||
| 40 | #define S_00007C_MC_DATA(x) (((x) & 0xFFFFFFFF) << 0) | ||
| 41 | #define G_00007C_MC_DATA(x) (((x) >> 0) & 0xFFFFFFFF) | ||
| 42 | #define C_00007C_MC_DATA 0x00000000 | ||
| 43 | #define R_0000F8_CONFIG_MEMSIZE 0x0000F8 | ||
| 44 | #define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0) | ||
| 45 | #define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF) | ||
| 46 | #define C_0000F8_CONFIG_MEMSIZE 0x00000000 | ||
| 47 | #define R_000134_HDP_FB_LOCATION 0x000134 | ||
| 48 | #define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0) | ||
| 49 | #define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
| 50 | #define C_000134_HDP_FB_START 0xFFFF0000 | ||
| 51 | #define R_0007C0_CP_STAT 0x0007C0 | ||
| 52 | #define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) | ||
| 53 | #define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) | ||
| 54 | #define C_0007C0_MRU_BUSY 0xFFFFFFFE | ||
| 55 | #define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) | ||
| 56 | #define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) | ||
| 57 | #define C_0007C0_MWU_BUSY 0xFFFFFFFD | ||
| 58 | #define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) | ||
| 59 | #define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) | ||
| 60 | #define C_0007C0_RSIU_BUSY 0xFFFFFFFB | ||
| 61 | #define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) | ||
| 62 | #define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) | ||
| 63 | #define C_0007C0_RCIU_BUSY 0xFFFFFFF7 | ||
| 64 | #define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) | ||
| 65 | #define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) | ||
| 66 | #define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF | ||
| 67 | #define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) | ||
| 68 | #define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) | ||
| 69 | #define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF | ||
| 70 | #define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) | ||
| 71 | #define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) | ||
| 72 | #define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF | ||
| 73 | #define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) | ||
| 74 | #define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) | ||
| 75 | #define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF | ||
| 76 | #define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) | ||
| 77 | #define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) | ||
| 78 | #define C_0007C0_CSI_BUSY 0xFFFFDFFF | ||
| 79 | #define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14) | ||
| 80 | #define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1) | ||
| 81 | #define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF | ||
| 82 | #define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15) | ||
| 83 | #define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1) | ||
| 84 | #define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF | ||
| 85 | #define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) | ||
| 86 | #define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) | ||
| 87 | #define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF | ||
| 88 | #define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) | ||
| 89 | #define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) | ||
| 90 | #define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF | ||
| 91 | #define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) | ||
| 92 | #define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) | ||
| 93 | #define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF | ||
| 94 | #define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) | ||
| 95 | #define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) | ||
| 96 | #define C_0007C0_CP_BUSY 0x7FFFFFFF | ||
| 97 | #define R_000E40_RBBM_STATUS 0x000E40 | ||
| 98 | #define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) | ||
| 99 | #define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) | ||
| 100 | #define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 | ||
| 101 | #define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) | ||
| 102 | #define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) | ||
| 103 | #define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF | ||
| 104 | #define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) | ||
| 105 | #define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) | ||
| 106 | #define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF | ||
| 107 | #define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) | ||
| 108 | #define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) | ||
| 109 | #define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF | ||
| 110 | #define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) | ||
| 111 | #define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) | ||
| 112 | #define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF | ||
| 113 | #define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) | ||
| 114 | #define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) | ||
| 115 | #define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF | ||
| 116 | #define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) | ||
| 117 | #define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) | ||
| 118 | #define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF | ||
| 119 | #define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) | ||
| 120 | #define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) | ||
| 121 | #define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF | ||
| 122 | #define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) | ||
| 123 | #define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) | ||
| 124 | #define C_000E40_ENG_EV_BUSY 0xFFFF7FFF | ||
| 125 | #define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) | ||
| 126 | #define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) | ||
| 127 | #define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF | ||
| 128 | #define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) | ||
| 129 | #define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) | ||
| 130 | #define C_000E40_E2_BUSY 0xFFFDFFFF | ||
| 131 | #define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) | ||
| 132 | #define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) | ||
| 133 | #define C_000E40_RB2D_BUSY 0xFFFBFFFF | ||
| 134 | #define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) | ||
| 135 | #define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) | ||
| 136 | #define C_000E40_RB3D_BUSY 0xFFF7FFFF | ||
| 137 | #define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20) | ||
| 138 | #define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1) | ||
| 139 | #define C_000E40_VAP_BUSY 0xFFEFFFFF | ||
| 140 | #define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) | ||
| 141 | #define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) | ||
| 142 | #define C_000E40_RE_BUSY 0xFFDFFFFF | ||
| 143 | #define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) | ||
| 144 | #define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) | ||
| 145 | #define C_000E40_TAM_BUSY 0xFFBFFFFF | ||
| 146 | #define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) | ||
| 147 | #define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) | ||
| 148 | #define C_000E40_TDM_BUSY 0xFF7FFFFF | ||
| 149 | #define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) | ||
| 150 | #define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) | ||
| 151 | #define C_000E40_PB_BUSY 0xFEFFFFFF | ||
| 152 | #define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25) | ||
| 153 | #define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1) | ||
| 154 | #define C_000E40_TIM_BUSY 0xFDFFFFFF | ||
| 155 | #define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26) | ||
| 156 | #define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1) | ||
| 157 | #define C_000E40_GA_BUSY 0xFBFFFFFF | ||
| 158 | #define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27) | ||
| 159 | #define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1) | ||
| 160 | #define C_000E40_CBA2D_BUSY 0xF7FFFFFF | ||
| 161 | #define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) | ||
| 162 | #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) | ||
| 163 | #define C_000E40_GUI_ACTIVE 0x7FFFFFFF | ||
| 164 | #define R_006520_DC_LB_MEMORY_SPLIT 0x006520 | ||
| 165 | #define S_006520_DC_LB_MEMORY_SPLIT(x) (((x) & 0x3) << 0) | ||
| 166 | #define G_006520_DC_LB_MEMORY_SPLIT(x) (((x) >> 0) & 0x3) | ||
| 167 | #define C_006520_DC_LB_MEMORY_SPLIT 0xFFFFFFFC | ||
| 168 | #define S_006520_DC_LB_MEMORY_SPLIT_MODE(x) (((x) & 0x1) << 2) | ||
| 169 | #define G_006520_DC_LB_MEMORY_SPLIT_MODE(x) (((x) >> 2) & 0x1) | ||
| 170 | #define C_006520_DC_LB_MEMORY_SPLIT_MODE 0xFFFFFFFB | ||
| 171 | #define V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0 | ||
| 172 | #define V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1 | ||
| 173 | #define V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY 2 | ||
| 174 | #define V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3 | ||
| 175 | #define S_006520_DC_LB_DISP1_END_ADR(x) (((x) & 0x7FF) << 4) | ||
| 176 | #define G_006520_DC_LB_DISP1_END_ADR(x) (((x) >> 4) & 0x7FF) | ||
| 177 | #define C_006520_DC_LB_DISP1_END_ADR 0xFFFF800F | ||
| 178 | #define R_006548_D1MODE_PRIORITY_A_CNT 0x006548 | ||
| 179 | #define S_006548_D1MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0) | ||
| 180 | #define G_006548_D1MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF) | ||
| 181 | #define C_006548_D1MODE_PRIORITY_MARK_A 0xFFFF8000 | ||
| 182 | #define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16) | ||
| 183 | #define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1) | ||
| 184 | #define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF | ||
| 185 | #define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24) | ||
| 186 | #define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1) | ||
| 187 | #define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF | ||
| 188 | #define R_00654C_D1MODE_PRIORITY_B_CNT 0x00654C | ||
| 189 | #define S_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0) | ||
| 190 | #define G_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF) | ||
| 191 | #define C_00654C_D1MODE_PRIORITY_MARK_B 0xFFFF8000 | ||
| 192 | #define S_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16) | ||
| 193 | #define G_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1) | ||
| 194 | #define C_00654C_D1MODE_PRIORITY_B_OFF 0xFFFEFFFF | ||
| 195 | #define S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20) | ||
| 196 | #define G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1) | ||
| 197 | #define C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF | ||
| 198 | #define S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24) | ||
| 199 | #define G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1) | ||
| 200 | #define C_00654C_D1MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF | ||
| 201 | #define R_006C9C_DCP_CONTROL 0x006C9C | ||
| 202 | #define R_006D48_D2MODE_PRIORITY_A_CNT 0x006D48 | ||
| 203 | #define S_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0) | ||
| 204 | #define G_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF) | ||
| 205 | #define C_006D48_D2MODE_PRIORITY_MARK_A 0xFFFF8000 | ||
| 206 | #define S_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16) | ||
| 207 | #define G_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1) | ||
| 208 | #define C_006D48_D2MODE_PRIORITY_A_OFF 0xFFFEFFFF | ||
| 209 | #define S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20) | ||
| 210 | #define G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1) | ||
| 211 | #define C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF | ||
| 212 | #define S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24) | ||
| 213 | #define G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1) | ||
| 214 | #define C_006D48_D2MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF | ||
| 215 | #define R_006D4C_D2MODE_PRIORITY_B_CNT 0x006D4C | ||
| 216 | #define S_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0) | ||
| 217 | #define G_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF) | ||
| 218 | #define C_006D4C_D2MODE_PRIORITY_MARK_B 0xFFFF8000 | ||
| 219 | #define S_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16) | ||
| 220 | #define G_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1) | ||
| 221 | #define C_006D4C_D2MODE_PRIORITY_B_OFF 0xFFFEFFFF | ||
| 222 | #define S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20) | ||
| 223 | #define G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1) | ||
| 224 | #define C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF | ||
| 225 | #define S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24) | ||
| 226 | #define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1) | ||
| 227 | #define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF | ||
| 228 | #define R_006D58_LB_MAX_REQ_OUTSTANDING 0x006D58 | ||
| 229 | #define S_006D58_LB_D1_MAX_REQ_OUTSTANDING(x) (((x) & 0xF) << 0) | ||
| 230 | #define G_006D58_LB_D1_MAX_REQ_OUTSTANDING(x) (((x) >> 0) & 0xF) | ||
| 231 | #define C_006D58_LB_D1_MAX_REQ_OUTSTANDING 0xFFFFFFF0 | ||
| 232 | #define S_006D58_LB_D2_MAX_REQ_OUTSTANDING(x) (((x) & 0xF) << 16) | ||
| 233 | #define G_006D58_LB_D2_MAX_REQ_OUTSTANDING(x) (((x) >> 16) & 0xF) | ||
| 234 | #define C_006D58_LB_D2_MAX_REQ_OUTSTANDING 0xFFF0FFFF | ||
| 235 | |||
| 236 | |||
| 237 | #define R_000090_MC_SYSTEM_STATUS 0x000090 | ||
| 238 | #define S_000090_MC_SYSTEM_IDLE(x) (((x) & 0x1) << 0) | ||
| 239 | #define G_000090_MC_SYSTEM_IDLE(x) (((x) >> 0) & 0x1) | ||
| 240 | #define C_000090_MC_SYSTEM_IDLE 0xFFFFFFFE | ||
| 241 | #define S_000090_MC_SEQUENCER_IDLE(x) (((x) & 0x1) << 1) | ||
| 242 | #define G_000090_MC_SEQUENCER_IDLE(x) (((x) >> 1) & 0x1) | ||
| 243 | #define C_000090_MC_SEQUENCER_IDLE 0xFFFFFFFD | ||
| 244 | #define S_000090_MC_ARBITER_IDLE(x) (((x) & 0x1) << 2) | ||
| 245 | #define G_000090_MC_ARBITER_IDLE(x) (((x) >> 2) & 0x1) | ||
| 246 | #define C_000090_MC_ARBITER_IDLE 0xFFFFFFFB | ||
| 247 | #define S_000090_MC_SELECT_PM(x) (((x) & 0x1) << 3) | ||
| 248 | #define G_000090_MC_SELECT_PM(x) (((x) >> 3) & 0x1) | ||
| 249 | #define C_000090_MC_SELECT_PM 0xFFFFFFF7 | ||
| 250 | #define S_000090_RESERVED4(x) (((x) & 0xF) << 4) | ||
| 251 | #define G_000090_RESERVED4(x) (((x) >> 4) & 0xF) | ||
| 252 | #define C_000090_RESERVED4 0xFFFFFF0F | ||
| 253 | #define S_000090_RESERVED8(x) (((x) & 0xF) << 8) | ||
| 254 | #define G_000090_RESERVED8(x) (((x) >> 8) & 0xF) | ||
| 255 | #define C_000090_RESERVED8 0xFFFFF0FF | ||
| 256 | #define S_000090_RESERVED12(x) (((x) & 0xF) << 12) | ||
| 257 | #define G_000090_RESERVED12(x) (((x) >> 12) & 0xF) | ||
| 258 | #define C_000090_RESERVED12 0xFFFF0FFF | ||
| 259 | #define S_000090_MCA_INIT_EXECUTED(x) (((x) & 0x1) << 16) | ||
| 260 | #define G_000090_MCA_INIT_EXECUTED(x) (((x) >> 16) & 0x1) | ||
| 261 | #define C_000090_MCA_INIT_EXECUTED 0xFFFEFFFF | ||
| 262 | #define S_000090_MCA_IDLE(x) (((x) & 0x1) << 17) | ||
| 263 | #define G_000090_MCA_IDLE(x) (((x) >> 17) & 0x1) | ||
| 264 | #define C_000090_MCA_IDLE 0xFFFDFFFF | ||
| 265 | #define S_000090_MCA_SEQ_IDLE(x) (((x) & 0x1) << 18) | ||
| 266 | #define G_000090_MCA_SEQ_IDLE(x) (((x) >> 18) & 0x1) | ||
| 267 | #define C_000090_MCA_SEQ_IDLE 0xFFFBFFFF | ||
| 268 | #define S_000090_MCA_ARB_IDLE(x) (((x) & 0x1) << 19) | ||
| 269 | #define G_000090_MCA_ARB_IDLE(x) (((x) >> 19) & 0x1) | ||
| 270 | #define C_000090_MCA_ARB_IDLE 0xFFF7FFFF | ||
| 271 | #define S_000090_RESERVED20(x) (((x) & 0xFFF) << 20) | ||
| 272 | #define G_000090_RESERVED20(x) (((x) >> 20) & 0xFFF) | ||
| 273 | #define C_000090_RESERVED20 0x000FFFFF | ||
| 274 | #define R_000100_MCCFG_FB_LOCATION 0x000100 | ||
| 275 | #define S_000100_MC_FB_START(x) (((x) & 0xFFFF) << 0) | ||
| 276 | #define G_000100_MC_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
| 277 | #define C_000100_MC_FB_START 0xFFFF0000 | ||
| 278 | #define S_000100_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) | ||
| 279 | #define G_000100_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) | ||
| 280 | #define C_000100_MC_FB_TOP 0x0000FFFF | ||
| 281 | #define R_000104_MC_INIT_MISC_LAT_TIMER 0x000104 | ||
| 282 | #define S_000104_MC_CPR_INIT_LAT(x) (((x) & 0xF) << 0) | ||
| 283 | #define G_000104_MC_CPR_INIT_LAT(x) (((x) >> 0) & 0xF) | ||
| 284 | #define C_000104_MC_CPR_INIT_LAT 0xFFFFFFF0 | ||
| 285 | #define S_000104_MC_VF_INIT_LAT(x) (((x) & 0xF) << 4) | ||
| 286 | #define G_000104_MC_VF_INIT_LAT(x) (((x) >> 4) & 0xF) | ||
| 287 | #define C_000104_MC_VF_INIT_LAT 0xFFFFFF0F | ||
| 288 | #define S_000104_MC_DISP0R_INIT_LAT(x) (((x) & 0xF) << 8) | ||
| 289 | #define G_000104_MC_DISP0R_INIT_LAT(x) (((x) >> 8) & 0xF) | ||
| 290 | #define C_000104_MC_DISP0R_INIT_LAT 0xFFFFF0FF | ||
| 291 | #define S_000104_MC_DISP1R_INIT_LAT(x) (((x) & 0xF) << 12) | ||
| 292 | #define G_000104_MC_DISP1R_INIT_LAT(x) (((x) >> 12) & 0xF) | ||
| 293 | #define C_000104_MC_DISP1R_INIT_LAT 0xFFFF0FFF | ||
| 294 | #define S_000104_MC_FIXED_INIT_LAT(x) (((x) & 0xF) << 16) | ||
| 295 | #define G_000104_MC_FIXED_INIT_LAT(x) (((x) >> 16) & 0xF) | ||
| 296 | #define C_000104_MC_FIXED_INIT_LAT 0xFFF0FFFF | ||
| 297 | #define S_000104_MC_E2R_INIT_LAT(x) (((x) & 0xF) << 20) | ||
| 298 | #define G_000104_MC_E2R_INIT_LAT(x) (((x) >> 20) & 0xF) | ||
| 299 | #define C_000104_MC_E2R_INIT_LAT 0xFF0FFFFF | ||
| 300 | #define S_000104_SAME_PAGE_PRIO(x) (((x) & 0xF) << 24) | ||
| 301 | #define G_000104_SAME_PAGE_PRIO(x) (((x) >> 24) & 0xF) | ||
| 302 | #define C_000104_SAME_PAGE_PRIO 0xF0FFFFFF | ||
| 303 | #define S_000104_MC_GLOBW_INIT_LAT(x) (((x) & 0xF) << 28) | ||
| 304 | #define G_000104_MC_GLOBW_INIT_LAT(x) (((x) >> 28) & 0xF) | ||
| 305 | #define C_000104_MC_GLOBW_INIT_LAT 0x0FFFFFFF | ||
| 306 | |||
| 307 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/rs690r.h b/drivers/gpu/drm/radeon/rs690r.h deleted file mode 100644 index c0d9faa2175b..000000000000 --- a/drivers/gpu/drm/radeon/rs690r.h +++ /dev/null | |||
| @@ -1,99 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #ifndef RS690R_H | ||
| 29 | #define RS690R_H | ||
| 30 | |||
| 31 | /* RS690/RS740 registers */ | ||
| 32 | #define MC_INDEX 0x0078 | ||
| 33 | # define MC_INDEX_MASK 0x1FF | ||
| 34 | # define MC_INDEX_WR_EN (1 << 9) | ||
| 35 | # define MC_INDEX_WR_ACK 0x7F | ||
| 36 | #define MC_DATA 0x007C | ||
| 37 | #define HDP_FB_LOCATION 0x0134 | ||
| 38 | #define DC_LB_MEMORY_SPLIT 0x6520 | ||
| 39 | #define DC_LB_MEMORY_SPLIT_MASK 0x00000003 | ||
| 40 | #define DC_LB_MEMORY_SPLIT_SHIFT 0 | ||
| 41 | #define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0 | ||
| 42 | #define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1 | ||
| 43 | #define DC_LB_MEMORY_SPLIT_D1_ONLY 2 | ||
| 44 | #define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3 | ||
| 45 | #define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2) | ||
| 46 | #define DC_LB_DISP1_END_ADR_SHIFT 4 | ||
| 47 | #define DC_LB_DISP1_END_ADR_MASK 0x00007FF0 | ||
| 48 | #define D1MODE_PRIORITY_A_CNT 0x6548 | ||
| 49 | #define MODE_PRIORITY_MARK_MASK 0x00007FFF | ||
| 50 | #define MODE_PRIORITY_OFF (1 << 16) | ||
| 51 | #define MODE_PRIORITY_ALWAYS_ON (1 << 20) | ||
| 52 | #define MODE_PRIORITY_FORCE_MASK (1 << 24) | ||
| 53 | #define D1MODE_PRIORITY_B_CNT 0x654C | ||
| 54 | #define LB_MAX_REQ_OUTSTANDING 0x6D58 | ||
| 55 | #define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F | ||
| 56 | #define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0 | ||
| 57 | #define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000 | ||
| 58 | #define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16 | ||
| 59 | #define DCP_CONTROL 0x6C9C | ||
| 60 | #define D2MODE_PRIORITY_A_CNT 0x6D48 | ||
| 61 | #define D2MODE_PRIORITY_B_CNT 0x6D4C | ||
| 62 | |||
| 63 | /* MC indirect registers */ | ||
| 64 | #define MC_STATUS_IDLE (1 << 0) | ||
| 65 | #define MC_MISC_CNTL 0x18 | ||
| 66 | #define DISABLE_GTW (1 << 1) | ||
| 67 | #define GART_INDEX_REG_EN (1 << 12) | ||
| 68 | #define BLOCK_GFX_D3_EN (1 << 14) | ||
| 69 | #define GART_FEATURE_ID 0x2B | ||
| 70 | #define HANG_EN (1 << 11) | ||
| 71 | #define TLB_ENABLE (1 << 18) | ||
| 72 | #define P2P_ENABLE (1 << 19) | ||
| 73 | #define GTW_LAC_EN (1 << 25) | ||
| 74 | #define LEVEL2_GART (0 << 30) | ||
| 75 | #define LEVEL1_GART (1 << 30) | ||
| 76 | #define PDC_EN (1 << 31) | ||
| 77 | #define GART_BASE 0x2C | ||
| 78 | #define GART_CACHE_CNTRL 0x2E | ||
| 79 | # define GART_CACHE_INVALIDATE (1 << 0) | ||
| 80 | #define MC_STATUS 0x90 | ||
| 81 | #define MCCFG_FB_LOCATION 0x100 | ||
| 82 | #define MC_FB_START_MASK 0x0000FFFF | ||
| 83 | #define MC_FB_START_SHIFT 0 | ||
| 84 | #define MC_FB_TOP_MASK 0xFFFF0000 | ||
| 85 | #define MC_FB_TOP_SHIFT 16 | ||
| 86 | #define MCCFG_AGP_LOCATION 0x101 | ||
| 87 | #define MC_AGP_START_MASK 0x0000FFFF | ||
| 88 | #define MC_AGP_START_SHIFT 0 | ||
| 89 | #define MC_AGP_TOP_MASK 0xFFFF0000 | ||
| 90 | #define MC_AGP_TOP_SHIFT 16 | ||
| 91 | #define MCCFG_AGP_BASE 0x102 | ||
| 92 | #define MCCFG_AGP_BASE_2 0x103 | ||
| 93 | #define MC_INIT_MISC_LAT_TIMER 0x104 | ||
| 94 | #define MC_DISP0R_INIT_LAT_SHIFT 8 | ||
| 95 | #define MC_DISP0R_INIT_LAT_MASK 0x00000F00 | ||
| 96 | #define MC_DISP1R_INIT_LAT_SHIFT 12 | ||
| 97 | #define MC_DISP1R_INIT_LAT_MASK 0x0000F000 | ||
| 98 | |||
| 99 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/rv200d.h b/drivers/gpu/drm/radeon/rv200d.h new file mode 100644 index 000000000000..c5b398330c26 --- /dev/null +++ b/drivers/gpu/drm/radeon/rv200d.h | |||
| @@ -0,0 +1,36 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #ifndef __RV200D_H__ | ||
| 29 | #define __RV200D_H__ | ||
| 30 | |||
| 31 | #define R_00015C_AGP_BASE_2 0x00015C | ||
| 32 | #define S_00015C_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0) | ||
| 33 | #define G_00015C_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF) | ||
| 34 | #define C_00015C_AGP_BASE_ADDR_2 0xFFFFFFF0 | ||
| 35 | |||
| 36 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/rv250d.h b/drivers/gpu/drm/radeon/rv250d.h new file mode 100644 index 000000000000..e5a70b06fe1f --- /dev/null +++ b/drivers/gpu/drm/radeon/rv250d.h | |||
| @@ -0,0 +1,123 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #ifndef __RV250D_H__ | ||
| 29 | #define __RV250D_H__ | ||
| 30 | |||
| 31 | #define R_00000D_SCLK_CNTL_M6 0x00000D | ||
| 32 | #define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0) | ||
| 33 | #define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7) | ||
| 34 | #define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8 | ||
| 35 | #define S_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 3) | ||
| 36 | #define G_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) >> 3) & 0x1) | ||
| 37 | #define C_00000D_CP_MAX_DYN_STOP_LAT 0xFFFFFFF7 | ||
| 38 | #define S_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 4) | ||
| 39 | #define G_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) >> 4) & 0x1) | ||
| 40 | #define C_00000D_HDP_MAX_DYN_STOP_LAT 0xFFFFFFEF | ||
| 41 | #define S_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 5) | ||
| 42 | #define G_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) >> 5) & 0x1) | ||
| 43 | #define C_00000D_TV_MAX_DYN_STOP_LAT 0xFFFFFFDF | ||
| 44 | #define S_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 6) | ||
| 45 | #define G_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) >> 6) & 0x1) | ||
| 46 | #define C_00000D_E2_MAX_DYN_STOP_LAT 0xFFFFFFBF | ||
| 47 | #define S_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 7) | ||
| 48 | #define G_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) >> 7) & 0x1) | ||
| 49 | #define C_00000D_SE_MAX_DYN_STOP_LAT 0xFFFFFF7F | ||
| 50 | #define S_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 8) | ||
| 51 | #define G_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 8) & 0x1) | ||
| 52 | #define C_00000D_IDCT_MAX_DYN_STOP_LAT 0xFFFFFEFF | ||
| 53 | #define S_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 9) | ||
| 54 | #define G_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) >> 9) & 0x1) | ||
| 55 | #define C_00000D_VIP_MAX_DYN_STOP_LAT 0xFFFFFDFF | ||
| 56 | #define S_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 10) | ||
| 57 | #define G_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) >> 10) & 0x1) | ||
| 58 | #define C_00000D_RE_MAX_DYN_STOP_LAT 0xFFFFFBFF | ||
| 59 | #define S_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 11) | ||
| 60 | #define G_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) >> 11) & 0x1) | ||
| 61 | #define C_00000D_PB_MAX_DYN_STOP_LAT 0xFFFFF7FF | ||
| 62 | #define S_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 12) | ||
| 63 | #define G_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) >> 12) & 0x1) | ||
| 64 | #define C_00000D_TAM_MAX_DYN_STOP_LAT 0xFFFFEFFF | ||
| 65 | #define S_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 13) | ||
| 66 | #define G_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) >> 13) & 0x1) | ||
| 67 | #define C_00000D_TDM_MAX_DYN_STOP_LAT 0xFFFFDFFF | ||
| 68 | #define S_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 14) | ||
| 69 | #define G_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) >> 14) & 0x1) | ||
| 70 | #define C_00000D_RB_MAX_DYN_STOP_LAT 0xFFFFBFFF | ||
| 71 | #define S_00000D_FORCE_DISP2(x) (((x) & 0x1) << 15) | ||
| 72 | #define G_00000D_FORCE_DISP2(x) (((x) >> 15) & 0x1) | ||
| 73 | #define C_00000D_FORCE_DISP2 0xFFFF7FFF | ||
| 74 | #define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16) | ||
| 75 | #define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1) | ||
| 76 | #define C_00000D_FORCE_CP 0xFFFEFFFF | ||
| 77 | #define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17) | ||
| 78 | #define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1) | ||
| 79 | #define C_00000D_FORCE_HDP 0xFFFDFFFF | ||
| 80 | #define S_00000D_FORCE_DISP1(x) (((x) & 0x1) << 18) | ||
| 81 | #define G_00000D_FORCE_DISP1(x) (((x) >> 18) & 0x1) | ||
| 82 | #define C_00000D_FORCE_DISP1 0xFFFBFFFF | ||
| 83 | #define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19) | ||
| 84 | #define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1) | ||
| 85 | #define C_00000D_FORCE_TOP 0xFFF7FFFF | ||
| 86 | #define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) | ||
| 87 | #define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) | ||
| 88 | #define C_00000D_FORCE_E2 0xFFEFFFFF | ||
| 89 | #define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21) | ||
| 90 | #define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1) | ||
| 91 | #define C_00000D_FORCE_SE 0xFFDFFFFF | ||
| 92 | #define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) | ||
| 93 | #define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) | ||
| 94 | #define C_00000D_FORCE_IDCT 0xFFBFFFFF | ||
| 95 | #define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23) | ||
| 96 | #define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1) | ||
| 97 | #define C_00000D_FORCE_VIP 0xFF7FFFFF | ||
| 98 | #define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) | ||
| 99 | #define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) | ||
| 100 | #define C_00000D_FORCE_RE 0xFEFFFFFF | ||
| 101 | #define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25) | ||
| 102 | #define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1) | ||
| 103 | #define C_00000D_FORCE_PB 0xFDFFFFFF | ||
| 104 | #define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26) | ||
| 105 | #define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1) | ||
| 106 | #define C_00000D_FORCE_TAM 0xFBFFFFFF | ||
| 107 | #define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27) | ||
| 108 | #define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1) | ||
| 109 | #define C_00000D_FORCE_TDM 0xF7FFFFFF | ||
| 110 | #define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28) | ||
| 111 | #define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) | ||
| 112 | #define C_00000D_FORCE_RB 0xEFFFFFFF | ||
| 113 | #define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29) | ||
| 114 | #define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1) | ||
| 115 | #define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF | ||
| 116 | #define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30) | ||
| 117 | #define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1) | ||
| 118 | #define C_00000D_FORCE_SUBPIC 0xBFFFFFFF | ||
| 119 | #define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31) | ||
| 120 | #define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1) | ||
| 121 | #define C_00000D_FORCE_OV0 0x7FFFFFFF | ||
| 122 | |||
| 123 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/rv350d.h b/drivers/gpu/drm/radeon/rv350d.h new file mode 100644 index 000000000000..c75c5ed9e654 --- /dev/null +++ b/drivers/gpu/drm/radeon/rv350d.h | |||
| @@ -0,0 +1,52 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #ifndef __RV350D_H__ | ||
| 29 | #define __RV350D_H__ | ||
| 30 | |||
| 31 | /* RV350, RV380 registers */ | ||
| 32 | /* #define R_00000D_SCLK_CNTL 0x00000D */ | ||
| 33 | #define S_00000D_FORCE_VAP(x) (((x) & 0x1) << 21) | ||
| 34 | #define G_00000D_FORCE_VAP(x) (((x) >> 21) & 0x1) | ||
| 35 | #define C_00000D_FORCE_VAP 0xFFDFFFFF | ||
| 36 | #define S_00000D_FORCE_SR(x) (((x) & 0x1) << 25) | ||
| 37 | #define G_00000D_FORCE_SR(x) (((x) >> 25) & 0x1) | ||
| 38 | #define C_00000D_FORCE_SR 0xFDFFFFFF | ||
| 39 | #define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26) | ||
| 40 | #define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1) | ||
| 41 | #define C_00000D_FORCE_PX 0xFBFFFFFF | ||
| 42 | #define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27) | ||
| 43 | #define G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1) | ||
| 44 | #define C_00000D_FORCE_TX 0xF7FFFFFF | ||
| 45 | #define S_00000D_FORCE_US(x) (((x) & 0x1) << 28) | ||
| 46 | #define G_00000D_FORCE_US(x) (((x) >> 28) & 0x1) | ||
| 47 | #define C_00000D_FORCE_US 0xEFFFFFFF | ||
| 48 | #define S_00000D_FORCE_SU(x) (((x) & 0x1) << 30) | ||
| 49 | #define G_00000D_FORCE_SU(x) (((x) >> 30) & 0x1) | ||
| 50 | #define C_00000D_FORCE_SU 0xBFFFFFFF | ||
| 51 | |||
| 52 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index e53b5ca7a253..41a34c23e6d8 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
| @@ -478,7 +478,7 @@ static int rv515_startup(struct radeon_device *rdev) | |||
| 478 | } | 478 | } |
| 479 | /* Enable IRQ */ | 479 | /* Enable IRQ */ |
| 480 | rdev->irq.sw_int = true; | 480 | rdev->irq.sw_int = true; |
| 481 | r100_irq_set(rdev); | 481 | rs600_irq_set(rdev); |
| 482 | /* 1M ring buffer */ | 482 | /* 1M ring buffer */ |
| 483 | r = r100_cp_init(rdev, 1024 * 1024); | 483 | r = r100_cp_init(rdev, 1024 * 1024); |
| 484 | if (r) { | 484 | if (r) { |
| @@ -520,7 +520,7 @@ int rv515_suspend(struct radeon_device *rdev) | |||
| 520 | { | 520 | { |
| 521 | r100_cp_disable(rdev); | 521 | r100_cp_disable(rdev); |
| 522 | r100_wb_disable(rdev); | 522 | r100_wb_disable(rdev); |
| 523 | r100_irq_disable(rdev); | 523 | rs600_irq_disable(rdev); |
| 524 | if (rdev->flags & RADEON_IS_PCIE) | 524 | if (rdev->flags & RADEON_IS_PCIE) |
| 525 | rv370_pcie_gart_disable(rdev); | 525 | rv370_pcie_gart_disable(rdev); |
| 526 | return 0; | 526 | return 0; |
| @@ -553,7 +553,6 @@ int rv515_init(struct radeon_device *rdev) | |||
| 553 | { | 553 | { |
| 554 | int r; | 554 | int r; |
| 555 | 555 | ||
| 556 | rdev->new_init_path = true; | ||
| 557 | /* Initialize scratch registers */ | 556 | /* Initialize scratch registers */ |
| 558 | radeon_scratch_init(rdev); | 557 | radeon_scratch_init(rdev); |
| 559 | /* Initialize surface registers */ | 558 | /* Initialize surface registers */ |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index e0b97d161397..595ac638039d 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
| @@ -75,7 +75,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev) | |||
| 75 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); | 75 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); |
| 76 | WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); | 76 | WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); |
| 77 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); | 77 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); |
| 78 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12); | 78 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); |
| 79 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); | 79 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); |
| 80 | WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | | 80 | WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | |
| 81 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); | 81 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); |
| @@ -126,17 +126,36 @@ void rv770_pcie_gart_fini(struct radeon_device *rdev) | |||
| 126 | } | 126 | } |
| 127 | 127 | ||
| 128 | 128 | ||
| 129 | /* | 129 | void rv770_agp_enable(struct radeon_device *rdev) |
| 130 | * MC | ||
| 131 | */ | ||
| 132 | static void rv770_mc_resume(struct radeon_device *rdev) | ||
| 133 | { | 130 | { |
| 134 | u32 d1vga_control, d2vga_control; | 131 | u32 tmp; |
| 135 | u32 vga_render_control, vga_hdp_control; | 132 | int i; |
| 136 | u32 d1crtc_control, d2crtc_control; | 133 | |
| 137 | u32 new_d1grph_primary, new_d1grph_secondary; | 134 | /* Setup L2 cache */ |
| 138 | u32 new_d2grph_primary, new_d2grph_secondary; | 135 | WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | |
| 139 | u64 old_vram_start; | 136 | ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | |
| 137 | EFFECTIVE_L2_QUEUE_SIZE(7)); | ||
| 138 | WREG32(VM_L2_CNTL2, 0); | ||
| 139 | WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2)); | ||
| 140 | /* Setup TLB control */ | ||
| 141 | tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING | | ||
| 142 | SYSTEM_ACCESS_MODE_NOT_IN_SYS | | ||
| 143 | SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | | ||
| 144 | EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5); | ||
| 145 | WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); | ||
| 146 | WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); | ||
| 147 | WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); | ||
| 148 | WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); | ||
| 149 | WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); | ||
| 150 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); | ||
| 151 | WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); | ||
| 152 | for (i = 0; i < 7; i++) | ||
| 153 | WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); | ||
| 154 | } | ||
| 155 | |||
| 156 | static void rv770_mc_program(struct radeon_device *rdev) | ||
| 157 | { | ||
| 158 | struct rv515_mc_save save; | ||
| 140 | u32 tmp; | 159 | u32 tmp; |
| 141 | int i, j; | 160 | int i, j; |
| 142 | 161 | ||
| @@ -150,53 +169,42 @@ static void rv770_mc_resume(struct radeon_device *rdev) | |||
| 150 | } | 169 | } |
| 151 | WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); | 170 | WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); |
| 152 | 171 | ||
| 153 | d1vga_control = RREG32(D1VGA_CONTROL); | 172 | rv515_mc_stop(rdev, &save); |
| 154 | d2vga_control = RREG32(D2VGA_CONTROL); | ||
| 155 | vga_render_control = RREG32(VGA_RENDER_CONTROL); | ||
| 156 | vga_hdp_control = RREG32(VGA_HDP_CONTROL); | ||
| 157 | d1crtc_control = RREG32(D1CRTC_CONTROL); | ||
| 158 | d2crtc_control = RREG32(D2CRTC_CONTROL); | ||
| 159 | old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; | ||
| 160 | new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS); | ||
| 161 | new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS); | ||
| 162 | new_d1grph_primary += rdev->mc.vram_start - old_vram_start; | ||
| 163 | new_d1grph_secondary += rdev->mc.vram_start - old_vram_start; | ||
| 164 | new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS); | ||
| 165 | new_d2grph_secondary = RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS); | ||
| 166 | new_d2grph_primary += rdev->mc.vram_start - old_vram_start; | ||
| 167 | new_d2grph_secondary += rdev->mc.vram_start - old_vram_start; | ||
| 168 | |||
| 169 | /* Stop all video */ | ||
| 170 | WREG32(D1VGA_CONTROL, 0); | ||
| 171 | WREG32(D2VGA_CONTROL, 0); | ||
| 172 | WREG32(VGA_RENDER_CONTROL, 0); | ||
| 173 | WREG32(D1CRTC_UPDATE_LOCK, 1); | ||
| 174 | WREG32(D2CRTC_UPDATE_LOCK, 1); | ||
| 175 | WREG32(D1CRTC_CONTROL, 0); | ||
| 176 | WREG32(D2CRTC_CONTROL, 0); | ||
| 177 | WREG32(D1CRTC_UPDATE_LOCK, 0); | ||
| 178 | WREG32(D2CRTC_UPDATE_LOCK, 0); | ||
| 179 | |||
| 180 | mdelay(1); | ||
| 181 | if (r600_mc_wait_for_idle(rdev)) { | 173 | if (r600_mc_wait_for_idle(rdev)) { |
| 182 | printk(KERN_WARNING "[drm] MC not idle !\n"); | 174 | dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); |
| 183 | } | 175 | } |
| 184 | |||
| 185 | /* Lockout access through VGA aperture*/ | 176 | /* Lockout access through VGA aperture*/ |
| 186 | WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); | 177 | WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); |
| 187 | |||
| 188 | /* Update configuration */ | 178 | /* Update configuration */ |
| 189 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); | 179 | if (rdev->flags & RADEON_IS_AGP) { |
| 190 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12); | 180 | if (rdev->mc.vram_start < rdev->mc.gtt_start) { |
| 181 | /* VRAM before AGP */ | ||
| 182 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, | ||
| 183 | rdev->mc.vram_start >> 12); | ||
| 184 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, | ||
| 185 | rdev->mc.gtt_end >> 12); | ||
| 186 | } else { | ||
| 187 | /* VRAM after AGP */ | ||
| 188 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, | ||
| 189 | rdev->mc.gtt_start >> 12); | ||
| 190 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, | ||
| 191 | rdev->mc.vram_end >> 12); | ||
| 192 | } | ||
| 193 | } else { | ||
| 194 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, | ||
| 195 | rdev->mc.vram_start >> 12); | ||
| 196 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, | ||
| 197 | rdev->mc.vram_end >> 12); | ||
| 198 | } | ||
| 191 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); | 199 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); |
| 192 | tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16; | 200 | tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; |
| 193 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); | 201 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); |
| 194 | WREG32(MC_VM_FB_LOCATION, tmp); | 202 | WREG32(MC_VM_FB_LOCATION, tmp); |
| 195 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); | 203 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); |
| 196 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); | 204 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); |
| 197 | WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); | 205 | WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); |
| 198 | if (rdev->flags & RADEON_IS_AGP) { | 206 | if (rdev->flags & RADEON_IS_AGP) { |
| 199 | WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16); | 207 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); |
| 200 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); | 208 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); |
| 201 | WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); | 209 | WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); |
| 202 | } else { | 210 | } else { |
| @@ -204,31 +212,10 @@ static void rv770_mc_resume(struct radeon_device *rdev) | |||
| 204 | WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); | 212 | WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); |
| 205 | WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); | 213 | WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); |
| 206 | } | 214 | } |
| 207 | WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary); | ||
| 208 | WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary); | ||
| 209 | WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary); | ||
| 210 | WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary); | ||
| 211 | WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start); | ||
| 212 | |||
| 213 | /* Unlock host access */ | ||
| 214 | WREG32(VGA_HDP_CONTROL, vga_hdp_control); | ||
| 215 | |||
| 216 | mdelay(1); | ||
| 217 | if (r600_mc_wait_for_idle(rdev)) { | 215 | if (r600_mc_wait_for_idle(rdev)) { |
| 218 | printk(KERN_WARNING "[drm] MC not idle !\n"); | 216 | dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); |
| 219 | } | 217 | } |
| 220 | 218 | rv515_mc_resume(rdev, &save); | |
| 221 | /* Restore video state */ | ||
| 222 | WREG32(D1CRTC_UPDATE_LOCK, 1); | ||
| 223 | WREG32(D2CRTC_UPDATE_LOCK, 1); | ||
| 224 | WREG32(D1CRTC_CONTROL, d1crtc_control); | ||
| 225 | WREG32(D2CRTC_CONTROL, d2crtc_control); | ||
| 226 | WREG32(D1CRTC_UPDATE_LOCK, 0); | ||
| 227 | WREG32(D2CRTC_UPDATE_LOCK, 0); | ||
| 228 | WREG32(D1VGA_CONTROL, d1vga_control); | ||
| 229 | WREG32(D2VGA_CONTROL, d2vga_control); | ||
| 230 | WREG32(VGA_RENDER_CONTROL, vga_render_control); | ||
| 231 | |||
| 232 | /* we need to own VRAM, so turn off the VGA renderer here | 219 | /* we need to own VRAM, so turn off the VGA renderer here |
| 233 | * to stop it overwriting our objects */ | 220 | * to stop it overwriting our objects */ |
| 234 | rv515_vga_render_disable(rdev); | 221 | rv515_vga_render_disable(rdev); |
| @@ -840,9 +827,9 @@ int rv770_mc_init(struct radeon_device *rdev) | |||
| 840 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | 827 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; |
| 841 | } | 828 | } |
| 842 | rdev->mc.vram_start = rdev->mc.vram_location; | 829 | rdev->mc.vram_start = rdev->mc.vram_location; |
| 843 | rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size; | 830 | rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
| 844 | rdev->mc.gtt_start = rdev->mc.gtt_location; | 831 | rdev->mc.gtt_start = rdev->mc.gtt_location; |
| 845 | rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size; | 832 | rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; |
| 846 | /* FIXME: we should enforce default clock in case GPU is not in | 833 | /* FIXME: we should enforce default clock in case GPU is not in |
| 847 | * default setup | 834 | * default setup |
| 848 | */ | 835 | */ |
| @@ -861,11 +848,14 @@ static int rv770_startup(struct radeon_device *rdev) | |||
| 861 | { | 848 | { |
| 862 | int r; | 849 | int r; |
| 863 | 850 | ||
| 864 | radeon_gpu_reset(rdev); | 851 | rv770_mc_program(rdev); |
| 865 | rv770_mc_resume(rdev); | 852 | if (rdev->flags & RADEON_IS_AGP) { |
| 866 | r = rv770_pcie_gart_enable(rdev); | 853 | rv770_agp_enable(rdev); |
| 867 | if (r) | 854 | } else { |
| 868 | return r; | 855 | r = rv770_pcie_gart_enable(rdev); |
| 856 | if (r) | ||
| 857 | return r; | ||
| 858 | } | ||
| 869 | rv770_gpu_init(rdev); | 859 | rv770_gpu_init(rdev); |
| 870 | 860 | ||
| 871 | r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | 861 | r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, |
| @@ -884,9 +874,8 @@ static int rv770_startup(struct radeon_device *rdev) | |||
| 884 | r = r600_cp_resume(rdev); | 874 | r = r600_cp_resume(rdev); |
| 885 | if (r) | 875 | if (r) |
| 886 | return r; | 876 | return r; |
| 887 | r = r600_wb_init(rdev); | 877 | /* write back buffer are not vital so don't worry about failure */ |
| 888 | if (r) | 878 | r600_wb_enable(rdev); |
| 889 | return r; | ||
| 890 | return 0; | 879 | return 0; |
| 891 | } | 880 | } |
| 892 | 881 | ||
| @@ -894,15 +883,12 @@ int rv770_resume(struct radeon_device *rdev) | |||
| 894 | { | 883 | { |
| 895 | int r; | 884 | int r; |
| 896 | 885 | ||
| 897 | if (radeon_gpu_reset(rdev)) { | 886 | /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw, |
| 898 | /* FIXME: what do we want to do here ? */ | 887 | * posting will perform necessary task to bring back GPU into good |
| 899 | } | 888 | * shape. |
| 889 | */ | ||
| 900 | /* post card */ | 890 | /* post card */ |
| 901 | if (rdev->is_atom_bios) { | 891 | atom_asic_init(rdev->mode_info.atom_context); |
| 902 | atom_asic_init(rdev->mode_info.atom_context); | ||
| 903 | } else { | ||
| 904 | radeon_combios_asic_init(rdev->ddev); | ||
| 905 | } | ||
| 906 | /* Initialize clocks */ | 892 | /* Initialize clocks */ |
| 907 | r = radeon_clocks_init(rdev); | 893 | r = radeon_clocks_init(rdev); |
| 908 | if (r) { | 894 | if (r) { |
| @@ -915,7 +901,7 @@ int rv770_resume(struct radeon_device *rdev) | |||
| 915 | return r; | 901 | return r; |
| 916 | } | 902 | } |
| 917 | 903 | ||
| 918 | r = radeon_ib_test(rdev); | 904 | r = r600_ib_test(rdev); |
| 919 | if (r) { | 905 | if (r) { |
| 920 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); | 906 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); |
| 921 | return r; | 907 | return r; |
| @@ -929,8 +915,8 @@ int rv770_suspend(struct radeon_device *rdev) | |||
| 929 | /* FIXME: we should wait for ring to be empty */ | 915 | /* FIXME: we should wait for ring to be empty */ |
| 930 | r700_cp_stop(rdev); | 916 | r700_cp_stop(rdev); |
| 931 | rdev->cp.ready = false; | 917 | rdev->cp.ready = false; |
| 918 | r600_wb_disable(rdev); | ||
| 932 | rv770_pcie_gart_disable(rdev); | 919 | rv770_pcie_gart_disable(rdev); |
| 933 | |||
| 934 | /* unpin shaders bo */ | 920 | /* unpin shaders bo */ |
| 935 | radeon_object_unpin(rdev->r600_blit.shader_obj); | 921 | radeon_object_unpin(rdev->r600_blit.shader_obj); |
| 936 | return 0; | 922 | return 0; |
| @@ -946,7 +932,6 @@ int rv770_init(struct radeon_device *rdev) | |||
| 946 | { | 932 | { |
| 947 | int r; | 933 | int r; |
| 948 | 934 | ||
| 949 | rdev->new_init_path = true; | ||
| 950 | r = radeon_dummy_page_init(rdev); | 935 | r = radeon_dummy_page_init(rdev); |
| 951 | if (r) | 936 | if (r) |
| 952 | return r; | 937 | return r; |
| @@ -960,8 +945,10 @@ int rv770_init(struct radeon_device *rdev) | |||
| 960 | return -EINVAL; | 945 | return -EINVAL; |
| 961 | } | 946 | } |
| 962 | /* Must be an ATOMBIOS */ | 947 | /* Must be an ATOMBIOS */ |
| 963 | if (!rdev->is_atom_bios) | 948 | if (!rdev->is_atom_bios) { |
| 949 | dev_err(rdev->dev, "Expecting atombios for R600 GPU\n"); | ||
| 964 | return -EINVAL; | 950 | return -EINVAL; |
| 951 | } | ||
| 965 | r = radeon_atombios_init(rdev); | 952 | r = radeon_atombios_init(rdev); |
| 966 | if (r) | 953 | if (r) |
| 967 | return r; | 954 | return r; |
| @@ -983,15 +970,8 @@ int rv770_init(struct radeon_device *rdev) | |||
| 983 | if (r) | 970 | if (r) |
| 984 | return r; | 971 | return r; |
| 985 | r = rv770_mc_init(rdev); | 972 | r = rv770_mc_init(rdev); |
| 986 | if (r) { | 973 | if (r) |
| 987 | if (rdev->flags & RADEON_IS_AGP) { | ||
| 988 | /* Retry with disabling AGP */ | ||
| 989 | rv770_fini(rdev); | ||
| 990 | rdev->flags &= ~RADEON_IS_AGP; | ||
| 991 | return rv770_init(rdev); | ||
| 992 | } | ||
| 993 | return r; | 974 | return r; |
| 994 | } | ||
| 995 | /* Memory manager */ | 975 | /* Memory manager */ |
| 996 | r = radeon_object_init(rdev); | 976 | r = radeon_object_init(rdev); |
| 997 | if (r) | 977 | if (r) |
| @@ -1020,12 +1000,10 @@ int rv770_init(struct radeon_device *rdev) | |||
| 1020 | 1000 | ||
| 1021 | r = rv770_startup(rdev); | 1001 | r = rv770_startup(rdev); |
| 1022 | if (r) { | 1002 | if (r) { |
| 1023 | if (rdev->flags & RADEON_IS_AGP) { | 1003 | rv770_suspend(rdev); |
| 1024 | /* Retry with disabling AGP */ | 1004 | r600_wb_fini(rdev); |
| 1025 | rv770_fini(rdev); | 1005 | radeon_ring_fini(rdev); |
| 1026 | rdev->flags &= ~RADEON_IS_AGP; | 1006 | rv770_pcie_gart_fini(rdev); |
| 1027 | return rv770_init(rdev); | ||
| 1028 | } | ||
| 1029 | rdev->accel_working = false; | 1007 | rdev->accel_working = false; |
| 1030 | } | 1008 | } |
| 1031 | if (rdev->accel_working) { | 1009 | if (rdev->accel_working) { |
| @@ -1034,7 +1012,7 @@ int rv770_init(struct radeon_device *rdev) | |||
| 1034 | DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); | 1012 | DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); |
| 1035 | rdev->accel_working = false; | 1013 | rdev->accel_working = false; |
| 1036 | } | 1014 | } |
| 1037 | r = radeon_ib_test(rdev); | 1015 | r = r600_ib_test(rdev); |
| 1038 | if (r) { | 1016 | if (r) { |
| 1039 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); | 1017 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); |
| 1040 | rdev->accel_working = false; | 1018 | rdev->accel_working = false; |
| @@ -1049,20 +1027,15 @@ void rv770_fini(struct radeon_device *rdev) | |||
| 1049 | 1027 | ||
| 1050 | r600_blit_fini(rdev); | 1028 | r600_blit_fini(rdev); |
| 1051 | radeon_ring_fini(rdev); | 1029 | radeon_ring_fini(rdev); |
| 1030 | r600_wb_fini(rdev); | ||
| 1052 | rv770_pcie_gart_fini(rdev); | 1031 | rv770_pcie_gart_fini(rdev); |
| 1053 | radeon_gem_fini(rdev); | 1032 | radeon_gem_fini(rdev); |
| 1054 | radeon_fence_driver_fini(rdev); | 1033 | radeon_fence_driver_fini(rdev); |
| 1055 | radeon_clocks_fini(rdev); | 1034 | radeon_clocks_fini(rdev); |
| 1056 | #if __OS_HAS_AGP | ||
| 1057 | if (rdev->flags & RADEON_IS_AGP) | 1035 | if (rdev->flags & RADEON_IS_AGP) |
| 1058 | radeon_agp_fini(rdev); | 1036 | radeon_agp_fini(rdev); |
| 1059 | #endif | ||
| 1060 | radeon_object_fini(rdev); | 1037 | radeon_object_fini(rdev); |
| 1061 | if (rdev->is_atom_bios) { | 1038 | radeon_atombios_fini(rdev); |
| 1062 | radeon_atombios_fini(rdev); | ||
| 1063 | } else { | ||
| 1064 | radeon_combios_fini(rdev); | ||
| 1065 | } | ||
| 1066 | kfree(rdev->bios); | 1039 | kfree(rdev->bios); |
| 1067 | rdev->bios = NULL; | 1040 | rdev->bios = NULL; |
| 1068 | radeon_dummy_page_fini(rdev); | 1041 | radeon_dummy_page_fini(rdev); |
diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c index 541744d00d3e..b17007178a36 100644 --- a/drivers/gpu/drm/ttm/ttm_global.c +++ b/drivers/gpu/drm/ttm/ttm_global.c | |||
| @@ -82,8 +82,8 @@ int ttm_global_item_ref(struct ttm_global_reference *ref) | |||
| 82 | if (unlikely(ret != 0)) | 82 | if (unlikely(ret != 0)) |
| 83 | goto out_err; | 83 | goto out_err; |
| 84 | 84 | ||
| 85 | ++item->refcount; | ||
| 86 | } | 85 | } |
| 86 | ++item->refcount; | ||
| 87 | ref->object = item->object; | 87 | ref->object = item->object; |
| 88 | object = item->object; | 88 | object = item->object; |
| 89 | mutex_unlock(&item->mutex); | 89 | mutex_unlock(&item->mutex); |
diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/hwmon/lis3lv02d_spi.c index ecd739534f6a..82b16808a274 100644 --- a/drivers/hwmon/lis3lv02d_spi.c +++ b/drivers/hwmon/lis3lv02d_spi.c | |||
| @@ -83,7 +83,8 @@ static int __devexit lis302dl_spi_remove(struct spi_device *spi) | |||
| 83 | struct lis3lv02d *lis3 = spi_get_drvdata(spi); | 83 | struct lis3lv02d *lis3 = spi_get_drvdata(spi); |
| 84 | lis3lv02d_joystick_disable(); | 84 | lis3lv02d_joystick_disable(); |
| 85 | lis3lv02d_poweroff(lis3); | 85 | lis3lv02d_poweroff(lis3); |
| 86 | return 0; | 86 | |
| 87 | return lis3lv02d_remove_fs(&lis3_dev); | ||
| 87 | } | 88 | } |
| 88 | 89 | ||
| 89 | #ifdef CONFIG_PM | 90 | #ifdef CONFIG_PM |
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c index 28d09a5d8450..017c09540c2f 100644 --- a/drivers/ide/ide-proc.c +++ b/drivers/ide/ide-proc.c | |||
| @@ -273,14 +273,8 @@ static const struct ide_proc_devset ide_generic_settings[] = { | |||
| 273 | 273 | ||
| 274 | static void proc_ide_settings_warn(void) | 274 | static void proc_ide_settings_warn(void) |
| 275 | { | 275 | { |
| 276 | static int warned; | 276 | printk_once(KERN_WARNING "Warning: /proc/ide/hd?/settings interface is " |
| 277 | |||
| 278 | if (warned) | ||
| 279 | return; | ||
| 280 | |||
| 281 | printk(KERN_WARNING "Warning: /proc/ide/hd?/settings interface is " | ||
| 282 | "obsolete, and will be removed soon!\n"); | 277 | "obsolete, and will be removed soon!\n"); |
| 283 | warned = 1; | ||
| 284 | } | 278 | } |
| 285 | 279 | ||
| 286 | static int ide_settings_proc_show(struct seq_file *m, void *v) | 280 | static int ide_settings_proc_show(struct seq_file *m, void *v) |
diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c index afca22beaadf..3b88eba04c9c 100644 --- a/drivers/ide/sis5513.c +++ b/drivers/ide/sis5513.c | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> | 2 | * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> |
| 3 | * Copyright (C) 2002 Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer | 3 | * Copyright (C) 2002 Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer |
| 4 | * Copyright (C) 2003 Vojtech Pavlik <vojtech@suse.cz> | 4 | * Copyright (C) 2003 Vojtech Pavlik <vojtech@suse.cz> |
| 5 | * Copyright (C) 2007 Bartlomiej Zolnierkiewicz | 5 | * Copyright (C) 2007-2009 Bartlomiej Zolnierkiewicz |
| 6 | * | 6 | * |
| 7 | * May be copied or modified under the terms of the GNU General Public License | 7 | * May be copied or modified under the terms of the GNU General Public License |
| 8 | * | 8 | * |
| @@ -281,11 +281,13 @@ static void config_drive_art_rwp(ide_drive_t *drive) | |||
| 281 | 281 | ||
| 282 | pci_read_config_byte(dev, 0x4b, ®4bh); | 282 | pci_read_config_byte(dev, 0x4b, ®4bh); |
| 283 | 283 | ||
| 284 | rw_prefetch = reg4bh & ~(0x11 << drive->dn); | ||
| 285 | |||
| 284 | if (drive->media == ide_disk) | 286 | if (drive->media == ide_disk) |
| 285 | rw_prefetch = 0x11 << drive->dn; | 287 | rw_prefetch |= 0x11 << drive->dn; |
| 286 | 288 | ||
| 287 | if ((reg4bh & (0x11 << drive->dn)) != rw_prefetch) | 289 | if (reg4bh != rw_prefetch) |
| 288 | pci_write_config_byte(dev, 0x4b, reg4bh|rw_prefetch); | 290 | pci_write_config_byte(dev, 0x4b, rw_prefetch); |
| 289 | } | 291 | } |
| 290 | 292 | ||
| 291 | static void sis_set_pio_mode(ide_drive_t *drive, const u8 pio) | 293 | static void sis_set_pio_mode(ide_drive_t *drive, const u8 pio) |
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c index 2d8352419c0d..65bf91e16a42 100644 --- a/drivers/isdn/capi/capi.c +++ b/drivers/isdn/capi/capi.c | |||
| @@ -603,7 +603,7 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb) | |||
| 603 | 603 | ||
| 604 | if (CAPIMSG_CMD(skb->data) == CAPI_CONNECT_B3_CONF) { | 604 | if (CAPIMSG_CMD(skb->data) == CAPI_CONNECT_B3_CONF) { |
| 605 | u16 info = CAPIMSG_U16(skb->data, 12); // Info field | 605 | u16 info = CAPIMSG_U16(skb->data, 12); // Info field |
| 606 | if (info == 0) { | 606 | if ((info & 0xff00) == 0) { |
| 607 | mutex_lock(&cdev->ncci_list_mtx); | 607 | mutex_lock(&cdev->ncci_list_mtx); |
| 608 | capincci_alloc(cdev, CAPIMSG_NCCI(skb->data)); | 608 | capincci_alloc(cdev, CAPIMSG_NCCI(skb->data)); |
| 609 | mutex_unlock(&cdev->ncci_list_mtx); | 609 | mutex_unlock(&cdev->ncci_list_mtx); |
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c index 650120261abf..3e6d17f42a98 100644 --- a/drivers/isdn/capi/capidrv.c +++ b/drivers/isdn/capi/capidrv.c | |||
| @@ -40,7 +40,7 @@ static int debugmode = 0; | |||
| 40 | MODULE_DESCRIPTION("CAPI4Linux: Interface to ISDN4Linux"); | 40 | MODULE_DESCRIPTION("CAPI4Linux: Interface to ISDN4Linux"); |
| 41 | MODULE_AUTHOR("Carsten Paeth"); | 41 | MODULE_AUTHOR("Carsten Paeth"); |
| 42 | MODULE_LICENSE("GPL"); | 42 | MODULE_LICENSE("GPL"); |
| 43 | module_param(debugmode, uint, 0); | 43 | module_param(debugmode, uint, S_IRUGO|S_IWUSR); |
| 44 | 44 | ||
| 45 | /* -------- type definitions ----------------------------------------- */ | 45 | /* -------- type definitions ----------------------------------------- */ |
| 46 | 46 | ||
| @@ -671,8 +671,8 @@ static void n0(capidrv_contr * card, capidrv_ncci * ncci) | |||
| 671 | NULL, /* Useruserdata */ /* $$$$ */ | 671 | NULL, /* Useruserdata */ /* $$$$ */ |
| 672 | NULL /* Facilitydataarray */ | 672 | NULL /* Facilitydataarray */ |
| 673 | ); | 673 | ); |
| 674 | send_message(card, &cmsg); | ||
| 675 | plci_change_state(card, ncci->plcip, EV_PLCI_DISCONNECT_REQ); | 674 | plci_change_state(card, ncci->plcip, EV_PLCI_DISCONNECT_REQ); |
| 675 | send_message(card, &cmsg); | ||
| 676 | 676 | ||
| 677 | cmd.command = ISDN_STAT_BHUP; | 677 | cmd.command = ISDN_STAT_BHUP; |
| 678 | cmd.driver = card->myid; | 678 | cmd.driver = card->myid; |
| @@ -924,8 +924,8 @@ static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg) | |||
| 924 | */ | 924 | */ |
| 925 | capi_cmsg_answer(cmsg); | 925 | capi_cmsg_answer(cmsg); |
| 926 | cmsg->Reject = 1; /* ignore */ | 926 | cmsg->Reject = 1; /* ignore */ |
| 927 | send_message(card, cmsg); | ||
| 928 | plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); | 927 | plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); |
| 928 | send_message(card, cmsg); | ||
| 929 | printk(KERN_INFO "capidrv-%d: incoming call %s,%d,%d,%s ignored\n", | 929 | printk(KERN_INFO "capidrv-%d: incoming call %s,%d,%d,%s ignored\n", |
| 930 | card->contrnr, | 930 | card->contrnr, |
| 931 | cmd.parm.setup.phone, | 931 | cmd.parm.setup.phone, |
| @@ -974,8 +974,8 @@ static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg) | |||
| 974 | case 2: /* Call will be rejected. */ | 974 | case 2: /* Call will be rejected. */ |
| 975 | capi_cmsg_answer(cmsg); | 975 | capi_cmsg_answer(cmsg); |
| 976 | cmsg->Reject = 2; /* reject call, normal call clearing */ | 976 | cmsg->Reject = 2; /* reject call, normal call clearing */ |
| 977 | send_message(card, cmsg); | ||
| 978 | plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); | 977 | plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); |
| 978 | send_message(card, cmsg); | ||
| 979 | break; | 979 | break; |
| 980 | 980 | ||
| 981 | default: | 981 | default: |
| @@ -983,8 +983,8 @@ static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg) | |||
| 983 | capi_cmsg_answer(cmsg); | 983 | capi_cmsg_answer(cmsg); |
| 984 | cmsg->Reject = 8; /* reject call, | 984 | cmsg->Reject = 8; /* reject call, |
| 985 | destination out of order */ | 985 | destination out of order */ |
| 986 | send_message(card, cmsg); | ||
| 987 | plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); | 986 | plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); |
| 987 | send_message(card, cmsg); | ||
| 988 | break; | 988 | break; |
| 989 | } | 989 | } |
| 990 | return; | 990 | return; |
| @@ -1020,8 +1020,8 @@ static void handle_plci(_cmsg * cmsg) | |||
| 1020 | card->bchans[plcip->chan].disconnecting = 1; | 1020 | card->bchans[plcip->chan].disconnecting = 1; |
| 1021 | plci_change_state(card, plcip, EV_PLCI_DISCONNECT_IND); | 1021 | plci_change_state(card, plcip, EV_PLCI_DISCONNECT_IND); |
| 1022 | capi_cmsg_answer(cmsg); | 1022 | capi_cmsg_answer(cmsg); |
| 1023 | send_message(card, cmsg); | ||
| 1024 | plci_change_state(card, plcip, EV_PLCI_DISCONNECT_RESP); | 1023 | plci_change_state(card, plcip, EV_PLCI_DISCONNECT_RESP); |
| 1024 | send_message(card, cmsg); | ||
| 1025 | break; | 1025 | break; |
| 1026 | 1026 | ||
| 1027 | case CAPI_DISCONNECT_CONF: /* plci */ | 1027 | case CAPI_DISCONNECT_CONF: /* plci */ |
| @@ -1078,8 +1078,8 @@ static void handle_plci(_cmsg * cmsg) | |||
| 1078 | 1078 | ||
| 1079 | if (card->bchans[plcip->chan].incoming) { | 1079 | if (card->bchans[plcip->chan].incoming) { |
| 1080 | capi_cmsg_answer(cmsg); | 1080 | capi_cmsg_answer(cmsg); |
| 1081 | send_message(card, cmsg); | ||
| 1082 | plci_change_state(card, plcip, EV_PLCI_CONNECT_ACTIVE_IND); | 1081 | plci_change_state(card, plcip, EV_PLCI_CONNECT_ACTIVE_IND); |
| 1082 | send_message(card, cmsg); | ||
| 1083 | } else { | 1083 | } else { |
| 1084 | capidrv_ncci *nccip; | 1084 | capidrv_ncci *nccip; |
| 1085 | capi_cmsg_answer(cmsg); | 1085 | capi_cmsg_answer(cmsg); |
| @@ -1098,13 +1098,14 @@ static void handle_plci(_cmsg * cmsg) | |||
| 1098 | NULL /* NCPI */ | 1098 | NULL /* NCPI */ |
| 1099 | ); | 1099 | ); |
| 1100 | nccip->msgid = cmsg->Messagenumber; | 1100 | nccip->msgid = cmsg->Messagenumber; |
| 1101 | plci_change_state(card, plcip, | ||
| 1102 | EV_PLCI_CONNECT_ACTIVE_IND); | ||
| 1103 | ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_REQ); | ||
| 1101 | send_message(card, cmsg); | 1104 | send_message(card, cmsg); |
| 1102 | cmd.command = ISDN_STAT_DCONN; | 1105 | cmd.command = ISDN_STAT_DCONN; |
| 1103 | cmd.driver = card->myid; | 1106 | cmd.driver = card->myid; |
| 1104 | cmd.arg = plcip->chan; | 1107 | cmd.arg = plcip->chan; |
| 1105 | card->interface.statcallb(&cmd); | 1108 | card->interface.statcallb(&cmd); |
| 1106 | plci_change_state(card, plcip, EV_PLCI_CONNECT_ACTIVE_IND); | ||
| 1107 | ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_REQ); | ||
| 1108 | } | 1109 | } |
| 1109 | break; | 1110 | break; |
| 1110 | 1111 | ||
| @@ -1193,8 +1194,8 @@ static void handle_ncci(_cmsg * cmsg) | |||
| 1193 | goto notfound; | 1194 | goto notfound; |
| 1194 | 1195 | ||
| 1195 | capi_cmsg_answer(cmsg); | 1196 | capi_cmsg_answer(cmsg); |
| 1196 | send_message(card, cmsg); | ||
| 1197 | ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_ACTIVE_IND); | 1197 | ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_ACTIVE_IND); |
| 1198 | send_message(card, cmsg); | ||
| 1198 | 1199 | ||
| 1199 | cmd.command = ISDN_STAT_BCONN; | 1200 | cmd.command = ISDN_STAT_BCONN; |
| 1200 | cmd.driver = card->myid; | 1201 | cmd.driver = card->myid; |
| @@ -1222,8 +1223,8 @@ static void handle_ncci(_cmsg * cmsg) | |||
| 1222 | 0, /* Reject */ | 1223 | 0, /* Reject */ |
| 1223 | NULL /* NCPI */ | 1224 | NULL /* NCPI */ |
| 1224 | ); | 1225 | ); |
| 1225 | send_message(card, cmsg); | ||
| 1226 | ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_RESP); | 1226 | ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_RESP); |
| 1227 | send_message(card, cmsg); | ||
| 1227 | break; | 1228 | break; |
| 1228 | } | 1229 | } |
| 1229 | printk(KERN_ERR "capidrv-%d: no mem for ncci, sorry\n", card->contrnr); | 1230 | printk(KERN_ERR "capidrv-%d: no mem for ncci, sorry\n", card->contrnr); |
| @@ -1299,8 +1300,8 @@ static void handle_ncci(_cmsg * cmsg) | |||
| 1299 | card->bchans[nccip->chan].disconnecting = 1; | 1300 | card->bchans[nccip->chan].disconnecting = 1; |
| 1300 | ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_IND); | 1301 | ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_IND); |
| 1301 | capi_cmsg_answer(cmsg); | 1302 | capi_cmsg_answer(cmsg); |
| 1302 | send_message(card, cmsg); | ||
| 1303 | ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_RESP); | 1303 | ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_RESP); |
| 1304 | send_message(card, cmsg); | ||
| 1304 | break; | 1305 | break; |
| 1305 | 1306 | ||
| 1306 | case CAPI_DISCONNECT_B3_CONF: /* ncci */ | 1307 | case CAPI_DISCONNECT_B3_CONF: /* ncci */ |
| @@ -2014,8 +2015,8 @@ static void send_listen(capidrv_contr *card) | |||
| 2014 | card->cipmask, | 2015 | card->cipmask, |
| 2015 | card->cipmask2, | 2016 | card->cipmask2, |
| 2016 | NULL, NULL); | 2017 | NULL, NULL); |
| 2017 | send_message(card, &cmdcmsg); | ||
| 2018 | listen_change_state(card, EV_LISTEN_REQ); | 2018 | listen_change_state(card, EV_LISTEN_REQ); |
| 2019 | send_message(card, &cmdcmsg); | ||
| 2019 | } | 2020 | } |
| 2020 | 2021 | ||
| 2021 | static void listentimerfunc(unsigned long x) | 2022 | static void listentimerfunc(unsigned long x) |
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c index 234cc5d53312..44a58e6f8f65 100644 --- a/drivers/isdn/gigaset/asyncdata.c +++ b/drivers/isdn/gigaset/asyncdata.c | |||
| @@ -334,7 +334,14 @@ static inline int iraw_loop(unsigned char c, unsigned char *src, int numbytes, | |||
| 334 | return startbytes - numbytes; | 334 | return startbytes - numbytes; |
| 335 | } | 335 | } |
| 336 | 336 | ||
| 337 | /* process a block of data received from the device | 337 | /** |
| 338 | * gigaset_m10x_input() - process a block of data received from the device | ||
| 339 | * @inbuf: received data and device descriptor structure. | ||
| 340 | * | ||
| 341 | * Called by hardware module {ser,usb}_gigaset with a block of received | ||
| 342 | * bytes. Separates the bytes received over the serial data channel into | ||
| 343 | * user data and command replies (locked/unlocked) according to the | ||
| 344 | * current state of the interface. | ||
| 338 | */ | 345 | */ |
| 339 | void gigaset_m10x_input(struct inbuf_t *inbuf) | 346 | void gigaset_m10x_input(struct inbuf_t *inbuf) |
| 340 | { | 347 | { |
| @@ -543,16 +550,17 @@ static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail) | |||
| 543 | return iraw_skb; | 550 | return iraw_skb; |
| 544 | } | 551 | } |
| 545 | 552 | ||
| 546 | /* gigaset_send_skb | 553 | /** |
| 547 | * called by common.c to queue an skb for sending | 554 | * gigaset_m10x_send_skb() - queue an skb for sending |
| 548 | * and start transmission if necessary | 555 | * @bcs: B channel descriptor structure. |
| 549 | * parameters: | 556 | * @skb: data to send. |
| 550 | * B Channel control structure | 557 | * |
| 551 | * skb | 558 | * Called by i4l.c to encode and queue an skb for sending, and start |
| 559 | * transmission if necessary. | ||
| 560 | * | ||
| 552 | * Return value: | 561 | * Return value: |
| 553 | * number of bytes accepted for sending | 562 | * number of bytes accepted for sending (skb->len) if ok, |
| 554 | * (skb->len if ok, 0 if out of buffer space) | 563 | * error code < 0 (eg. -ENOMEM) on error |
| 555 | * or error code (< 0, eg. -EINVAL) | ||
| 556 | */ | 564 | */ |
| 557 | int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb) | 565 | int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb) |
| 558 | { | 566 | { |
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index 781c4041f7b0..5ed1d99eb9f3 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c | |||
| @@ -134,6 +134,7 @@ struct bas_cardstate { | |||
| 134 | #define BS_ATRDPEND 0x040 /* urb_cmd_in in use */ | 134 | #define BS_ATRDPEND 0x040 /* urb_cmd_in in use */ |
| 135 | #define BS_ATWRPEND 0x080 /* urb_cmd_out in use */ | 135 | #define BS_ATWRPEND 0x080 /* urb_cmd_out in use */ |
| 136 | #define BS_SUSPEND 0x100 /* USB port suspended */ | 136 | #define BS_SUSPEND 0x100 /* USB port suspended */ |
| 137 | #define BS_RESETTING 0x200 /* waiting for HD_RESET_INTERRUPT_PIPE_ACK */ | ||
| 137 | 138 | ||
| 138 | 139 | ||
| 139 | static struct gigaset_driver *driver = NULL; | 140 | static struct gigaset_driver *driver = NULL; |
| @@ -319,6 +320,21 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag) | |||
| 319 | return -EINVAL; | 320 | return -EINVAL; |
| 320 | } | 321 | } |
| 321 | 322 | ||
| 323 | /* set/clear bits in base connection state, return previous state | ||
| 324 | */ | ||
| 325 | static inline int update_basstate(struct bas_cardstate *ucs, | ||
| 326 | int set, int clear) | ||
| 327 | { | ||
| 328 | unsigned long flags; | ||
| 329 | int state; | ||
| 330 | |||
| 331 | spin_lock_irqsave(&ucs->lock, flags); | ||
| 332 | state = ucs->basstate; | ||
| 333 | ucs->basstate = (state & ~clear) | set; | ||
| 334 | spin_unlock_irqrestore(&ucs->lock, flags); | ||
| 335 | return state; | ||
| 336 | } | ||
| 337 | |||
| 322 | /* error_hangup | 338 | /* error_hangup |
| 323 | * hang up any existing connection because of an unrecoverable error | 339 | * hang up any existing connection because of an unrecoverable error |
| 324 | * This function may be called from any context and takes care of scheduling | 340 | * This function may be called from any context and takes care of scheduling |
| @@ -350,12 +366,9 @@ static inline void error_hangup(struct bc_state *bcs) | |||
| 350 | */ | 366 | */ |
| 351 | static inline void error_reset(struct cardstate *cs) | 367 | static inline void error_reset(struct cardstate *cs) |
| 352 | { | 368 | { |
| 353 | /* close AT command channel to recover (ignore errors) */ | 369 | /* reset interrupt pipe to recover (ignore errors) */ |
| 354 | req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT); | 370 | update_basstate(cs->hw.bas, BS_RESETTING, 0); |
| 355 | 371 | req_submit(cs->bcs, HD_RESET_INTERRUPT_PIPE, 0, BAS_TIMEOUT); | |
| 356 | //FIXME try to recover without bothering the user | ||
| 357 | dev_err(cs->dev, | ||
| 358 | "unrecoverable error - please disconnect Gigaset base to reset\n"); | ||
| 359 | } | 372 | } |
| 360 | 373 | ||
| 361 | /* check_pending | 374 | /* check_pending |
| @@ -398,8 +411,13 @@ static void check_pending(struct bas_cardstate *ucs) | |||
| 398 | case HD_DEVICE_INIT_ACK: /* no reply expected */ | 411 | case HD_DEVICE_INIT_ACK: /* no reply expected */ |
| 399 | ucs->pending = 0; | 412 | ucs->pending = 0; |
| 400 | break; | 413 | break; |
| 401 | /* HD_READ_ATMESSAGE, HD_WRITE_ATMESSAGE, HD_RESET_INTERRUPTPIPE | 414 | case HD_RESET_INTERRUPT_PIPE: |
| 402 | * are handled separately and should never end up here | 415 | if (!(ucs->basstate & BS_RESETTING)) |
| 416 | ucs->pending = 0; | ||
| 417 | break; | ||
| 418 | /* | ||
| 419 | * HD_READ_ATMESSAGE and HD_WRITE_ATMESSAGE are handled separately | ||
| 420 | * and should never end up here | ||
| 403 | */ | 421 | */ |
| 404 | default: | 422 | default: |
| 405 | dev_warn(&ucs->interface->dev, | 423 | dev_warn(&ucs->interface->dev, |
| @@ -449,21 +467,6 @@ static void cmd_in_timeout(unsigned long data) | |||
| 449 | error_reset(cs); | 467 | error_reset(cs); |
| 450 | } | 468 | } |
| 451 | 469 | ||
| 452 | /* set/clear bits in base connection state, return previous state | ||
| 453 | */ | ||
| 454 | inline static int update_basstate(struct bas_cardstate *ucs, | ||
| 455 | int set, int clear) | ||
| 456 | { | ||
| 457 | unsigned long flags; | ||
| 458 | int state; | ||
| 459 | |||
| 460 | spin_lock_irqsave(&ucs->lock, flags); | ||
| 461 | state = ucs->basstate; | ||
| 462 | ucs->basstate = (state & ~clear) | set; | ||
| 463 | spin_unlock_irqrestore(&ucs->lock, flags); | ||
| 464 | return state; | ||
| 465 | } | ||
| 466 | |||
| 467 | /* read_ctrl_callback | 470 | /* read_ctrl_callback |
| 468 | * USB completion handler for control pipe input | 471 | * USB completion handler for control pipe input |
| 469 | * called by the USB subsystem in interrupt context | 472 | * called by the USB subsystem in interrupt context |
| @@ -762,7 +765,8 @@ static void read_int_callback(struct urb *urb) | |||
| 762 | break; | 765 | break; |
| 763 | 766 | ||
| 764 | case HD_RESET_INTERRUPT_PIPE_ACK: | 767 | case HD_RESET_INTERRUPT_PIPE_ACK: |
| 765 | gig_dbg(DEBUG_USBREQ, "HD_RESET_INTERRUPT_PIPE_ACK"); | 768 | update_basstate(ucs, 0, BS_RESETTING); |
| 769 | dev_notice(cs->dev, "interrupt pipe reset\n"); | ||
| 766 | break; | 770 | break; |
| 767 | 771 | ||
| 768 | case HD_SUSPEND_END: | 772 | case HD_SUSPEND_END: |
| @@ -1331,28 +1335,24 @@ static void read_iso_tasklet(unsigned long data) | |||
| 1331 | rcvbuf = urb->transfer_buffer; | 1335 | rcvbuf = urb->transfer_buffer; |
| 1332 | totleft = urb->actual_length; | 1336 | totleft = urb->actual_length; |
| 1333 | for (frame = 0; totleft > 0 && frame < BAS_NUMFRAMES; frame++) { | 1337 | for (frame = 0; totleft > 0 && frame < BAS_NUMFRAMES; frame++) { |
| 1334 | if (unlikely(urb->iso_frame_desc[frame].status)) { | 1338 | numbytes = urb->iso_frame_desc[frame].actual_length; |
| 1339 | if (unlikely(urb->iso_frame_desc[frame].status)) | ||
| 1335 | dev_warn(cs->dev, | 1340 | dev_warn(cs->dev, |
| 1336 | "isochronous read: frame %d: %s\n", | 1341 | "isochronous read: frame %d[%d]: %s\n", |
| 1337 | frame, | 1342 | frame, numbytes, |
| 1338 | get_usb_statmsg( | 1343 | get_usb_statmsg( |
| 1339 | urb->iso_frame_desc[frame].status)); | 1344 | urb->iso_frame_desc[frame].status)); |
| 1340 | break; | 1345 | if (unlikely(numbytes > BAS_MAXFRAME)) |
| 1341 | } | ||
| 1342 | numbytes = urb->iso_frame_desc[frame].actual_length; | ||
| 1343 | if (unlikely(numbytes > BAS_MAXFRAME)) { | ||
| 1344 | dev_warn(cs->dev, | 1346 | dev_warn(cs->dev, |
| 1345 | "isochronous read: frame %d: " | 1347 | "isochronous read: frame %d: " |
| 1346 | "numbytes (%d) > BAS_MAXFRAME\n", | 1348 | "numbytes (%d) > BAS_MAXFRAME\n", |
| 1347 | frame, numbytes); | 1349 | frame, numbytes); |
| 1348 | break; | ||
| 1349 | } | ||
| 1350 | if (unlikely(numbytes > totleft)) { | 1350 | if (unlikely(numbytes > totleft)) { |
| 1351 | dev_warn(cs->dev, | 1351 | dev_warn(cs->dev, |
| 1352 | "isochronous read: frame %d: " | 1352 | "isochronous read: frame %d: " |
| 1353 | "numbytes (%d) > totleft (%d)\n", | 1353 | "numbytes (%d) > totleft (%d)\n", |
| 1354 | frame, numbytes, totleft); | 1354 | frame, numbytes, totleft); |
| 1355 | break; | 1355 | numbytes = totleft; |
| 1356 | } | 1356 | } |
| 1357 | offset = urb->iso_frame_desc[frame].offset; | 1357 | offset = urb->iso_frame_desc[frame].offset; |
| 1358 | if (unlikely(offset + numbytes > BAS_INBUFSIZE)) { | 1358 | if (unlikely(offset + numbytes > BAS_INBUFSIZE)) { |
| @@ -1361,7 +1361,7 @@ static void read_iso_tasklet(unsigned long data) | |||
| 1361 | "offset (%d) + numbytes (%d) " | 1361 | "offset (%d) + numbytes (%d) " |
| 1362 | "> BAS_INBUFSIZE\n", | 1362 | "> BAS_INBUFSIZE\n", |
| 1363 | frame, offset, numbytes); | 1363 | frame, offset, numbytes); |
| 1364 | break; | 1364 | numbytes = BAS_INBUFSIZE - offset; |
| 1365 | } | 1365 | } |
| 1366 | gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs); | 1366 | gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs); |
| 1367 | totleft -= numbytes; | 1367 | totleft -= numbytes; |
| @@ -1433,6 +1433,7 @@ static void req_timeout(unsigned long data) | |||
| 1433 | 1433 | ||
| 1434 | case HD_CLOSE_ATCHANNEL: | 1434 | case HD_CLOSE_ATCHANNEL: |
| 1435 | dev_err(bcs->cs->dev, "timeout closing AT channel\n"); | 1435 | dev_err(bcs->cs->dev, "timeout closing AT channel\n"); |
| 1436 | error_reset(bcs->cs); | ||
| 1436 | break; | 1437 | break; |
| 1437 | 1438 | ||
| 1438 | case HD_CLOSE_B2CHANNEL: | 1439 | case HD_CLOSE_B2CHANNEL: |
| @@ -1442,6 +1443,13 @@ static void req_timeout(unsigned long data) | |||
| 1442 | error_reset(bcs->cs); | 1443 | error_reset(bcs->cs); |
| 1443 | break; | 1444 | break; |
| 1444 | 1445 | ||
| 1446 | case HD_RESET_INTERRUPT_PIPE: | ||
| 1447 | /* error recovery escalation */ | ||
| 1448 | dev_err(bcs->cs->dev, | ||
| 1449 | "reset interrupt pipe timeout, attempting USB reset\n"); | ||
| 1450 | usb_queue_reset_device(bcs->cs->hw.bas->interface); | ||
| 1451 | break; | ||
| 1452 | |||
| 1445 | default: | 1453 | default: |
| 1446 | dev_warn(bcs->cs->dev, "request 0x%02x timed out, clearing\n", | 1454 | dev_warn(bcs->cs->dev, "request 0x%02x timed out, clearing\n", |
| 1447 | pending); | 1455 | pending); |
| @@ -1934,6 +1942,15 @@ static int gigaset_write_cmd(struct cardstate *cs, | |||
| 1934 | goto notqueued; | 1942 | goto notqueued; |
| 1935 | } | 1943 | } |
| 1936 | 1944 | ||
| 1945 | /* translate "+++" escape sequence sent as a single separate command | ||
| 1946 | * into "close AT channel" command for error recovery | ||
| 1947 | * The next command will reopen the AT channel automatically. | ||
| 1948 | */ | ||
| 1949 | if (len == 3 && !memcmp(buf, "+++", 3)) { | ||
| 1950 | rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT); | ||
| 1951 | goto notqueued; | ||
| 1952 | } | ||
| 1953 | |||
| 1937 | if (len > IF_WRITEBUF) | 1954 | if (len > IF_WRITEBUF) |
| 1938 | len = IF_WRITEBUF; | 1955 | len = IF_WRITEBUF; |
| 1939 | if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) { | 1956 | if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) { |
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c index e4141bf8b2f3..33dcd8d72b7c 100644 --- a/drivers/isdn/gigaset/common.c +++ b/drivers/isdn/gigaset/common.c | |||
| @@ -22,6 +22,12 @@ | |||
| 22 | #define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers" | 22 | #define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers" |
| 23 | #define DRIVER_DESC "Driver for Gigaset 307x" | 23 | #define DRIVER_DESC "Driver for Gigaset 307x" |
| 24 | 24 | ||
| 25 | #ifdef CONFIG_GIGASET_DEBUG | ||
| 26 | #define DRIVER_DESC_DEBUG " (debug build)" | ||
| 27 | #else | ||
| 28 | #define DRIVER_DESC_DEBUG "" | ||
| 29 | #endif | ||
| 30 | |||
| 25 | /* Module parameters */ | 31 | /* Module parameters */ |
| 26 | int gigaset_debuglevel = DEBUG_DEFAULT; | 32 | int gigaset_debuglevel = DEBUG_DEFAULT; |
| 27 | EXPORT_SYMBOL_GPL(gigaset_debuglevel); | 33 | EXPORT_SYMBOL_GPL(gigaset_debuglevel); |
| @@ -32,6 +38,17 @@ MODULE_PARM_DESC(debug, "debug level"); | |||
| 32 | #define VALID_MINOR 0x01 | 38 | #define VALID_MINOR 0x01 |
| 33 | #define VALID_ID 0x02 | 39 | #define VALID_ID 0x02 |
| 34 | 40 | ||
| 41 | /** | ||
| 42 | * gigaset_dbg_buffer() - dump data in ASCII and hex for debugging | ||
| 43 | * @level: debugging level. | ||
| 44 | * @msg: message prefix. | ||
| 45 | * @len: number of bytes to dump. | ||
| 46 | * @buf: data to dump. | ||
| 47 | * | ||
| 48 | * If the current debugging level includes one of the bits set in @level, | ||
| 49 | * @len bytes starting at @buf are logged to dmesg at KERN_DEBUG prio, | ||
| 50 | * prefixed by the text @msg. | ||
| 51 | */ | ||
| 35 | void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg, | 52 | void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg, |
| 36 | size_t len, const unsigned char *buf) | 53 | size_t len, const unsigned char *buf) |
| 37 | { | 54 | { |
| @@ -274,6 +291,20 @@ static void clear_events(struct cardstate *cs) | |||
| 274 | spin_unlock_irqrestore(&cs->ev_lock, flags); | 291 | spin_unlock_irqrestore(&cs->ev_lock, flags); |
| 275 | } | 292 | } |
| 276 | 293 | ||
| 294 | /** | ||
| 295 | * gigaset_add_event() - add event to device event queue | ||
| 296 | * @cs: device descriptor structure. | ||
| 297 | * @at_state: connection state structure. | ||
| 298 | * @type: event type. | ||
| 299 | * @ptr: pointer parameter for event. | ||
| 300 | * @parameter: integer parameter for event. | ||
| 301 | * @arg: pointer parameter for event. | ||
| 302 | * | ||
| 303 | * Allocate an event queue entry from the device's event queue, and set it up | ||
| 304 | * with the parameters given. | ||
| 305 | * | ||
| 306 | * Return value: added event | ||
| 307 | */ | ||
| 277 | struct event_t *gigaset_add_event(struct cardstate *cs, | 308 | struct event_t *gigaset_add_event(struct cardstate *cs, |
| 278 | struct at_state_t *at_state, int type, | 309 | struct at_state_t *at_state, int type, |
| 279 | void *ptr, int parameter, void *arg) | 310 | void *ptr, int parameter, void *arg) |
| @@ -398,6 +429,15 @@ static void make_invalid(struct cardstate *cs, unsigned mask) | |||
| 398 | spin_unlock_irqrestore(&drv->lock, flags); | 429 | spin_unlock_irqrestore(&drv->lock, flags); |
| 399 | } | 430 | } |
| 400 | 431 | ||
| 432 | /** | ||
| 433 | * gigaset_freecs() - free all associated resources of a device | ||
| 434 | * @cs: device descriptor structure. | ||
| 435 | * | ||
| 436 | * Stops all tasklets and timers, unregisters the device from all | ||
| 437 | * subsystems it was registered to, deallocates the device structure | ||
| 438 | * @cs and all structures referenced from it. | ||
| 439 | * Operations on the device should be stopped before calling this. | ||
| 440 | */ | ||
| 401 | void gigaset_freecs(struct cardstate *cs) | 441 | void gigaset_freecs(struct cardstate *cs) |
| 402 | { | 442 | { |
| 403 | int i; | 443 | int i; |
| @@ -506,7 +546,12 @@ static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct bc_state *bcs, | |||
| 506 | inbuf->inputstate = inputstate; | 546 | inbuf->inputstate = inputstate; |
| 507 | } | 547 | } |
| 508 | 548 | ||
| 509 | /* append received bytes to inbuf */ | 549 | /** |
| 550 | * gigaset_fill_inbuf() - append received data to input buffer | ||
| 551 | * @inbuf: buffer structure. | ||
| 552 | * @src: received data. | ||
| 553 | * @numbytes: number of bytes received. | ||
| 554 | */ | ||
| 510 | int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src, | 555 | int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src, |
| 511 | unsigned numbytes) | 556 | unsigned numbytes) |
| 512 | { | 557 | { |
| @@ -606,20 +651,22 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs, | |||
| 606 | return NULL; | 651 | return NULL; |
| 607 | } | 652 | } |
| 608 | 653 | ||
| 609 | /* gigaset_initcs | 654 | /** |
| 655 | * gigaset_initcs() - initialize device structure | ||
| 656 | * @drv: hardware driver the device belongs to | ||
| 657 | * @channels: number of B channels supported by device | ||
| 658 | * @onechannel: !=0 if B channel data and AT commands share one | ||
| 659 | * communication channel (M10x), | ||
| 660 | * ==0 if B channels have separate communication channels (base) | ||
| 661 | * @ignoreframes: number of frames to ignore after setting up B channel | ||
| 662 | * @cidmode: !=0: start in CallID mode | ||
| 663 | * @modulename: name of driver module for LL registration | ||
| 664 | * | ||
| 610 | * Allocate and initialize cardstate structure for Gigaset driver | 665 | * Allocate and initialize cardstate structure for Gigaset driver |
| 611 | * Calls hardware dependent gigaset_initcshw() function | 666 | * Calls hardware dependent gigaset_initcshw() function |
| 612 | * Calls B channel initialization function gigaset_initbcs() for each B channel | 667 | * Calls B channel initialization function gigaset_initbcs() for each B channel |
| 613 | * parameters: | 668 | * |
| 614 | * drv hardware driver the device belongs to | 669 | * Return value: |
| 615 | * channels number of B channels supported by device | ||
| 616 | * onechannel !=0: B channel data and AT commands share one | ||
| 617 | * communication channel | ||
| 618 | * ==0: B channels have separate communication channels | ||
| 619 | * ignoreframes number of frames to ignore after setting up B channel | ||
| 620 | * cidmode !=0: start in CallID mode | ||
| 621 | * modulename name of driver module (used for I4L registration) | ||
| 622 | * return value: | ||
| 623 | * pointer to cardstate structure | 670 | * pointer to cardstate structure |
| 624 | */ | 671 | */ |
| 625 | struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, | 672 | struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, |
| @@ -837,6 +884,17 @@ static void cleanup_cs(struct cardstate *cs) | |||
| 837 | } | 884 | } |
| 838 | 885 | ||
| 839 | 886 | ||
| 887 | /** | ||
| 888 | * gigaset_start() - start device operations | ||
| 889 | * @cs: device descriptor structure. | ||
| 890 | * | ||
| 891 | * Prepares the device for use by setting up communication parameters, | ||
| 892 | * scheduling an EV_START event to initiate device initialization, and | ||
| 893 | * waiting for completion of the initialization. | ||
| 894 | * | ||
| 895 | * Return value: | ||
| 896 | * 1 - success, 0 - error | ||
| 897 | */ | ||
| 840 | int gigaset_start(struct cardstate *cs) | 898 | int gigaset_start(struct cardstate *cs) |
| 841 | { | 899 | { |
| 842 | unsigned long flags; | 900 | unsigned long flags; |
| @@ -879,9 +937,15 @@ error: | |||
| 879 | } | 937 | } |
| 880 | EXPORT_SYMBOL_GPL(gigaset_start); | 938 | EXPORT_SYMBOL_GPL(gigaset_start); |
| 881 | 939 | ||
| 882 | /* gigaset_shutdown | 940 | /** |
| 883 | * check if a device is associated to the cardstate structure and stop it | 941 | * gigaset_shutdown() - shut down device operations |
| 884 | * return value: 0 if ok, -1 if no device was associated | 942 | * @cs: device descriptor structure. |
| 943 | * | ||
| 944 | * Deactivates the device by scheduling an EV_SHUTDOWN event and | ||
| 945 | * waiting for completion of the shutdown. | ||
| 946 | * | ||
| 947 | * Return value: | ||
| 948 | * 0 - success, -1 - error (no device associated) | ||
| 885 | */ | 949 | */ |
| 886 | int gigaset_shutdown(struct cardstate *cs) | 950 | int gigaset_shutdown(struct cardstate *cs) |
| 887 | { | 951 | { |
| @@ -912,6 +976,13 @@ exit: | |||
| 912 | } | 976 | } |
| 913 | EXPORT_SYMBOL_GPL(gigaset_shutdown); | 977 | EXPORT_SYMBOL_GPL(gigaset_shutdown); |
| 914 | 978 | ||
| 979 | /** | ||
| 980 | * gigaset_stop() - stop device operations | ||
| 981 | * @cs: device descriptor structure. | ||
| 982 | * | ||
| 983 | * Stops operations on the device by scheduling an EV_STOP event and | ||
| 984 | * waiting for completion of the shutdown. | ||
| 985 | */ | ||
| 915 | void gigaset_stop(struct cardstate *cs) | 986 | void gigaset_stop(struct cardstate *cs) |
| 916 | { | 987 | { |
| 917 | mutex_lock(&cs->mutex); | 988 | mutex_lock(&cs->mutex); |
| @@ -1020,6 +1091,14 @@ struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty) | |||
| 1020 | return gigaset_get_cs_by_minor(tty->index + tty->driver->minor_start); | 1091 | return gigaset_get_cs_by_minor(tty->index + tty->driver->minor_start); |
| 1021 | } | 1092 | } |
| 1022 | 1093 | ||
| 1094 | /** | ||
| 1095 | * gigaset_freedriver() - free all associated resources of a driver | ||
| 1096 | * @drv: driver descriptor structure. | ||
| 1097 | * | ||
| 1098 | * Unregisters the driver from the system and deallocates the driver | ||
| 1099 | * structure @drv and all structures referenced from it. | ||
| 1100 | * All devices should be shut down before calling this. | ||
| 1101 | */ | ||
| 1023 | void gigaset_freedriver(struct gigaset_driver *drv) | 1102 | void gigaset_freedriver(struct gigaset_driver *drv) |
| 1024 | { | 1103 | { |
| 1025 | unsigned long flags; | 1104 | unsigned long flags; |
| @@ -1035,14 +1114,16 @@ void gigaset_freedriver(struct gigaset_driver *drv) | |||
| 1035 | } | 1114 | } |
| 1036 | EXPORT_SYMBOL_GPL(gigaset_freedriver); | 1115 | EXPORT_SYMBOL_GPL(gigaset_freedriver); |
| 1037 | 1116 | ||
| 1038 | /* gigaset_initdriver | 1117 | /** |
| 1118 | * gigaset_initdriver() - initialize driver structure | ||
| 1119 | * @minor: First minor number | ||
| 1120 | * @minors: Number of minors this driver can handle | ||
| 1121 | * @procname: Name of the driver | ||
| 1122 | * @devname: Name of the device files (prefix without minor number) | ||
| 1123 | * | ||
| 1039 | * Allocate and initialize gigaset_driver structure. Initialize interface. | 1124 | * Allocate and initialize gigaset_driver structure. Initialize interface. |
| 1040 | * parameters: | 1125 | * |
| 1041 | * minor First minor number | 1126 | * Return value: |
| 1042 | * minors Number of minors this driver can handle | ||
| 1043 | * procname Name of the driver | ||
| 1044 | * devname Name of the device files (prefix without minor number) | ||
| 1045 | * return value: | ||
| 1046 | * Pointer to the gigaset_driver structure on success, NULL on failure. | 1127 | * Pointer to the gigaset_driver structure on success, NULL on failure. |
| 1047 | */ | 1128 | */ |
| 1048 | struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors, | 1129 | struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors, |
| @@ -1095,6 +1176,13 @@ error: | |||
| 1095 | } | 1176 | } |
| 1096 | EXPORT_SYMBOL_GPL(gigaset_initdriver); | 1177 | EXPORT_SYMBOL_GPL(gigaset_initdriver); |
| 1097 | 1178 | ||
| 1179 | /** | ||
| 1180 | * gigaset_blockdriver() - block driver | ||
| 1181 | * @drv: driver descriptor structure. | ||
| 1182 | * | ||
| 1183 | * Prevents the driver from attaching new devices, in preparation for | ||
| 1184 | * deregistration. | ||
| 1185 | */ | ||
| 1098 | void gigaset_blockdriver(struct gigaset_driver *drv) | 1186 | void gigaset_blockdriver(struct gigaset_driver *drv) |
| 1099 | { | 1187 | { |
| 1100 | drv->blocked = 1; | 1188 | drv->blocked = 1; |
| @@ -1110,7 +1198,7 @@ static int __init gigaset_init_module(void) | |||
| 1110 | if (gigaset_debuglevel == 1) | 1198 | if (gigaset_debuglevel == 1) |
| 1111 | gigaset_debuglevel = DEBUG_DEFAULT; | 1199 | gigaset_debuglevel = DEBUG_DEFAULT; |
| 1112 | 1200 | ||
| 1113 | pr_info(DRIVER_DESC "\n"); | 1201 | pr_info(DRIVER_DESC DRIVER_DESC_DEBUG "\n"); |
| 1114 | return 0; | 1202 | return 0; |
| 1115 | } | 1203 | } |
| 1116 | 1204 | ||
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c index 2d91049571a4..cc768caa38f5 100644 --- a/drivers/isdn/gigaset/ev-layer.c +++ b/drivers/isdn/gigaset/ev-layer.c | |||
| @@ -207,7 +207,6 @@ struct reply_t gigaset_tab_nocid[] = | |||
| 207 | /* leave dle mode */ | 207 | /* leave dle mode */ |
| 208 | {RSP_INIT, 0, 0,SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"}, | 208 | {RSP_INIT, 0, 0,SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"}, |
| 209 | {RSP_OK, 201,201, -1, 202,-1}, | 209 | {RSP_OK, 201,201, -1, 202,-1}, |
| 210 | //{RSP_ZDLE, 202,202, 0, 202, 0, {ACT_ERROR}},//DELETE | ||
| 211 | {RSP_ZDLE, 202,202, 0, 0, 0, {ACT_DLE0}}, | 210 | {RSP_ZDLE, 202,202, 0, 0, 0, {ACT_DLE0}}, |
| 212 | {RSP_NODEV, 200,249, -1, 0, 0, {ACT_FAKEDLE0}}, | 211 | {RSP_NODEV, 200,249, -1, 0, 0, {ACT_FAKEDLE0}}, |
| 213 | {RSP_ERROR, 200,249, -1, 0, 0, {ACT_FAILDLE0}}, | 212 | {RSP_ERROR, 200,249, -1, 0, 0, {ACT_FAILDLE0}}, |
| @@ -265,6 +264,7 @@ struct reply_t gigaset_tab_nocid[] = | |||
| 265 | {EV_SHUTDOWN, -1, -1, -1, -1,-1, {ACT_SHUTDOWN}}, //FIXME | 264 | {EV_SHUTDOWN, -1, -1, -1, -1,-1, {ACT_SHUTDOWN}}, //FIXME |
| 266 | 265 | ||
| 267 | /* misc. */ | 266 | /* misc. */ |
| 267 | {RSP_ERROR, -1, -1, -1, -1, -1, {ACT_ERROR} }, | ||
| 268 | {RSP_EMPTY, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME | 268 | {RSP_EMPTY, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME |
| 269 | {RSP_ZCFGT, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME | 269 | {RSP_ZCFGT, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME |
| 270 | {RSP_ZCFG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME | 270 | {RSP_ZCFG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME |
| @@ -328,10 +328,9 @@ struct reply_t gigaset_tab_cid[] = | |||
| 328 | {RSP_INIT, -1, -1,SEQ_HUP, 401, 5, {0}, "+VLS=0\r"}, /* hang up */ //-1,-1? | 328 | {RSP_INIT, -1, -1,SEQ_HUP, 401, 5, {0}, "+VLS=0\r"}, /* hang up */ //-1,-1? |
| 329 | {RSP_OK, 401,401, -1, 402, 5}, | 329 | {RSP_OK, 401,401, -1, 402, 5}, |
| 330 | {RSP_ZVLS, 402,402, 0, 403, 5}, | 330 | {RSP_ZVLS, 402,402, 0, 403, 5}, |
| 331 | {RSP_ZSAU, 403,403,ZSAU_DISCONNECT_REQ, -1,-1, {ACT_DEBUG}}, /* if not remote hup */ | 331 | {RSP_ZSAU, 403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} }, |
| 332 | //{RSP_ZSAU, 403,403,ZSAU_NULL, 401, 0, {ACT_ERROR}}, //DELETE//FIXME -> DLE0 // should we do this _before_ hanging up for base driver? | 332 | {RSP_ZSAU, 403, 403, ZSAU_NULL, 0, 0, {ACT_DISCONNECT} }, |
| 333 | {RSP_ZSAU, 403,403,ZSAU_NULL, 0, 0, {ACT_DISCONNECT}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver? | 333 | {RSP_NODEV, 401, 403, -1, 0, 0, {ACT_FAKEHUP} }, |
| 334 | {RSP_NODEV, 401,403, -1, 0, 0, {ACT_FAKEHUP}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver? | ||
| 335 | {RSP_ERROR, 401,401, -1, 0, 0, {ACT_ABORTHUP}}, | 334 | {RSP_ERROR, 401,401, -1, 0, 0, {ACT_ABORTHUP}}, |
| 336 | {EV_TIMEOUT, 401,403, -1, 0, 0, {ACT_ABORTHUP}}, | 335 | {EV_TIMEOUT, 401,403, -1, 0, 0, {ACT_ABORTHUP}}, |
| 337 | 336 | ||
| @@ -474,8 +473,13 @@ static int cid_of_response(char *s) | |||
| 474 | //FIXME is ;<digit>+ at end of non-CID response really impossible? | 473 | //FIXME is ;<digit>+ at end of non-CID response really impossible? |
| 475 | } | 474 | } |
| 476 | 475 | ||
| 477 | /* This function will be called via task queue from the callback handler. | 476 | /** |
| 478 | * We received a modem response and have to handle it.. | 477 | * gigaset_handle_modem_response() - process received modem response |
| 478 | * @cs: device descriptor structure. | ||
| 479 | * | ||
| 480 | * Called by asyncdata/isocdata if a block of data received from the | ||
| 481 | * device must be processed as a modem command response. The data is | ||
| 482 | * already in the cs structure. | ||
| 479 | */ | 483 | */ |
| 480 | void gigaset_handle_modem_response(struct cardstate *cs) | 484 | void gigaset_handle_modem_response(struct cardstate *cs) |
| 481 | { | 485 | { |
| @@ -707,6 +711,11 @@ static void disconnect(struct at_state_t **at_state_p) | |||
| 707 | if (bcs) { | 711 | if (bcs) { |
| 708 | /* B channel assigned: invoke hardware specific handler */ | 712 | /* B channel assigned: invoke hardware specific handler */ |
| 709 | cs->ops->close_bchannel(bcs); | 713 | cs->ops->close_bchannel(bcs); |
| 714 | /* notify LL */ | ||
| 715 | if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) { | ||
| 716 | bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL); | ||
| 717 | gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DHUP); | ||
| 718 | } | ||
| 710 | } else { | 719 | } else { |
| 711 | /* no B channel assigned: just deallocate */ | 720 | /* no B channel assigned: just deallocate */ |
| 712 | spin_lock_irqsave(&cs->lock, flags); | 721 | spin_lock_irqsave(&cs->lock, flags); |
| @@ -1429,11 +1438,12 @@ static void do_action(int action, struct cardstate *cs, | |||
| 1429 | cs->gotfwver = -1; | 1438 | cs->gotfwver = -1; |
| 1430 | dev_err(cs->dev, "could not read firmware version.\n"); | 1439 | dev_err(cs->dev, "could not read firmware version.\n"); |
| 1431 | break; | 1440 | break; |
| 1432 | #ifdef CONFIG_GIGASET_DEBUG | ||
| 1433 | case ACT_ERROR: | 1441 | case ACT_ERROR: |
| 1434 | *p_genresp = 1; | 1442 | gig_dbg(DEBUG_ANY, "%s: ERROR response in ConState %d", |
| 1435 | *p_resp_code = RSP_ERROR; | 1443 | __func__, at_state->ConState); |
| 1444 | cs->cur_at_seq = SEQ_NONE; | ||
| 1436 | break; | 1445 | break; |
| 1446 | #ifdef CONFIG_GIGASET_DEBUG | ||
| 1437 | case ACT_TEST: | 1447 | case ACT_TEST: |
| 1438 | { | 1448 | { |
| 1439 | static int count = 3; //2; //1; | 1449 | static int count = 3; //2; //1; |
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c index 9b22f9cf2f33..654489d836cd 100644 --- a/drivers/isdn/gigaset/i4l.c +++ b/drivers/isdn/gigaset/i4l.c | |||
| @@ -51,6 +51,12 @@ static int writebuf_from_LL(int driverID, int channel, int ack, | |||
| 51 | return -ENODEV; | 51 | return -ENODEV; |
| 52 | } | 52 | } |
| 53 | bcs = &cs->bcs[channel]; | 53 | bcs = &cs->bcs[channel]; |
| 54 | |||
| 55 | /* can only handle linear sk_buffs */ | ||
| 56 | if (skb_linearize(skb) < 0) { | ||
| 57 | dev_err(cs->dev, "%s: skb_linearize failed\n", __func__); | ||
| 58 | return -ENOMEM; | ||
| 59 | } | ||
| 54 | len = skb->len; | 60 | len = skb->len; |
| 55 | 61 | ||
| 56 | gig_dbg(DEBUG_LLDATA, | 62 | gig_dbg(DEBUG_LLDATA, |
| @@ -79,6 +85,14 @@ static int writebuf_from_LL(int driverID, int channel, int ack, | |||
| 79 | return cs->ops->send_skb(bcs, skb); | 85 | return cs->ops->send_skb(bcs, skb); |
| 80 | } | 86 | } |
| 81 | 87 | ||
| 88 | /** | ||
| 89 | * gigaset_skb_sent() - acknowledge sending an skb | ||
| 90 | * @bcs: B channel descriptor structure. | ||
| 91 | * @skb: sent data. | ||
| 92 | * | ||
| 93 | * Called by hardware module {bas,ser,usb}_gigaset when the data in a | ||
| 94 | * skb has been successfully sent, for signalling completion to the LL. | ||
| 95 | */ | ||
| 82 | void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb) | 96 | void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb) |
| 83 | { | 97 | { |
| 84 | unsigned len; | 98 | unsigned len; |
| @@ -455,6 +469,15 @@ int gigaset_isdn_setup_accept(struct at_state_t *at_state) | |||
| 455 | return 0; | 469 | return 0; |
| 456 | } | 470 | } |
| 457 | 471 | ||
| 472 | /** | ||
| 473 | * gigaset_isdn_icall() - signal incoming call | ||
| 474 | * @at_state: connection state structure. | ||
| 475 | * | ||
| 476 | * Called by main module to notify the LL that an incoming call has been | ||
| 477 | * received. @at_state contains the parameters of the call. | ||
| 478 | * | ||
| 479 | * Return value: call disposition (ICALL_*) | ||
| 480 | */ | ||
| 458 | int gigaset_isdn_icall(struct at_state_t *at_state) | 481 | int gigaset_isdn_icall(struct at_state_t *at_state) |
| 459 | { | 482 | { |
| 460 | struct cardstate *cs = at_state->cs; | 483 | struct cardstate *cs = at_state->cs; |
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c index f33ac27de643..6a8e1384e7bd 100644 --- a/drivers/isdn/gigaset/interface.c +++ b/drivers/isdn/gigaset/interface.c | |||
| @@ -616,6 +616,15 @@ void gigaset_if_free(struct cardstate *cs) | |||
| 616 | tty_unregister_device(drv->tty, cs->minor_index); | 616 | tty_unregister_device(drv->tty, cs->minor_index); |
| 617 | } | 617 | } |
| 618 | 618 | ||
| 619 | /** | ||
| 620 | * gigaset_if_receive() - pass a received block of data to the tty device | ||
| 621 | * @cs: device descriptor structure. | ||
| 622 | * @buffer: received data. | ||
| 623 | * @len: number of bytes received. | ||
| 624 | * | ||
| 625 | * Called by asyncdata/isocdata if a block of data received from the | ||
| 626 | * device must be sent to userspace through the ttyG* device. | ||
| 627 | */ | ||
| 619 | void gigaset_if_receive(struct cardstate *cs, | 628 | void gigaset_if_receive(struct cardstate *cs, |
| 620 | unsigned char *buffer, size_t len) | 629 | unsigned char *buffer, size_t len) |
| 621 | { | 630 | { |
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c index bed38fcc432b..9f3ef7b4248c 100644 --- a/drivers/isdn/gigaset/isocdata.c +++ b/drivers/isdn/gigaset/isocdata.c | |||
| @@ -429,7 +429,7 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb, | |||
| 429 | return -EAGAIN; | 429 | return -EAGAIN; |
| 430 | } | 430 | } |
| 431 | 431 | ||
| 432 | dump_bytes(DEBUG_STREAM, "snd data", in, count); | 432 | dump_bytes(DEBUG_STREAM_DUMP, "snd data", in, count); |
| 433 | 433 | ||
| 434 | /* bitstuff and checksum input data */ | 434 | /* bitstuff and checksum input data */ |
| 435 | fcs = PPP_INITFCS; | 435 | fcs = PPP_INITFCS; |
| @@ -448,7 +448,6 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb, | |||
| 448 | /* put closing flag and repeat byte for flag idle */ | 448 | /* put closing flag and repeat byte for flag idle */ |
| 449 | isowbuf_putflag(iwb); | 449 | isowbuf_putflag(iwb); |
| 450 | end = isowbuf_donewrite(iwb); | 450 | end = isowbuf_donewrite(iwb); |
| 451 | dump_bytes(DEBUG_STREAM_DUMP, "isowbuf", iwb->data, end + 1); | ||
| 452 | return end; | 451 | return end; |
| 453 | } | 452 | } |
| 454 | 453 | ||
| @@ -482,6 +481,8 @@ static inline int trans_buildframe(struct isowbuf_t *iwb, | |||
| 482 | } | 481 | } |
| 483 | 482 | ||
| 484 | gig_dbg(DEBUG_STREAM, "put %d bytes", count); | 483 | gig_dbg(DEBUG_STREAM, "put %d bytes", count); |
| 484 | dump_bytes(DEBUG_STREAM_DUMP, "snd data", in, count); | ||
| 485 | |||
| 485 | write = iwb->write; | 486 | write = iwb->write; |
| 486 | do { | 487 | do { |
| 487 | c = bitrev8(*in++); | 488 | c = bitrev8(*in++); |
| @@ -583,7 +584,7 @@ static inline void hdlc_done(struct bc_state *bcs) | |||
| 583 | procskb->tail -= 2; | 584 | procskb->tail -= 2; |
| 584 | gig_dbg(DEBUG_ISO, "%s: good frame (%d octets)", | 585 | gig_dbg(DEBUG_ISO, "%s: good frame (%d octets)", |
| 585 | __func__, procskb->len); | 586 | __func__, procskb->len); |
| 586 | dump_bytes(DEBUG_STREAM, | 587 | dump_bytes(DEBUG_STREAM_DUMP, |
| 587 | "rcv data", procskb->data, procskb->len); | 588 | "rcv data", procskb->data, procskb->len); |
| 588 | bcs->hw.bas->goodbytes += procskb->len; | 589 | bcs->hw.bas->goodbytes += procskb->len; |
| 589 | gigaset_rcv_skb(procskb, bcs->cs, bcs); | 590 | gigaset_rcv_skb(procskb, bcs->cs, bcs); |
| @@ -878,6 +879,8 @@ static inline void trans_receive(unsigned char *src, unsigned count, | |||
| 878 | dobytes--; | 879 | dobytes--; |
| 879 | } | 880 | } |
| 880 | if (dobytes == 0) { | 881 | if (dobytes == 0) { |
| 882 | dump_bytes(DEBUG_STREAM_DUMP, | ||
| 883 | "rcv data", skb->data, skb->len); | ||
| 881 | gigaset_rcv_skb(skb, bcs->cs, bcs); | 884 | gigaset_rcv_skb(skb, bcs->cs, bcs); |
| 882 | bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN); | 885 | bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN); |
| 883 | if (!skb) { | 886 | if (!skb) { |
| @@ -973,16 +976,17 @@ void gigaset_isoc_input(struct inbuf_t *inbuf) | |||
| 973 | 976 | ||
| 974 | /* == data output ========================================================== */ | 977 | /* == data output ========================================================== */ |
| 975 | 978 | ||
| 976 | /* gigaset_send_skb | 979 | /** |
| 977 | * called by common.c to queue an skb for sending | 980 | * gigaset_isoc_send_skb() - queue an skb for sending |
| 978 | * and start transmission if necessary | 981 | * @bcs: B channel descriptor structure. |
| 979 | * parameters: | 982 | * @skb: data to send. |
| 980 | * B Channel control structure | 983 | * |
| 981 | * skb | 984 | * Called by i4l.c to queue an skb for sending, and start transmission if |
| 982 | * return value: | 985 | * necessary. |
| 983 | * number of bytes accepted for sending | 986 | * |
| 984 | * (skb->len if ok, 0 if out of buffer space) | 987 | * Return value: |
| 985 | * or error code (< 0, eg. -EINVAL) | 988 | * number of bytes accepted for sending (skb->len) if ok, |
| 989 | * error code < 0 (eg. -ENODEV) on error | ||
| 986 | */ | 990 | */ |
| 987 | int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb) | 991 | int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb) |
| 988 | { | 992 | { |
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c index e1035c895808..f85dcd536508 100644 --- a/drivers/mmc/core/sdio_cis.c +++ b/drivers/mmc/core/sdio_cis.c | |||
| @@ -29,6 +29,8 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func, | |||
| 29 | unsigned i, nr_strings; | 29 | unsigned i, nr_strings; |
| 30 | char **buffer, *string; | 30 | char **buffer, *string; |
| 31 | 31 | ||
| 32 | /* Find all null-terminated (including zero length) strings in | ||
| 33 | the TPLLV1_INFO field. Trailing garbage is ignored. */ | ||
| 32 | buf += 2; | 34 | buf += 2; |
| 33 | size -= 2; | 35 | size -= 2; |
| 34 | 36 | ||
| @@ -39,11 +41,8 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func, | |||
| 39 | if (buf[i] == 0) | 41 | if (buf[i] == 0) |
| 40 | nr_strings++; | 42 | nr_strings++; |
| 41 | } | 43 | } |
| 42 | 44 | if (nr_strings == 0) | |
| 43 | if (nr_strings < 4) { | ||
| 44 | printk(KERN_WARNING "SDIO: ignoring broken CISTPL_VERS_1\n"); | ||
| 45 | return 0; | 45 | return 0; |
| 46 | } | ||
| 47 | 46 | ||
| 48 | size = i; | 47 | size = i; |
| 49 | 48 | ||
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index fdf5937233fc..04f63c77071d 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c | |||
| @@ -721,7 +721,7 @@ static inline void update_rx_stats(struct net_device *dev, u32 status) | |||
| 721 | ps->rx_errors++; | 721 | ps->rx_errors++; |
| 722 | if (status & RX_MISSED_FRAME) | 722 | if (status & RX_MISSED_FRAME) |
| 723 | ps->rx_missed_errors++; | 723 | ps->rx_missed_errors++; |
| 724 | if (status & (RX_OVERLEN | RX_OVERLEN | RX_LEN_ERROR)) | 724 | if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR)) |
| 725 | ps->rx_length_errors++; | 725 | ps->rx_length_errors++; |
| 726 | if (status & RX_CRC_ERROR) | 726 | if (status & RX_CRC_ERROR) |
| 727 | ps->rx_crc_errors++; | 727 | ps->rx_crc_errors++; |
| @@ -794,8 +794,6 @@ static int au1000_rx(struct net_device *dev) | |||
| 794 | printk("rx len error\n"); | 794 | printk("rx len error\n"); |
| 795 | if (status & RX_U_CNTRL_FRAME) | 795 | if (status & RX_U_CNTRL_FRAME) |
| 796 | printk("rx u control frame\n"); | 796 | printk("rx u control frame\n"); |
| 797 | if (status & RX_MISSED_FRAME) | ||
| 798 | printk("rx miss\n"); | ||
| 799 | } | 797 | } |
| 800 | } | 798 | } |
| 801 | prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); | 799 | prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); |
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 79d35d122c08..89876ade5e33 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c | |||
| @@ -1129,7 +1129,6 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, | |||
| 1129 | spin_lock_bh(&adapter->mcc_lock); | 1129 | spin_lock_bh(&adapter->mcc_lock); |
| 1130 | 1130 | ||
| 1131 | wrb = wrb_from_mccq(adapter); | 1131 | wrb = wrb_from_mccq(adapter); |
| 1132 | req = embedded_payload(wrb); | ||
| 1133 | sge = nonembedded_sgl(wrb); | 1132 | sge = nonembedded_sgl(wrb); |
| 1134 | 1133 | ||
| 1135 | be_wrb_hdr_prepare(wrb, cmd->size, false, 1); | 1134 | be_wrb_hdr_prepare(wrb, cmd->size, false, 1); |
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h index 8b4c2cb9ad62..a86f917f85f4 100644 --- a/drivers/net/benet/be_cmds.h +++ b/drivers/net/benet/be_cmds.h | |||
| @@ -62,7 +62,7 @@ enum { | |||
| 62 | MCC_STATUS_QUEUE_FLUSHING = 0x4, | 62 | MCC_STATUS_QUEUE_FLUSHING = 0x4, |
| 63 | /* The command is completing with a DMA error */ | 63 | /* The command is completing with a DMA error */ |
| 64 | MCC_STATUS_DMA_FAILED = 0x5, | 64 | MCC_STATUS_DMA_FAILED = 0x5, |
| 65 | MCC_STATUS_NOT_SUPPORTED = 0x66 | 65 | MCC_STATUS_NOT_SUPPORTED = 66 |
| 66 | }; | 66 | }; |
| 67 | 67 | ||
| 68 | #define CQE_STATUS_COMPL_MASK 0xFFFF | 68 | #define CQE_STATUS_COMPL_MASK 0xFFFF |
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c index 11445df3dbc0..cda5bf2fc50a 100644 --- a/drivers/net/benet/be_ethtool.c +++ b/drivers/net/benet/be_ethtool.c | |||
| @@ -358,7 +358,7 @@ const struct ethtool_ops be_ethtool_ops = { | |||
| 358 | .get_rx_csum = be_get_rx_csum, | 358 | .get_rx_csum = be_get_rx_csum, |
| 359 | .set_rx_csum = be_set_rx_csum, | 359 | .set_rx_csum = be_set_rx_csum, |
| 360 | .get_tx_csum = ethtool_op_get_tx_csum, | 360 | .get_tx_csum = ethtool_op_get_tx_csum, |
| 361 | .set_tx_csum = ethtool_op_set_tx_csum, | 361 | .set_tx_csum = ethtool_op_set_tx_hw_csum, |
| 362 | .get_sg = ethtool_op_get_sg, | 362 | .get_sg = ethtool_op_get_sg, |
| 363 | .set_sg = ethtool_op_set_sg, | 363 | .set_sg = ethtool_op_set_sg, |
| 364 | .get_tso = ethtool_op_get_tso, | 364 | .get_tso = ethtool_op_get_tso, |
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 2f9b50156e0c..6d5e81f7046f 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c | |||
| @@ -197,7 +197,7 @@ void netdev_stats_update(struct be_adapter *adapter) | |||
| 197 | /* no space available in linux */ | 197 | /* no space available in linux */ |
| 198 | dev_stats->tx_dropped = 0; | 198 | dev_stats->tx_dropped = 0; |
| 199 | 199 | ||
| 200 | dev_stats->multicast = port_stats->tx_multicastframes; | 200 | dev_stats->multicast = port_stats->rx_multicast_frames; |
| 201 | dev_stats->collisions = 0; | 201 | dev_stats->collisions = 0; |
| 202 | 202 | ||
| 203 | /* detailed tx_errors */ | 203 | /* detailed tx_errors */ |
| @@ -1899,8 +1899,8 @@ static void be_netdev_init(struct net_device *netdev) | |||
| 1899 | struct be_adapter *adapter = netdev_priv(netdev); | 1899 | struct be_adapter *adapter = netdev_priv(netdev); |
| 1900 | 1900 | ||
| 1901 | netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | | 1901 | netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | |
| 1902 | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM | | 1902 | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM | |
| 1903 | NETIF_F_IPV6_CSUM | NETIF_F_GRO; | 1903 | NETIF_F_GRO; |
| 1904 | 1904 | ||
| 1905 | netdev->flags |= IFF_MULTICAST; | 1905 | netdev->flags |= IFF_MULTICAST; |
| 1906 | 1906 | ||
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index b53b40ba88a8..d1e0563a67df 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c | |||
| @@ -1803,7 +1803,7 @@ struct e1000_info e1000_82574_info = { | |||
| 1803 | | FLAG_HAS_AMT | 1803 | | FLAG_HAS_AMT |
| 1804 | | FLAG_HAS_CTRLEXT_ON_LOAD, | 1804 | | FLAG_HAS_CTRLEXT_ON_LOAD, |
| 1805 | .pba = 20, | 1805 | .pba = 20, |
| 1806 | .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, | 1806 | .max_hw_frame_size = DEFAULT_JUMBO, |
| 1807 | .get_variants = e1000_get_variants_82571, | 1807 | .get_variants = e1000_get_variants_82571, |
| 1808 | .mac_ops = &e82571_mac_ops, | 1808 | .mac_ops = &e82571_mac_ops, |
| 1809 | .phy_ops = &e82_phy_ops_bm, | 1809 | .phy_ops = &e82_phy_ops_bm, |
| @@ -1820,7 +1820,7 @@ struct e1000_info e1000_82583_info = { | |||
| 1820 | | FLAG_HAS_AMT | 1820 | | FLAG_HAS_AMT |
| 1821 | | FLAG_HAS_CTRLEXT_ON_LOAD, | 1821 | | FLAG_HAS_CTRLEXT_ON_LOAD, |
| 1822 | .pba = 20, | 1822 | .pba = 20, |
| 1823 | .max_hw_frame_size = DEFAULT_JUMBO, | 1823 | .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, |
| 1824 | .get_variants = e1000_get_variants_82571, | 1824 | .get_variants = e1000_get_variants_82571, |
| 1825 | .mac_ops = &e82571_mac_ops, | 1825 | .mac_ops = &e82571_mac_ops, |
| 1826 | .phy_ops = &e82_phy_ops_bm, | 1826 | .phy_ops = &e82_phy_ops_bm, |
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c index b7311bc00258..34d0c69e67f7 100644 --- a/drivers/net/ethoc.c +++ b/drivers/net/ethoc.c | |||
| @@ -19,6 +19,10 @@ | |||
| 19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
| 20 | #include <net/ethoc.h> | 20 | #include <net/ethoc.h> |
| 21 | 21 | ||
| 22 | static int buffer_size = 0x8000; /* 32 KBytes */ | ||
| 23 | module_param(buffer_size, int, 0); | ||
| 24 | MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size"); | ||
| 25 | |||
| 22 | /* register offsets */ | 26 | /* register offsets */ |
| 23 | #define MODER 0x00 | 27 | #define MODER 0x00 |
| 24 | #define INT_SOURCE 0x04 | 28 | #define INT_SOURCE 0x04 |
| @@ -167,6 +171,7 @@ | |||
| 167 | * struct ethoc - driver-private device structure | 171 | * struct ethoc - driver-private device structure |
| 168 | * @iobase: pointer to I/O memory region | 172 | * @iobase: pointer to I/O memory region |
| 169 | * @membase: pointer to buffer memory region | 173 | * @membase: pointer to buffer memory region |
| 174 | * @dma_alloc: dma allocated buffer size | ||
| 170 | * @num_tx: number of send buffers | 175 | * @num_tx: number of send buffers |
| 171 | * @cur_tx: last send buffer written | 176 | * @cur_tx: last send buffer written |
| 172 | * @dty_tx: last buffer actually sent | 177 | * @dty_tx: last buffer actually sent |
| @@ -185,6 +190,7 @@ | |||
| 185 | struct ethoc { | 190 | struct ethoc { |
| 186 | void __iomem *iobase; | 191 | void __iomem *iobase; |
| 187 | void __iomem *membase; | 192 | void __iomem *membase; |
| 193 | int dma_alloc; | ||
| 188 | 194 | ||
| 189 | unsigned int num_tx; | 195 | unsigned int num_tx; |
| 190 | unsigned int cur_tx; | 196 | unsigned int cur_tx; |
| @@ -284,7 +290,7 @@ static int ethoc_init_ring(struct ethoc *dev) | |||
| 284 | dev->cur_rx = 0; | 290 | dev->cur_rx = 0; |
| 285 | 291 | ||
| 286 | /* setup transmission buffers */ | 292 | /* setup transmission buffers */ |
| 287 | bd.addr = 0; | 293 | bd.addr = virt_to_phys(dev->membase); |
| 288 | bd.stat = TX_BD_IRQ | TX_BD_CRC; | 294 | bd.stat = TX_BD_IRQ | TX_BD_CRC; |
| 289 | 295 | ||
| 290 | for (i = 0; i < dev->num_tx; i++) { | 296 | for (i = 0; i < dev->num_tx; i++) { |
| @@ -295,7 +301,6 @@ static int ethoc_init_ring(struct ethoc *dev) | |||
| 295 | bd.addr += ETHOC_BUFSIZ; | 301 | bd.addr += ETHOC_BUFSIZ; |
| 296 | } | 302 | } |
| 297 | 303 | ||
| 298 | bd.addr = dev->num_tx * ETHOC_BUFSIZ; | ||
| 299 | bd.stat = RX_BD_EMPTY | RX_BD_IRQ; | 304 | bd.stat = RX_BD_EMPTY | RX_BD_IRQ; |
| 300 | 305 | ||
| 301 | for (i = 0; i < dev->num_rx; i++) { | 306 | for (i = 0; i < dev->num_rx; i++) { |
| @@ -400,8 +405,12 @@ static int ethoc_rx(struct net_device *dev, int limit) | |||
| 400 | if (ethoc_update_rx_stats(priv, &bd) == 0) { | 405 | if (ethoc_update_rx_stats(priv, &bd) == 0) { |
| 401 | int size = bd.stat >> 16; | 406 | int size = bd.stat >> 16; |
| 402 | struct sk_buff *skb = netdev_alloc_skb(dev, size); | 407 | struct sk_buff *skb = netdev_alloc_skb(dev, size); |
| 408 | |||
| 409 | size -= 4; /* strip the CRC */ | ||
| 410 | skb_reserve(skb, 2); /* align TCP/IP header */ | ||
| 411 | |||
| 403 | if (likely(skb)) { | 412 | if (likely(skb)) { |
| 404 | void *src = priv->membase + bd.addr; | 413 | void *src = phys_to_virt(bd.addr); |
| 405 | memcpy_fromio(skb_put(skb, size), src, size); | 414 | memcpy_fromio(skb_put(skb, size), src, size); |
| 406 | skb->protocol = eth_type_trans(skb, dev); | 415 | skb->protocol = eth_type_trans(skb, dev); |
| 407 | priv->stats.rx_packets++; | 416 | priv->stats.rx_packets++; |
| @@ -653,9 +662,9 @@ static int ethoc_open(struct net_device *dev) | |||
| 653 | if (ret) | 662 | if (ret) |
| 654 | return ret; | 663 | return ret; |
| 655 | 664 | ||
| 656 | /* calculate the number of TX/RX buffers */ | 665 | /* calculate the number of TX/RX buffers, maximum 128 supported */ |
| 657 | num_bd = (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ; | 666 | num_bd = min(128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ); |
| 658 | priv->num_tx = min(min_tx, num_bd / 4); | 667 | priv->num_tx = max(min_tx, num_bd / 4); |
| 659 | priv->num_rx = num_bd - priv->num_tx; | 668 | priv->num_rx = num_bd - priv->num_tx; |
| 660 | ethoc_write(priv, TX_BD_NUM, priv->num_tx); | 669 | ethoc_write(priv, TX_BD_NUM, priv->num_tx); |
| 661 | 670 | ||
| @@ -823,7 +832,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 823 | else | 832 | else |
| 824 | bd.stat &= ~TX_BD_PAD; | 833 | bd.stat &= ~TX_BD_PAD; |
| 825 | 834 | ||
| 826 | dest = priv->membase + bd.addr; | 835 | dest = phys_to_virt(bd.addr); |
| 827 | memcpy_toio(dest, skb->data, skb->len); | 836 | memcpy_toio(dest, skb->data, skb->len); |
| 828 | 837 | ||
| 829 | bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK); | 838 | bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK); |
| @@ -903,22 +912,19 @@ static int ethoc_probe(struct platform_device *pdev) | |||
| 903 | 912 | ||
| 904 | /* obtain buffer memory space */ | 913 | /* obtain buffer memory space */ |
| 905 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 914 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
| 906 | if (!res) { | 915 | if (res) { |
| 907 | dev_err(&pdev->dev, "cannot obtain memory space\n"); | 916 | mem = devm_request_mem_region(&pdev->dev, res->start, |
| 908 | ret = -ENXIO; | ||
| 909 | goto free; | ||
| 910 | } | ||
| 911 | |||
| 912 | mem = devm_request_mem_region(&pdev->dev, res->start, | ||
| 913 | res->end - res->start + 1, res->name); | 917 | res->end - res->start + 1, res->name); |
| 914 | if (!mem) { | 918 | if (!mem) { |
| 915 | dev_err(&pdev->dev, "cannot request memory space\n"); | 919 | dev_err(&pdev->dev, "cannot request memory space\n"); |
| 916 | ret = -ENXIO; | 920 | ret = -ENXIO; |
| 917 | goto free; | 921 | goto free; |
| 922 | } | ||
| 923 | |||
| 924 | netdev->mem_start = mem->start; | ||
| 925 | netdev->mem_end = mem->end; | ||
| 918 | } | 926 | } |
| 919 | 927 | ||
| 920 | netdev->mem_start = mem->start; | ||
| 921 | netdev->mem_end = mem->end; | ||
| 922 | 928 | ||
| 923 | /* obtain device IRQ number */ | 929 | /* obtain device IRQ number */ |
| 924 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 930 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
| @@ -933,6 +939,7 @@ static int ethoc_probe(struct platform_device *pdev) | |||
| 933 | /* setup driver-private data */ | 939 | /* setup driver-private data */ |
| 934 | priv = netdev_priv(netdev); | 940 | priv = netdev_priv(netdev); |
| 935 | priv->netdev = netdev; | 941 | priv->netdev = netdev; |
| 942 | priv->dma_alloc = 0; | ||
| 936 | 943 | ||
| 937 | priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, | 944 | priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, |
| 938 | mmio->end - mmio->start + 1); | 945 | mmio->end - mmio->start + 1); |
| @@ -942,12 +949,27 @@ static int ethoc_probe(struct platform_device *pdev) | |||
| 942 | goto error; | 949 | goto error; |
| 943 | } | 950 | } |
| 944 | 951 | ||
| 945 | priv->membase = devm_ioremap_nocache(&pdev->dev, netdev->mem_start, | 952 | if (netdev->mem_end) { |
| 946 | mem->end - mem->start + 1); | 953 | priv->membase = devm_ioremap_nocache(&pdev->dev, |
| 947 | if (!priv->membase) { | 954 | netdev->mem_start, mem->end - mem->start + 1); |
| 948 | dev_err(&pdev->dev, "cannot remap memory space\n"); | 955 | if (!priv->membase) { |
| 949 | ret = -ENXIO; | 956 | dev_err(&pdev->dev, "cannot remap memory space\n"); |
| 950 | goto error; | 957 | ret = -ENXIO; |
| 958 | goto error; | ||
| 959 | } | ||
| 960 | } else { | ||
| 961 | /* Allocate buffer memory */ | ||
| 962 | priv->membase = dma_alloc_coherent(NULL, | ||
| 963 | buffer_size, (void *)&netdev->mem_start, | ||
| 964 | GFP_KERNEL); | ||
| 965 | if (!priv->membase) { | ||
| 966 | dev_err(&pdev->dev, "cannot allocate %dB buffer\n", | ||
| 967 | buffer_size); | ||
| 968 | ret = -ENOMEM; | ||
| 969 | goto error; | ||
| 970 | } | ||
| 971 | netdev->mem_end = netdev->mem_start + buffer_size; | ||
| 972 | priv->dma_alloc = buffer_size; | ||
| 951 | } | 973 | } |
| 952 | 974 | ||
| 953 | /* Allow the platform setup code to pass in a MAC address. */ | 975 | /* Allow the platform setup code to pass in a MAC address. */ |
| @@ -1034,6 +1056,9 @@ free_mdio: | |||
| 1034 | kfree(priv->mdio->irq); | 1056 | kfree(priv->mdio->irq); |
| 1035 | mdiobus_free(priv->mdio); | 1057 | mdiobus_free(priv->mdio); |
| 1036 | free: | 1058 | free: |
| 1059 | if (priv->dma_alloc) | ||
| 1060 | dma_free_coherent(NULL, priv->dma_alloc, priv->membase, | ||
| 1061 | netdev->mem_start); | ||
| 1037 | free_netdev(netdev); | 1062 | free_netdev(netdev); |
| 1038 | out: | 1063 | out: |
| 1039 | return ret; | 1064 | return ret; |
| @@ -1059,7 +1084,9 @@ static int ethoc_remove(struct platform_device *pdev) | |||
| 1059 | kfree(priv->mdio->irq); | 1084 | kfree(priv->mdio->irq); |
| 1060 | mdiobus_free(priv->mdio); | 1085 | mdiobus_free(priv->mdio); |
| 1061 | } | 1086 | } |
| 1062 | 1087 | if (priv->dma_alloc) | |
| 1088 | dma_free_coherent(NULL, priv->dma_alloc, priv->membase, | ||
| 1089 | netdev->mem_start); | ||
| 1063 | unregister_netdev(netdev); | 1090 | unregister_netdev(netdev); |
| 1064 | free_netdev(netdev); | 1091 | free_netdev(netdev); |
| 1065 | } | 1092 | } |
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index 2ec58dcdb82b..34b04924c8a1 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c | |||
| @@ -330,6 +330,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) | |||
| 330 | 330 | ||
| 331 | switch (hw->device_id) { | 331 | switch (hw->device_id) { |
| 332 | case IXGBE_DEV_ID_82599_KX4: | 332 | case IXGBE_DEV_ID_82599_KX4: |
| 333 | case IXGBE_DEV_ID_82599_KX4_MEZZ: | ||
| 334 | case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: | ||
| 333 | case IXGBE_DEV_ID_82599_XAUI_LOM: | 335 | case IXGBE_DEV_ID_82599_XAUI_LOM: |
| 334 | /* Default device ID is mezzanine card KX/KX4 */ | 336 | /* Default device ID is mezzanine card KX/KX4 */ |
| 335 | media_type = ixgbe_media_type_backplane; | 337 | media_type = ixgbe_media_type_backplane; |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 28fbb9d281f9..cbb143ca1eb8 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
| @@ -97,8 +97,12 @@ static struct pci_device_id ixgbe_pci_tbl[] = { | |||
| 97 | board_82599 }, | 97 | board_82599 }, |
| 98 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), | 98 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), |
| 99 | board_82599 }, | 99 | board_82599 }, |
| 100 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), | ||
| 101 | board_82599 }, | ||
| 100 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), | 102 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), |
| 101 | board_82599 }, | 103 | board_82599 }, |
| 104 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), | ||
| 105 | board_82599 }, | ||
| 102 | 106 | ||
| 103 | /* required last entry */ | 107 | /* required last entry */ |
| 104 | {0, } | 108 | {0, } |
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 7c93e923bf2e..ef4bdd58e016 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h | |||
| @@ -49,9 +49,11 @@ | |||
| 49 | #define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 | 49 | #define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 |
| 50 | #define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 | 50 | #define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 |
| 51 | #define IXGBE_DEV_ID_82599_KX4 0x10F7 | 51 | #define IXGBE_DEV_ID_82599_KX4 0x10F7 |
| 52 | #define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 | ||
| 52 | #define IXGBE_DEV_ID_82599_CX4 0x10F9 | 53 | #define IXGBE_DEV_ID_82599_CX4 0x10F9 |
| 53 | #define IXGBE_DEV_ID_82599_SFP 0x10FB | 54 | #define IXGBE_DEV_ID_82599_SFP 0x10FB |
| 54 | #define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC | 55 | #define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC |
| 56 | #define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 | ||
| 55 | 57 | ||
| 56 | /* General Registers */ | 58 | /* General Registers */ |
| 57 | #define IXGBE_CTRL 0x00000 | 59 | #define IXGBE_CTRL 0x00000 |
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index b5aa974827e5..9b9eab107704 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
| @@ -1714,7 +1714,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
| 1714 | /* 4 fragments per cmd des */ | 1714 | /* 4 fragments per cmd des */ |
| 1715 | no_of_desc = (frag_count + 3) >> 2; | 1715 | no_of_desc = (frag_count + 3) >> 2; |
| 1716 | 1716 | ||
| 1717 | if (unlikely(no_of_desc + 2) > netxen_tx_avail(tx_ring)) { | 1717 | if (unlikely(no_of_desc + 2 > netxen_tx_avail(tx_ring))) { |
| 1718 | netif_stop_queue(netdev); | 1718 | netif_stop_queue(netdev); |
| 1719 | return NETDEV_TX_BUSY; | 1719 | return NETDEV_TX_BUSY; |
| 1720 | } | 1720 | } |
diff --git a/drivers/net/pasemi_mac_ethtool.c b/drivers/net/pasemi_mac_ethtool.c index 064a4fe1dd90..28a86224879d 100644 --- a/drivers/net/pasemi_mac_ethtool.c +++ b/drivers/net/pasemi_mac_ethtool.c | |||
| @@ -71,6 +71,9 @@ pasemi_mac_ethtool_get_settings(struct net_device *netdev, | |||
| 71 | struct pasemi_mac *mac = netdev_priv(netdev); | 71 | struct pasemi_mac *mac = netdev_priv(netdev); |
| 72 | struct phy_device *phydev = mac->phydev; | 72 | struct phy_device *phydev = mac->phydev; |
| 73 | 73 | ||
| 74 | if (!phydev) | ||
| 75 | return -EOPNOTSUPP; | ||
| 76 | |||
| 74 | return phy_ethtool_gset(phydev, cmd); | 77 | return phy_ethtool_gset(phydev, cmd); |
| 75 | } | 78 | } |
| 76 | 79 | ||
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 474876c879cb..bd3447f04902 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c | |||
| @@ -1754,14 +1754,14 @@ static struct pcmcia_device_id pcnet_ids[] = { | |||
| 1754 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), | 1754 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), |
| 1755 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), | 1755 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), |
| 1756 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), | 1756 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), |
| 1757 | PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"), | 1757 | PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"), |
| 1758 | PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"), | 1758 | PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), |
| 1759 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "DP83903.cis"), | 1759 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"), |
| 1760 | PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"), | 1760 | PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"), |
| 1761 | PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"), | 1761 | PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"), |
| 1762 | PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "NE2K.cis"), | 1762 | PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"), |
| 1763 | PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"), | 1763 | PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"), |
| 1764 | PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "tamarack.cis"), | 1764 | PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"), |
| 1765 | PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b), | 1765 | PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b), |
| 1766 | PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0", | 1766 | PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0", |
| 1767 | 0xb4be14e3, 0x43ac239b, 0x0877b627), | 1767 | 0xb4be14e3, 0x43ac239b, 0x0877b627), |
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h index 30d5585beeee..3ec6e85587a2 100644 --- a/drivers/net/qlge/qlge.h +++ b/drivers/net/qlge/qlge.h | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | 9 | ||
| 10 | #include <linux/pci.h> | 10 | #include <linux/pci.h> |
| 11 | #include <linux/netdevice.h> | 11 | #include <linux/netdevice.h> |
| 12 | #include <linux/rtnetlink.h> | ||
| 12 | 13 | ||
| 13 | /* | 14 | /* |
| 14 | * General definitions... | 15 | * General definitions... |
| @@ -135,9 +136,9 @@ enum { | |||
| 135 | RST_FO_TFO = (1 << 0), | 136 | RST_FO_TFO = (1 << 0), |
| 136 | RST_FO_RR_MASK = 0x00060000, | 137 | RST_FO_RR_MASK = 0x00060000, |
| 137 | RST_FO_RR_CQ_CAM = 0x00000000, | 138 | RST_FO_RR_CQ_CAM = 0x00000000, |
| 138 | RST_FO_RR_DROP = 0x00000001, | 139 | RST_FO_RR_DROP = 0x00000002, |
| 139 | RST_FO_RR_DQ = 0x00000002, | 140 | RST_FO_RR_DQ = 0x00000004, |
| 140 | RST_FO_RR_RCV_FUNC_CQ = 0x00000003, | 141 | RST_FO_RR_RCV_FUNC_CQ = 0x00000006, |
| 141 | RST_FO_FRB = (1 << 12), | 142 | RST_FO_FRB = (1 << 12), |
| 142 | RST_FO_MOP = (1 << 13), | 143 | RST_FO_MOP = (1 << 13), |
| 143 | RST_FO_REG = (1 << 14), | 144 | RST_FO_REG = (1 << 14), |
| @@ -1477,7 +1478,6 @@ struct ql_adapter { | |||
| 1477 | u32 mailbox_in; | 1478 | u32 mailbox_in; |
| 1478 | u32 mailbox_out; | 1479 | u32 mailbox_out; |
| 1479 | struct mbox_params idc_mbc; | 1480 | struct mbox_params idc_mbc; |
| 1480 | struct mutex mpi_mutex; | ||
| 1481 | 1481 | ||
| 1482 | int tx_ring_size; | 1482 | int tx_ring_size; |
| 1483 | int rx_ring_size; | 1483 | int rx_ring_size; |
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c index 68f9bd280f86..52073946bce3 100644 --- a/drivers/net/qlge/qlge_ethtool.c +++ b/drivers/net/qlge/qlge_ethtool.c | |||
| @@ -45,7 +45,6 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev) | |||
| 45 | if (!netif_running(qdev->ndev)) | 45 | if (!netif_running(qdev->ndev)) |
| 46 | return status; | 46 | return status; |
| 47 | 47 | ||
| 48 | spin_lock(&qdev->hw_lock); | ||
| 49 | /* Skip the default queue, and update the outbound handler | 48 | /* Skip the default queue, and update the outbound handler |
| 50 | * queues if they changed. | 49 | * queues if they changed. |
| 51 | */ | 50 | */ |
| @@ -92,7 +91,6 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev) | |||
| 92 | } | 91 | } |
| 93 | } | 92 | } |
| 94 | exit: | 93 | exit: |
| 95 | spin_unlock(&qdev->hw_lock); | ||
| 96 | return status; | 94 | return status; |
| 97 | } | 95 | } |
| 98 | 96 | ||
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index 3d0efea32111..61680715cde0 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c | |||
| @@ -34,7 +34,6 @@ | |||
| 34 | #include <linux/etherdevice.h> | 34 | #include <linux/etherdevice.h> |
| 35 | #include <linux/ethtool.h> | 35 | #include <linux/ethtool.h> |
| 36 | #include <linux/skbuff.h> | 36 | #include <linux/skbuff.h> |
| 37 | #include <linux/rtnetlink.h> | ||
| 38 | #include <linux/if_vlan.h> | 37 | #include <linux/if_vlan.h> |
| 39 | #include <linux/delay.h> | 38 | #include <linux/delay.h> |
| 40 | #include <linux/mm.h> | 39 | #include <linux/mm.h> |
| @@ -1926,12 +1925,10 @@ static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid) | |||
| 1926 | status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); | 1925 | status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); |
| 1927 | if (status) | 1926 | if (status) |
| 1928 | return; | 1927 | return; |
| 1929 | spin_lock(&qdev->hw_lock); | ||
| 1930 | if (ql_set_mac_addr_reg | 1928 | if (ql_set_mac_addr_reg |
| 1931 | (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { | 1929 | (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { |
| 1932 | QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n"); | 1930 | QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n"); |
| 1933 | } | 1931 | } |
| 1934 | spin_unlock(&qdev->hw_lock); | ||
| 1935 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); | 1932 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); |
| 1936 | } | 1933 | } |
| 1937 | 1934 | ||
| @@ -1945,12 +1942,10 @@ static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) | |||
| 1945 | if (status) | 1942 | if (status) |
| 1946 | return; | 1943 | return; |
| 1947 | 1944 | ||
| 1948 | spin_lock(&qdev->hw_lock); | ||
| 1949 | if (ql_set_mac_addr_reg | 1945 | if (ql_set_mac_addr_reg |
| 1950 | (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { | 1946 | (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { |
| 1951 | QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n"); | 1947 | QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n"); |
| 1952 | } | 1948 | } |
| 1953 | spin_unlock(&qdev->hw_lock); | ||
| 1954 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); | 1949 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); |
| 1955 | 1950 | ||
| 1956 | } | 1951 | } |
| @@ -2001,15 +1996,17 @@ static irqreturn_t qlge_isr(int irq, void *dev_id) | |||
| 2001 | /* | 1996 | /* |
| 2002 | * Check MPI processor activity. | 1997 | * Check MPI processor activity. |
| 2003 | */ | 1998 | */ |
| 2004 | if (var & STS_PI) { | 1999 | if ((var & STS_PI) && |
| 2000 | (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) { | ||
| 2005 | /* | 2001 | /* |
| 2006 | * We've got an async event or mailbox completion. | 2002 | * We've got an async event or mailbox completion. |
| 2007 | * Handle it and clear the source of the interrupt. | 2003 | * Handle it and clear the source of the interrupt. |
| 2008 | */ | 2004 | */ |
| 2009 | QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n"); | 2005 | QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n"); |
| 2010 | ql_disable_completion_interrupt(qdev, intr_context->intr); | 2006 | ql_disable_completion_interrupt(qdev, intr_context->intr); |
| 2011 | queue_delayed_work_on(smp_processor_id(), qdev->workqueue, | 2007 | ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); |
| 2012 | &qdev->mpi_work, 0); | 2008 | queue_delayed_work_on(smp_processor_id(), |
| 2009 | qdev->workqueue, &qdev->mpi_work, 0); | ||
| 2013 | work_done++; | 2010 | work_done++; |
| 2014 | } | 2011 | } |
| 2015 | 2012 | ||
| @@ -3585,7 +3582,6 @@ static void qlge_set_multicast_list(struct net_device *ndev) | |||
| 3585 | status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); | 3582 | status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); |
| 3586 | if (status) | 3583 | if (status) |
| 3587 | return; | 3584 | return; |
| 3588 | spin_lock(&qdev->hw_lock); | ||
| 3589 | /* | 3585 | /* |
| 3590 | * Set or clear promiscuous mode if a | 3586 | * Set or clear promiscuous mode if a |
| 3591 | * transition is taking place. | 3587 | * transition is taking place. |
| @@ -3662,7 +3658,6 @@ static void qlge_set_multicast_list(struct net_device *ndev) | |||
| 3662 | } | 3658 | } |
| 3663 | } | 3659 | } |
| 3664 | exit: | 3660 | exit: |
| 3665 | spin_unlock(&qdev->hw_lock); | ||
| 3666 | ql_sem_unlock(qdev, SEM_RT_IDX_MASK); | 3661 | ql_sem_unlock(qdev, SEM_RT_IDX_MASK); |
| 3667 | } | 3662 | } |
| 3668 | 3663 | ||
| @@ -3682,10 +3677,8 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p) | |||
| 3682 | status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); | 3677 | status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); |
| 3683 | if (status) | 3678 | if (status) |
| 3684 | return status; | 3679 | return status; |
| 3685 | spin_lock(&qdev->hw_lock); | ||
| 3686 | status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, | 3680 | status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, |
| 3687 | MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); | 3681 | MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); |
| 3688 | spin_unlock(&qdev->hw_lock); | ||
| 3689 | if (status) | 3682 | if (status) |
| 3690 | QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n"); | 3683 | QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n"); |
| 3691 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); | 3684 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); |
| @@ -3928,7 +3921,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev, | |||
| 3928 | INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); | 3921 | INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); |
| 3929 | INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); | 3922 | INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); |
| 3930 | INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); | 3923 | INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); |
| 3931 | mutex_init(&qdev->mpi_mutex); | ||
| 3932 | init_completion(&qdev->ide_completion); | 3924 | init_completion(&qdev->ide_completion); |
| 3933 | 3925 | ||
| 3934 | if (!cards_found) { | 3926 | if (!cards_found) { |
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c index 6685bd97da91..c2e43073047e 100644 --- a/drivers/net/qlge/qlge_mpi.c +++ b/drivers/net/qlge/qlge_mpi.c | |||
| @@ -472,7 +472,6 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp) | |||
| 472 | { | 472 | { |
| 473 | int status, count; | 473 | int status, count; |
| 474 | 474 | ||
| 475 | mutex_lock(&qdev->mpi_mutex); | ||
| 476 | 475 | ||
| 477 | /* Begin polled mode for MPI */ | 476 | /* Begin polled mode for MPI */ |
| 478 | ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); | 477 | ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); |
| @@ -541,7 +540,6 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp) | |||
| 541 | status = -EIO; | 540 | status = -EIO; |
| 542 | } | 541 | } |
| 543 | end: | 542 | end: |
| 544 | mutex_unlock(&qdev->mpi_mutex); | ||
| 545 | /* End polled mode for MPI */ | 543 | /* End polled mode for MPI */ |
| 546 | ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); | 544 | ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); |
| 547 | return status; | 545 | return status; |
| @@ -776,7 +774,9 @@ static int ql_idc_wait(struct ql_adapter *qdev) | |||
| 776 | static int ql_set_port_cfg(struct ql_adapter *qdev) | 774 | static int ql_set_port_cfg(struct ql_adapter *qdev) |
| 777 | { | 775 | { |
| 778 | int status; | 776 | int status; |
| 777 | rtnl_lock(); | ||
| 779 | status = ql_mb_set_port_cfg(qdev); | 778 | status = ql_mb_set_port_cfg(qdev); |
| 779 | rtnl_unlock(); | ||
| 780 | if (status) | 780 | if (status) |
| 781 | return status; | 781 | return status; |
| 782 | status = ql_idc_wait(qdev); | 782 | status = ql_idc_wait(qdev); |
| @@ -797,7 +797,9 @@ void ql_mpi_port_cfg_work(struct work_struct *work) | |||
| 797 | container_of(work, struct ql_adapter, mpi_port_cfg_work.work); | 797 | container_of(work, struct ql_adapter, mpi_port_cfg_work.work); |
| 798 | int status; | 798 | int status; |
| 799 | 799 | ||
| 800 | rtnl_lock(); | ||
| 800 | status = ql_mb_get_port_cfg(qdev); | 801 | status = ql_mb_get_port_cfg(qdev); |
| 802 | rtnl_unlock(); | ||
| 801 | if (status) { | 803 | if (status) { |
| 802 | QPRINTK(qdev, DRV, ERR, | 804 | QPRINTK(qdev, DRV, ERR, |
| 803 | "Bug: Failed to get port config data.\n"); | 805 | "Bug: Failed to get port config data.\n"); |
| @@ -855,7 +857,9 @@ void ql_mpi_idc_work(struct work_struct *work) | |||
| 855 | * needs to be set. | 857 | * needs to be set. |
| 856 | * */ | 858 | * */ |
| 857 | set_bit(QL_CAM_RT_SET, &qdev->flags); | 859 | set_bit(QL_CAM_RT_SET, &qdev->flags); |
| 860 | rtnl_lock(); | ||
| 858 | status = ql_mb_idc_ack(qdev); | 861 | status = ql_mb_idc_ack(qdev); |
| 862 | rtnl_unlock(); | ||
| 859 | if (status) { | 863 | if (status) { |
| 860 | QPRINTK(qdev, DRV, ERR, | 864 | QPRINTK(qdev, DRV, ERR, |
| 861 | "Bug: No pending IDC!\n"); | 865 | "Bug: No pending IDC!\n"); |
| @@ -871,7 +875,7 @@ void ql_mpi_work(struct work_struct *work) | |||
| 871 | struct mbox_params *mbcp = &mbc; | 875 | struct mbox_params *mbcp = &mbc; |
| 872 | int err = 0; | 876 | int err = 0; |
| 873 | 877 | ||
| 874 | mutex_lock(&qdev->mpi_mutex); | 878 | rtnl_lock(); |
| 875 | 879 | ||
| 876 | while (ql_read32(qdev, STS) & STS_PI) { | 880 | while (ql_read32(qdev, STS) & STS_PI) { |
| 877 | memset(mbcp, 0, sizeof(struct mbox_params)); | 881 | memset(mbcp, 0, sizeof(struct mbox_params)); |
| @@ -884,7 +888,7 @@ void ql_mpi_work(struct work_struct *work) | |||
| 884 | break; | 888 | break; |
| 885 | } | 889 | } |
| 886 | 890 | ||
| 887 | mutex_unlock(&qdev->mpi_mutex); | 891 | rtnl_unlock(); |
| 888 | ql_enable_completion_interrupt(qdev, 0); | 892 | ql_enable_completion_interrupt(qdev, 0); |
| 889 | } | 893 | } |
| 890 | 894 | ||
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index f09bc5dfe8b2..ba5d3fe753b6 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
| @@ -902,11 +902,12 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) | |||
| 902 | struct tg3 *tp = bp->priv; | 902 | struct tg3 *tp = bp->priv; |
| 903 | u32 val; | 903 | u32 val; |
| 904 | 904 | ||
| 905 | if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) | 905 | spin_lock_bh(&tp->lock); |
| 906 | return -EAGAIN; | ||
| 907 | 906 | ||
| 908 | if (tg3_readphy(tp, reg, &val)) | 907 | if (tg3_readphy(tp, reg, &val)) |
| 909 | return -EIO; | 908 | val = -EIO; |
| 909 | |||
| 910 | spin_unlock_bh(&tp->lock); | ||
| 910 | 911 | ||
| 911 | return val; | 912 | return val; |
| 912 | } | 913 | } |
| @@ -914,14 +915,16 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) | |||
| 914 | static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) | 915 | static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) |
| 915 | { | 916 | { |
| 916 | struct tg3 *tp = bp->priv; | 917 | struct tg3 *tp = bp->priv; |
| 918 | u32 ret = 0; | ||
| 917 | 919 | ||
| 918 | if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) | 920 | spin_lock_bh(&tp->lock); |
| 919 | return -EAGAIN; | ||
| 920 | 921 | ||
| 921 | if (tg3_writephy(tp, reg, val)) | 922 | if (tg3_writephy(tp, reg, val)) |
| 922 | return -EIO; | 923 | ret = -EIO; |
| 923 | 924 | ||
| 924 | return 0; | 925 | spin_unlock_bh(&tp->lock); |
| 926 | |||
| 927 | return ret; | ||
| 925 | } | 928 | } |
| 926 | 929 | ||
| 927 | static int tg3_mdio_reset(struct mii_bus *bp) | 930 | static int tg3_mdio_reset(struct mii_bus *bp) |
| @@ -1011,12 +1014,6 @@ static void tg3_mdio_config_5785(struct tg3 *tp) | |||
| 1011 | 1014 | ||
| 1012 | static void tg3_mdio_start(struct tg3 *tp) | 1015 | static void tg3_mdio_start(struct tg3 *tp) |
| 1013 | { | 1016 | { |
| 1014 | if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) { | ||
| 1015 | mutex_lock(&tp->mdio_bus->mdio_lock); | ||
| 1016 | tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED; | ||
| 1017 | mutex_unlock(&tp->mdio_bus->mdio_lock); | ||
| 1018 | } | ||
| 1019 | |||
| 1020 | tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; | 1017 | tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; |
| 1021 | tw32_f(MAC_MI_MODE, tp->mi_mode); | 1018 | tw32_f(MAC_MI_MODE, tp->mi_mode); |
| 1022 | udelay(80); | 1019 | udelay(80); |
| @@ -1041,15 +1038,6 @@ static void tg3_mdio_start(struct tg3 *tp) | |||
| 1041 | tg3_mdio_config_5785(tp); | 1038 | tg3_mdio_config_5785(tp); |
| 1042 | } | 1039 | } |
| 1043 | 1040 | ||
| 1044 | static void tg3_mdio_stop(struct tg3 *tp) | ||
| 1045 | { | ||
| 1046 | if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) { | ||
| 1047 | mutex_lock(&tp->mdio_bus->mdio_lock); | ||
| 1048 | tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED; | ||
| 1049 | mutex_unlock(&tp->mdio_bus->mdio_lock); | ||
| 1050 | } | ||
| 1051 | } | ||
| 1052 | |||
| 1053 | static int tg3_mdio_init(struct tg3 *tp) | 1041 | static int tg3_mdio_init(struct tg3 *tp) |
| 1054 | { | 1042 | { |
| 1055 | int i; | 1043 | int i; |
| @@ -1141,7 +1129,6 @@ static void tg3_mdio_fini(struct tg3 *tp) | |||
| 1141 | tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED; | 1129 | tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED; |
| 1142 | mdiobus_unregister(tp->mdio_bus); | 1130 | mdiobus_unregister(tp->mdio_bus); |
| 1143 | mdiobus_free(tp->mdio_bus); | 1131 | mdiobus_free(tp->mdio_bus); |
| 1144 | tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED; | ||
| 1145 | } | 1132 | } |
| 1146 | } | 1133 | } |
| 1147 | 1134 | ||
| @@ -1363,7 +1350,7 @@ static void tg3_adjust_link(struct net_device *dev) | |||
| 1363 | struct tg3 *tp = netdev_priv(dev); | 1350 | struct tg3 *tp = netdev_priv(dev); |
| 1364 | struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR]; | 1351 | struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR]; |
| 1365 | 1352 | ||
| 1366 | spin_lock(&tp->lock); | 1353 | spin_lock_bh(&tp->lock); |
| 1367 | 1354 | ||
| 1368 | mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | | 1355 | mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | |
| 1369 | MAC_MODE_HALF_DUPLEX); | 1356 | MAC_MODE_HALF_DUPLEX); |
| @@ -1431,7 +1418,7 @@ static void tg3_adjust_link(struct net_device *dev) | |||
| 1431 | tp->link_config.active_speed = phydev->speed; | 1418 | tp->link_config.active_speed = phydev->speed; |
| 1432 | tp->link_config.active_duplex = phydev->duplex; | 1419 | tp->link_config.active_duplex = phydev->duplex; |
| 1433 | 1420 | ||
| 1434 | spin_unlock(&tp->lock); | 1421 | spin_unlock_bh(&tp->lock); |
| 1435 | 1422 | ||
| 1436 | if (linkmesg) | 1423 | if (linkmesg) |
| 1437 | tg3_link_report(tp); | 1424 | tg3_link_report(tp); |
| @@ -6392,8 +6379,6 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
| 6392 | 6379 | ||
| 6393 | tg3_nvram_lock(tp); | 6380 | tg3_nvram_lock(tp); |
| 6394 | 6381 | ||
| 6395 | tg3_mdio_stop(tp); | ||
| 6396 | |||
| 6397 | tg3_ape_lock(tp, TG3_APE_LOCK_GRC); | 6382 | tg3_ape_lock(tp, TG3_APE_LOCK_GRC); |
| 6398 | 6383 | ||
| 6399 | /* No matching tg3_nvram_unlock() after this because | 6384 | /* No matching tg3_nvram_unlock() after this because |
| @@ -8698,6 +8683,8 @@ static int tg3_close(struct net_device *dev) | |||
| 8698 | 8683 | ||
| 8699 | del_timer_sync(&tp->timer); | 8684 | del_timer_sync(&tp->timer); |
| 8700 | 8685 | ||
| 8686 | tg3_phy_stop(tp); | ||
| 8687 | |||
| 8701 | tg3_full_lock(tp, 1); | 8688 | tg3_full_lock(tp, 1); |
| 8702 | #if 0 | 8689 | #if 0 |
| 8703 | tg3_dump_state(tp); | 8690 | tg3_dump_state(tp); |
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index 524691cd9896..bab7940158e6 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h | |||
| @@ -2748,7 +2748,6 @@ struct tg3 { | |||
| 2748 | #define TG3_FLG3_5701_DMA_BUG 0x00000008 | 2748 | #define TG3_FLG3_5701_DMA_BUG 0x00000008 |
| 2749 | #define TG3_FLG3_USE_PHYLIB 0x00000010 | 2749 | #define TG3_FLG3_USE_PHYLIB 0x00000010 |
| 2750 | #define TG3_FLG3_MDIOBUS_INITED 0x00000020 | 2750 | #define TG3_FLG3_MDIOBUS_INITED 0x00000020 |
| 2751 | #define TG3_FLG3_MDIOBUS_PAUSED 0x00000040 | ||
| 2752 | #define TG3_FLG3_PHY_CONNECTED 0x00000080 | 2751 | #define TG3_FLG3_PHY_CONNECTED 0x00000080 |
| 2753 | #define TG3_FLG3_RGMII_STD_IBND_DISABLE 0x00000100 | 2752 | #define TG3_FLG3_RGMII_STD_IBND_DISABLE 0x00000100 |
| 2754 | #define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200 | 2753 | #define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200 |
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index d032bba9bc4c..0caa8008c51c 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c | |||
| @@ -418,6 +418,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) | |||
| 418 | goto halt_fail_and_release; | 418 | goto halt_fail_and_release; |
| 419 | } | 419 | } |
| 420 | memcpy(net->dev_addr, bp, ETH_ALEN); | 420 | memcpy(net->dev_addr, bp, ETH_ALEN); |
| 421 | memcpy(net->perm_addr, bp, ETH_ALEN); | ||
| 421 | 422 | ||
| 422 | /* set a nonzero filter to enable data transfers */ | 423 | /* set a nonzero filter to enable data transfers */ |
| 423 | memset(u.set, 0, sizeof *u.set); | 424 | memset(u.set, 0, sizeof *u.set); |
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c index a3bb49031a7f..ff4617e21426 100644 --- a/drivers/serial/serial_cs.c +++ b/drivers/serial/serial_cs.c | |||
| @@ -873,10 +873,10 @@ static struct pcmcia_device_id serial_ids[] = { | |||
| 873 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), | 873 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), |
| 874 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), | 874 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), |
| 875 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), | 875 | PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), |
| 876 | PCMCIA_MFC_DEVICE_CIS_PROD_ID12(1, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"), | 876 | PCMCIA_MFC_DEVICE_CIS_PROD_ID12(1, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"), |
| 877 | PCMCIA_MFC_DEVICE_CIS_PROD_ID4(1, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"), | 877 | PCMCIA_MFC_DEVICE_CIS_PROD_ID4(1, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), |
| 878 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0556, "cis/3CCFEM556.cis"), | 878 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0556, "cis/3CCFEM556.cis"), |
| 879 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "DP83903.cis"), | 879 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "cis/DP83903.cis"), |
| 880 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"), | 880 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"), |
| 881 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"), | 881 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"), |
| 882 | PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */ | 882 | PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */ |
| @@ -884,9 +884,9 @@ static struct pcmcia_device_id serial_ids[] = { | |||
| 884 | PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */ | 884 | PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */ |
| 885 | PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */ | 885 | PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */ |
| 886 | PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "cis/MT5634ZLX.cis"), | 886 | PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "cis/MT5634ZLX.cis"), |
| 887 | PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-2", 0x96913a85, 0x27ab5437, "COMpad2.cis"), | 887 | PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-2", 0x96913a85, 0x27ab5437, "cis/COMpad2.cis"), |
| 888 | PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "COMpad4.cis"), | 888 | PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"), |
| 889 | PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "COMpad2.cis"), | 889 | PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"), |
| 890 | PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"), | 890 | PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"), |
| 891 | PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "GLOBETROTTER.cis"), | 891 | PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "GLOBETROTTER.cis"), |
| 892 | PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100 1.00.",0x19ca78af,0xf964f42b), | 892 | PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100 1.00.",0x19ca78af,0xf964f42b), |
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c index 42e1005e2916..d065894ce38f 100644 --- a/drivers/video/da8xx-fb.c +++ b/drivers/video/da8xx-fb.c | |||
| @@ -26,7 +26,6 @@ | |||
| 26 | #include <linux/device.h> | 26 | #include <linux/device.h> |
| 27 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
| 28 | #include <linux/uaccess.h> | 28 | #include <linux/uaccess.h> |
| 29 | #include <linux/device.h> | ||
| 30 | #include <linux/interrupt.h> | 29 | #include <linux/interrupt.h> |
| 31 | #include <linux/clk.h> | 30 | #include <linux/clk.h> |
| 32 | #include <video/da8xx-fb.h> | 31 | #include <video/da8xx-fb.h> |
diff --git a/drivers/video/msm/mddi.c b/drivers/video/msm/mddi.c index f2de5a1acd6d..5c5a1ad1d397 100644 --- a/drivers/video/msm/mddi.c +++ b/drivers/video/msm/mddi.c | |||
| @@ -27,8 +27,6 @@ | |||
| 27 | #include <mach/msm_iomap.h> | 27 | #include <mach/msm_iomap.h> |
| 28 | #include <mach/irqs.h> | 28 | #include <mach/irqs.h> |
| 29 | #include <mach/board.h> | 29 | #include <mach/board.h> |
| 30 | #include <linux/delay.h> | ||
| 31 | |||
| 32 | #include <mach/msm_fb.h> | 30 | #include <mach/msm_fb.h> |
| 33 | #include "mddi_hw.h" | 31 | #include "mddi_hw.h" |
| 34 | 32 | ||
diff --git a/drivers/video/omap/blizzard.c b/drivers/video/omap/blizzard.c index d5e59556f9e2..70dadf9d2334 100644 --- a/drivers/video/omap/blizzard.c +++ b/drivers/video/omap/blizzard.c | |||
| @@ -93,7 +93,7 @@ struct blizzard_reg_list { | |||
| 93 | }; | 93 | }; |
| 94 | 94 | ||
| 95 | /* These need to be saved / restored separately from the rest. */ | 95 | /* These need to be saved / restored separately from the rest. */ |
| 96 | static struct blizzard_reg_list blizzard_pll_regs[] = { | 96 | static const struct blizzard_reg_list blizzard_pll_regs[] = { |
| 97 | { | 97 | { |
| 98 | .start = 0x04, /* Don't save PLL ctrl (0x0C) */ | 98 | .start = 0x04, /* Don't save PLL ctrl (0x0C) */ |
| 99 | .end = 0x0a, | 99 | .end = 0x0a, |
| @@ -104,7 +104,7 @@ static struct blizzard_reg_list blizzard_pll_regs[] = { | |||
| 104 | }, | 104 | }, |
| 105 | }; | 105 | }; |
| 106 | 106 | ||
| 107 | static struct blizzard_reg_list blizzard_gen_regs[] = { | 107 | static const struct blizzard_reg_list blizzard_gen_regs[] = { |
| 108 | { | 108 | { |
| 109 | .start = 0x18, /* SDRAM control */ | 109 | .start = 0x18, /* SDRAM control */ |
| 110 | .end = 0x20, | 110 | .end = 0x20, |
| @@ -191,7 +191,7 @@ struct blizzard_struct { | |||
| 191 | 191 | ||
| 192 | struct omapfb_device *fbdev; | 192 | struct omapfb_device *fbdev; |
| 193 | struct lcd_ctrl_extif *extif; | 193 | struct lcd_ctrl_extif *extif; |
| 194 | struct lcd_ctrl *int_ctrl; | 194 | const struct lcd_ctrl *int_ctrl; |
| 195 | 195 | ||
| 196 | void (*power_up)(struct device *dev); | 196 | void (*power_up)(struct device *dev); |
| 197 | void (*power_down)(struct device *dev); | 197 | void (*power_down)(struct device *dev); |
| @@ -1372,7 +1372,7 @@ static void blizzard_get_caps(int plane, struct omapfb_caps *caps) | |||
| 1372 | (1 << OMAPFB_COLOR_YUV420); | 1372 | (1 << OMAPFB_COLOR_YUV420); |
| 1373 | } | 1373 | } |
| 1374 | 1374 | ||
| 1375 | static void _save_regs(struct blizzard_reg_list *list, int cnt) | 1375 | static void _save_regs(const struct blizzard_reg_list *list, int cnt) |
| 1376 | { | 1376 | { |
| 1377 | int i; | 1377 | int i; |
| 1378 | 1378 | ||
| @@ -1383,7 +1383,7 @@ static void _save_regs(struct blizzard_reg_list *list, int cnt) | |||
| 1383 | } | 1383 | } |
| 1384 | } | 1384 | } |
| 1385 | 1385 | ||
| 1386 | static void _restore_regs(struct blizzard_reg_list *list, int cnt) | 1386 | static void _restore_regs(const struct blizzard_reg_list *list, int cnt) |
| 1387 | { | 1387 | { |
| 1388 | int i; | 1388 | int i; |
| 1389 | 1389 | ||
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c index 125e605b8c68..0d0c8c8b9b56 100644 --- a/drivers/video/omap/omapfb_main.c +++ b/drivers/video/omap/omapfb_main.c | |||
| @@ -393,7 +393,7 @@ static void omapfb_sync(struct fb_info *fbi) | |||
| 393 | * Set fb_info.fix fields and also updates fbdev. | 393 | * Set fb_info.fix fields and also updates fbdev. |
| 394 | * When calling this fb_info.var must be set up already. | 394 | * When calling this fb_info.var must be set up already. |
| 395 | */ | 395 | */ |
| 396 | static void set_fb_fix(struct fb_info *fbi) | 396 | static void set_fb_fix(struct fb_info *fbi, int from_init) |
| 397 | { | 397 | { |
| 398 | struct fb_fix_screeninfo *fix = &fbi->fix; | 398 | struct fb_fix_screeninfo *fix = &fbi->fix; |
| 399 | struct fb_var_screeninfo *var = &fbi->var; | 399 | struct fb_var_screeninfo *var = &fbi->var; |
| @@ -403,10 +403,16 @@ static void set_fb_fix(struct fb_info *fbi) | |||
| 403 | 403 | ||
| 404 | rg = &plane->fbdev->mem_desc.region[plane->idx]; | 404 | rg = &plane->fbdev->mem_desc.region[plane->idx]; |
| 405 | fbi->screen_base = rg->vaddr; | 405 | fbi->screen_base = rg->vaddr; |
| 406 | mutex_lock(&fbi->mm_lock); | 406 | |
| 407 | fix->smem_start = rg->paddr; | 407 | if (!from_init) { |
| 408 | fix->smem_len = rg->size; | 408 | mutex_lock(&fbi->mm_lock); |
| 409 | mutex_unlock(&fbi->mm_lock); | 409 | fix->smem_start = rg->paddr; |
| 410 | fix->smem_len = rg->size; | ||
| 411 | mutex_unlock(&fbi->mm_lock); | ||
| 412 | } else { | ||
| 413 | fix->smem_start = rg->paddr; | ||
| 414 | fix->smem_len = rg->size; | ||
| 415 | } | ||
| 410 | 416 | ||
| 411 | fix->type = FB_TYPE_PACKED_PIXELS; | 417 | fix->type = FB_TYPE_PACKED_PIXELS; |
| 412 | bpp = var->bits_per_pixel; | 418 | bpp = var->bits_per_pixel; |
| @@ -704,7 +710,7 @@ static int omapfb_set_par(struct fb_info *fbi) | |||
| 704 | int r = 0; | 710 | int r = 0; |
| 705 | 711 | ||
| 706 | omapfb_rqueue_lock(fbdev); | 712 | omapfb_rqueue_lock(fbdev); |
| 707 | set_fb_fix(fbi); | 713 | set_fb_fix(fbi, 0); |
| 708 | r = ctrl_change_mode(fbi); | 714 | r = ctrl_change_mode(fbi); |
| 709 | omapfb_rqueue_unlock(fbdev); | 715 | omapfb_rqueue_unlock(fbdev); |
| 710 | 716 | ||
| @@ -904,7 +910,7 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi) | |||
| 904 | if (old_size != size) { | 910 | if (old_size != size) { |
| 905 | if (size) { | 911 | if (size) { |
| 906 | memcpy(&fbi->var, new_var, sizeof(fbi->var)); | 912 | memcpy(&fbi->var, new_var, sizeof(fbi->var)); |
| 907 | set_fb_fix(fbi); | 913 | set_fb_fix(fbi, 0); |
| 908 | } else { | 914 | } else { |
| 909 | /* | 915 | /* |
| 910 | * Set these explicitly to indicate that the | 916 | * Set these explicitly to indicate that the |
| @@ -1504,7 +1510,7 @@ static int fbinfo_init(struct omapfb_device *fbdev, struct fb_info *info) | |||
| 1504 | var->bits_per_pixel = fbdev->panel->bpp; | 1510 | var->bits_per_pixel = fbdev->panel->bpp; |
| 1505 | 1511 | ||
| 1506 | set_fb_var(info, var); | 1512 | set_fb_var(info, var); |
| 1507 | set_fb_fix(info); | 1513 | set_fb_fix(info, 1); |
| 1508 | 1514 | ||
| 1509 | r = fb_alloc_cmap(&info->cmap, 16, 0); | 1515 | r = fb_alloc_cmap(&info->cmap, 16, 0); |
| 1510 | if (r != 0) | 1516 | if (r != 0) |
diff --git a/firmware/Makefile b/firmware/Makefile index 5ea80b19785b..a6c7c3e47e42 100644 --- a/firmware/Makefile +++ b/firmware/Makefile | |||
| @@ -67,10 +67,13 @@ fw-shipped-$(CONFIG_DVB_TTUSB_BUDGET) += ttusb-budget/dspbootcode.bin | |||
| 67 | fw-shipped-$(CONFIG_E100) += e100/d101m_ucode.bin e100/d101s_ucode.bin \ | 67 | fw-shipped-$(CONFIG_E100) += e100/d101m_ucode.bin e100/d101s_ucode.bin \ |
| 68 | e100/d102e_ucode.bin | 68 | e100/d102e_ucode.bin |
| 69 | fw-shipped-$(CONFIG_MYRI_SBUS) += myricom/lanai.bin | 69 | fw-shipped-$(CONFIG_MYRI_SBUS) += myricom/lanai.bin |
| 70 | fw-shipped-$(CONFIG_PCMCIA_PCNET) += cis/LA-PCM.cis cis/PCMLM28.cis | 70 | fw-shipped-$(CONFIG_PCMCIA_PCNET) += cis/LA-PCM.cis cis/PCMLM28.cis \ |
| 71 | cis/DP83903.cis cis/NE2K.cis \ | ||
| 72 | cis/tamarack.cis | ||
| 71 | fw-shipped-$(CONFIG_PCMCIA_3C589) += cis/3CXEM556.cis | 73 | fw-shipped-$(CONFIG_PCMCIA_3C589) += cis/3CXEM556.cis |
| 72 | fw-shipped-$(CONFIG_PCMCIA_3C574) += cis/3CCFEM556.cis | 74 | fw-shipped-$(CONFIG_PCMCIA_3C574) += cis/3CCFEM556.cis |
| 73 | fw-shipped-$(CONFIG_SERIAL_8250_CS) += cis/MT5634ZLX.cis cis/RS-COM-2P.cis | 75 | fw-shipped-$(CONFIG_SERIAL_8250_CS) += cis/MT5634ZLX.cis cis/RS-COM-2P.cis \ |
| 76 | cis/COMpad2.cis cis/COMpad4.cis | ||
| 74 | fw-shipped-$(CONFIG_PCMCIA_SMC91C92) += ositech/Xilinx7OD.bin | 77 | fw-shipped-$(CONFIG_PCMCIA_SMC91C92) += ositech/Xilinx7OD.bin |
| 75 | fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \ | 78 | fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \ |
| 76 | advansys/3550.bin advansys/38C0800.bin | 79 | advansys/3550.bin advansys/38C0800.bin |
diff --git a/firmware/WHENCE b/firmware/WHENCE index 3f8c4f6bc43f..c437e14f0b11 100644 --- a/firmware/WHENCE +++ b/firmware/WHENCE | |||
| @@ -597,6 +597,9 @@ Driver: PCMCIA_PCNET - NE2000 compatible PCMCIA adapter | |||
| 597 | 597 | ||
| 598 | File: cis/LA-PCM.cis | 598 | File: cis/LA-PCM.cis |
| 599 | cis/PCMLM28.cis | 599 | cis/PCMLM28.cis |
| 600 | cis/DP83903.cis | ||
| 601 | cis/NE2K.cis | ||
| 602 | cis/tamarack.cis | ||
| 600 | 603 | ||
| 601 | Licence: GPL | 604 | Licence: GPL |
| 602 | 605 | ||
| @@ -628,6 +631,8 @@ Driver: SERIAL_8250_CS - Serial PCMCIA adapter | |||
| 628 | 631 | ||
| 629 | File: cis/MT5634ZLX.cis | 632 | File: cis/MT5634ZLX.cis |
| 630 | cis/RS-COM-2P.cis | 633 | cis/RS-COM-2P.cis |
| 634 | cis/COMpad2.cis | ||
| 635 | cis/COMpad4.cis | ||
| 631 | 636 | ||
| 632 | Licence: GPL | 637 | Licence: GPL |
| 633 | 638 | ||
diff --git a/firmware/cis/COMpad2.cis.ihex b/firmware/cis/COMpad2.cis.ihex new file mode 100644 index 000000000000..1671c5e48caa --- /dev/null +++ b/firmware/cis/COMpad2.cis.ihex | |||
| @@ -0,0 +1,11 @@ | |||
| 1 | :1000000001030000FF151F0401414456414E5445B1 | ||
| 2 | :10001000434800434F4D7061642D33322F38350013 | ||
| 3 | :10002000312E300000FF210202011A0501050001F6 | ||
| 4 | :10003000031B0EC18118AA61E80207E8030730B864 | ||
| 5 | :100040009E1B08820108AA6030030F1B0883010869 | ||
| 6 | :10005000AA6040030F1B08840108AA6050030F1B0D | ||
| 7 | :0D00600008850108AA6060030F1400FF006E | ||
| 8 | :00000001FF | ||
| 9 | # | ||
| 10 | # Replacement CIS for Advantech COMpad-32/85 | ||
| 11 | # | ||
diff --git a/firmware/cis/COMpad4.cis.ihex b/firmware/cis/COMpad4.cis.ihex new file mode 100644 index 000000000000..27bbec1921b3 --- /dev/null +++ b/firmware/cis/COMpad4.cis.ihex | |||
| @@ -0,0 +1,9 @@ | |||
| 1 | :1000000001030000FF151F0401414456414E5445B1 | ||
| 2 | :10001000434800434F4D7061642D33322F383542D1 | ||
| 3 | :100020002D34000000FF210202011A050102000127 | ||
| 4 | :10003000011B0BC18118AA6040021F30B89E1B082B | ||
| 5 | :0C004000820108AA6040031F1400FF00AA | ||
| 6 | :00000001FF | ||
| 7 | # | ||
| 8 | # Replacement CIS for Advantech COMpad-32/85B-4 | ||
| 9 | # | ||
diff --git a/firmware/cis/DP83903.cis.ihex b/firmware/cis/DP83903.cis.ihex new file mode 100644 index 000000000000..6d73ea3cf1b8 --- /dev/null +++ b/firmware/cis/DP83903.cis.ihex | |||
| @@ -0,0 +1,14 @@ | |||
| 1 | :1000000001030000FF152904014D756C74696675C4 | ||
| 2 | :100010006E6374696F6E20436172640000004E531A | ||
| 3 | :1000200043204D46204C414E2F4D6F64656D00FFBF | ||
| 4 | :1000300020047501000021020000060B02004900A7 | ||
| 5 | :100040000000006A000000FF00130343495321022F | ||
| 6 | :1000500006001A060517201077021B0C970179017C | ||
| 7 | :10006000556530FFFF284000FF001303434953212B | ||
| 8 | :100070000202001A060507401077021B09870119C2 | ||
| 9 | :0800800001552330FFFFFF00D2 | ||
| 10 | :00000001FF | ||
| 11 | # | ||
| 12 | # This CIS is for cards based on the National Semiconductor | ||
| 13 | # DP83903 Multiple Function Interface Chip | ||
| 14 | # | ||
diff --git a/firmware/cis/NE2K.cis.ihex b/firmware/cis/NE2K.cis.ihex new file mode 100644 index 000000000000..1bb40fc4759f --- /dev/null +++ b/firmware/cis/NE2K.cis.ihex | |||
| @@ -0,0 +1,8 @@ | |||
| 1 | :1000000001030000FF1515040150434D4349410011 | ||
| 2 | :1000100045746865726E6574000000FF2102060079 | ||
| 3 | :100020001A050120F803031B09E001190155653089 | ||
| 4 | :06003000FFFF1400FF00B9 | ||
| 5 | :00000001FF | ||
| 6 | # | ||
| 7 | # Replacement CIS for various busted NE2000-compatible cards | ||
| 8 | # | ||
diff --git a/firmware/cis/tamarack.cis.ihex b/firmware/cis/tamarack.cis.ihex new file mode 100644 index 000000000000..1e86547fb361 --- /dev/null +++ b/firmware/cis/tamarack.cis.ihex | |||
| @@ -0,0 +1,10 @@ | |||
| 1 | :100000000103D400FF17034100FF152404015441EC | ||
| 2 | :100010004D415241434B0045746865726E657400F2 | ||
| 3 | :10002000410030303437343331313830303100FF33 | ||
| 4 | :10003000210206001A050120F803031B14E08119B0 | ||
| 5 | :100040003F554D5D06864626E551000F100F30FFE7 | ||
| 6 | :05005000FF1400FF0099 | ||
| 7 | :00000001FF | ||
| 8 | # | ||
| 9 | # Replacement CIS for Surecom, Tamarack NE2000 cards | ||
| 10 | # | ||
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index 56013371f9f3..a44a7897fd4d 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c | |||
| @@ -23,7 +23,6 @@ | |||
| 23 | #include <asm/io.h> | 23 | #include <asm/io.h> |
| 24 | #include <linux/list.h> | 24 | #include <linux/list.h> |
| 25 | #include <linux/ioport.h> | 25 | #include <linux/ioport.h> |
| 26 | #include <linux/mm.h> | ||
| 27 | #include <linux/memory.h> | 26 | #include <linux/memory.h> |
| 28 | #include <asm/sections.h> | 27 | #include <asm/sections.h> |
| 29 | 28 | ||
diff --git a/fs/proc/page.c b/fs/proc/page.c index 2281c2cbfe2b..5033ce0d254b 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c | |||
| @@ -94,6 +94,7 @@ static const struct file_operations proc_kpagecount_operations = { | |||
| 94 | #define KPF_COMPOUND_TAIL 16 | 94 | #define KPF_COMPOUND_TAIL 16 |
| 95 | #define KPF_HUGE 17 | 95 | #define KPF_HUGE 17 |
| 96 | #define KPF_UNEVICTABLE 18 | 96 | #define KPF_UNEVICTABLE 18 |
| 97 | #define KPF_HWPOISON 19 | ||
| 97 | #define KPF_NOPAGE 20 | 98 | #define KPF_NOPAGE 20 |
| 98 | 99 | ||
| 99 | #define KPF_KSM 21 | 100 | #define KPF_KSM 21 |
| @@ -180,6 +181,10 @@ static u64 get_uflags(struct page *page) | |||
| 180 | u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable); | 181 | u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable); |
| 181 | u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked); | 182 | u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked); |
| 182 | 183 | ||
| 184 | #ifdef CONFIG_MEMORY_FAILURE | ||
| 185 | u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison); | ||
| 186 | #endif | ||
| 187 | |||
| 183 | #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR | 188 | #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR |
| 184 | u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached); | 189 | u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached); |
| 185 | #endif | 190 | #endif |
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index ef47dfd8e5e9..b29e20168b5f 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h | |||
| @@ -61,6 +61,9 @@ struct drm_crtc_helper_funcs { | |||
| 61 | /* Move the crtc on the current fb to the given position *optional* */ | 61 | /* Move the crtc on the current fb to the given position *optional* */ |
| 62 | int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, | 62 | int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, |
| 63 | struct drm_framebuffer *old_fb); | 63 | struct drm_framebuffer *old_fb); |
| 64 | |||
| 65 | /* reload the current crtc LUT */ | ||
| 66 | void (*load_lut)(struct drm_crtc *crtc); | ||
| 64 | }; | 67 | }; |
| 65 | 68 | ||
| 66 | struct drm_encoder_helper_funcs { | 69 | struct drm_encoder_helper_funcs { |
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 4aa5740ce59f..58c892a2cbfa 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h | |||
| @@ -39,6 +39,8 @@ struct drm_fb_helper_crtc { | |||
| 39 | struct drm_fb_helper_funcs { | 39 | struct drm_fb_helper_funcs { |
| 40 | void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green, | 40 | void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green, |
| 41 | u16 blue, int regno); | 41 | u16 blue, int regno); |
| 42 | void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
| 43 | u16 *blue, int regno); | ||
| 42 | }; | 44 | }; |
| 43 | 45 | ||
| 44 | /* mode specified on the command line */ | 46 | /* mode specified on the command line */ |
| @@ -71,6 +73,7 @@ struct drm_fb_helper { | |||
| 71 | }; | 73 | }; |
| 72 | 74 | ||
| 73 | int drm_fb_helper_single_fb_probe(struct drm_device *dev, | 75 | int drm_fb_helper_single_fb_probe(struct drm_device *dev, |
| 76 | int preferred_bpp, | ||
| 74 | int (*fb_create)(struct drm_device *dev, | 77 | int (*fb_create)(struct drm_device *dev, |
| 75 | uint32_t fb_width, | 78 | uint32_t fb_width, |
| 76 | uint32_t fb_height, | 79 | uint32_t fb_height, |
| @@ -98,9 +101,11 @@ int drm_fb_helper_setcolreg(unsigned regno, | |||
| 98 | void drm_fb_helper_restore(void); | 101 | void drm_fb_helper_restore(void); |
| 99 | void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb, | 102 | void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb, |
| 100 | uint32_t fb_width, uint32_t fb_height); | 103 | uint32_t fb_width, uint32_t fb_height); |
| 101 | void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch); | 104 | void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, |
| 105 | uint32_t depth); | ||
| 102 | 106 | ||
| 103 | int drm_fb_helper_add_connector(struct drm_connector *connector); | 107 | int drm_fb_helper_add_connector(struct drm_connector *connector); |
| 104 | int drm_fb_helper_parse_command_line(struct drm_device *dev); | 108 | int drm_fb_helper_parse_command_line(struct drm_device *dev); |
| 109 | int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); | ||
| 105 | 110 | ||
| 106 | #endif | 111 | #endif |
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 3f6e545609be..e6f3b120f51a 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h | |||
| @@ -80,7 +80,7 @@ | |||
| 80 | {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \ | 80 | {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \ |
| 81 | {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ | 81 | {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ |
| 82 | {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ | 82 | {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ |
| 83 | {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ | 83 | {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_SINGLE_CRTC}, \ |
| 84 | {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ | 84 | {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
| 85 | {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ | 85 | {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
| 86 | {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ | 86 | {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
| @@ -113,7 +113,7 @@ | |||
| 113 | {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ | 113 | {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ |
| 114 | {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ | 114 | {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ |
| 115 | {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ | 115 | {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ |
| 116 | {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ | 116 | {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_SINGLE_CRTC}, \ |
| 117 | {0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ | 117 | {0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ |
| 118 | {0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ | 118 | {0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ |
| 119 | {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ | 119 | {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ |
diff --git a/include/linux/socket.h b/include/linux/socket.h index 3b461dffe244..3273a0c5043b 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h | |||
| @@ -16,7 +16,7 @@ struct __kernel_sockaddr_storage { | |||
| 16 | /* _SS_MAXSIZE value minus size of ss_family */ | 16 | /* _SS_MAXSIZE value minus size of ss_family */ |
| 17 | } __attribute__ ((aligned(_K_SS_ALIGNSIZE))); /* force desired alignment */ | 17 | } __attribute__ ((aligned(_K_SS_ALIGNSIZE))); /* force desired alignment */ |
| 18 | 18 | ||
| 19 | #if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) | 19 | #ifdef __KERNEL__ |
| 20 | 20 | ||
| 21 | #include <asm/socket.h> /* arch-dependent defines */ | 21 | #include <asm/socket.h> /* arch-dependent defines */ |
| 22 | #include <linux/sockios.h> /* the SIOCxxx I/O controls */ | 22 | #include <linux/sockios.h> /* the SIOCxxx I/O controls */ |
| @@ -101,21 +101,6 @@ struct cmsghdr { | |||
| 101 | ((char *)(cmsg) - (char *)(mhdr)->msg_control))) | 101 | ((char *)(cmsg) - (char *)(mhdr)->msg_control))) |
| 102 | 102 | ||
| 103 | /* | 103 | /* |
| 104 | * This mess will go away with glibc | ||
| 105 | */ | ||
| 106 | |||
| 107 | #ifdef __KERNEL__ | ||
| 108 | #define __KINLINE static inline | ||
| 109 | #elif defined(__GNUC__) | ||
| 110 | #define __KINLINE static __inline__ | ||
| 111 | #elif defined(__cplusplus) | ||
| 112 | #define __KINLINE static inline | ||
| 113 | #else | ||
| 114 | #define __KINLINE static | ||
| 115 | #endif | ||
| 116 | |||
| 117 | |||
| 118 | /* | ||
| 119 | * Get the next cmsg header | 104 | * Get the next cmsg header |
| 120 | * | 105 | * |
| 121 | * PLEASE, do not touch this function. If you think, that it is | 106 | * PLEASE, do not touch this function. If you think, that it is |
| @@ -128,7 +113,7 @@ struct cmsghdr { | |||
| 128 | * ancillary object DATA. --ANK (980731) | 113 | * ancillary object DATA. --ANK (980731) |
| 129 | */ | 114 | */ |
| 130 | 115 | ||
| 131 | __KINLINE struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size, | 116 | static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size, |
| 132 | struct cmsghdr *__cmsg) | 117 | struct cmsghdr *__cmsg) |
| 133 | { | 118 | { |
| 134 | struct cmsghdr * __ptr; | 119 | struct cmsghdr * __ptr; |
| @@ -140,7 +125,7 @@ __KINLINE struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size, | |||
| 140 | return __ptr; | 125 | return __ptr; |
| 141 | } | 126 | } |
| 142 | 127 | ||
| 143 | __KINLINE struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__cmsg) | 128 | static inline struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__cmsg) |
| 144 | { | 129 | { |
| 145 | return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg); | 130 | return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg); |
| 146 | } | 131 | } |
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 6d7020490f94..3e1c36e7998f 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
| @@ -726,8 +726,6 @@ static int hrtimer_switch_to_hres(void) | |||
| 726 | /* "Retrigger" the interrupt to get things going */ | 726 | /* "Retrigger" the interrupt to get things going */ |
| 727 | retrigger_next_event(NULL); | 727 | retrigger_next_event(NULL); |
| 728 | local_irq_restore(flags); | 728 | local_irq_restore(flags); |
| 729 | printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n", | ||
| 730 | smp_processor_id()); | ||
| 731 | return 1; | 729 | return 1; |
| 732 | } | 730 | } |
| 733 | 731 | ||
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 0f86feb6db0c..e491fb087939 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
| @@ -1030,14 +1030,10 @@ void __perf_event_sched_out(struct perf_event_context *ctx, | |||
| 1030 | update_context_time(ctx); | 1030 | update_context_time(ctx); |
| 1031 | 1031 | ||
| 1032 | perf_disable(); | 1032 | perf_disable(); |
| 1033 | if (ctx->nr_active) { | 1033 | if (ctx->nr_active) |
| 1034 | list_for_each_entry(event, &ctx->group_list, group_entry) { | 1034 | list_for_each_entry(event, &ctx->group_list, group_entry) |
| 1035 | if (event != event->group_leader) | 1035 | group_sched_out(event, cpuctx, ctx); |
| 1036 | event_sched_out(event, cpuctx, ctx); | 1036 | |
| 1037 | else | ||
| 1038 | group_sched_out(event, cpuctx, ctx); | ||
| 1039 | } | ||
| 1040 | } | ||
| 1041 | perf_enable(); | 1037 | perf_enable(); |
| 1042 | out: | 1038 | out: |
| 1043 | spin_unlock(&ctx->lock); | 1039 | spin_unlock(&ctx->lock); |
| @@ -1258,12 +1254,8 @@ __perf_event_sched_in(struct perf_event_context *ctx, | |||
| 1258 | if (event->cpu != -1 && event->cpu != cpu) | 1254 | if (event->cpu != -1 && event->cpu != cpu) |
| 1259 | continue; | 1255 | continue; |
| 1260 | 1256 | ||
| 1261 | if (event != event->group_leader) | 1257 | if (group_can_go_on(event, cpuctx, 1)) |
| 1262 | event_sched_in(event, cpuctx, ctx, cpu); | 1258 | group_sched_in(event, cpuctx, ctx, cpu); |
| 1263 | else { | ||
| 1264 | if (group_can_go_on(event, cpuctx, 1)) | ||
| 1265 | group_sched_in(event, cpuctx, ctx, cpu); | ||
| 1266 | } | ||
| 1267 | 1259 | ||
| 1268 | /* | 1260 | /* |
| 1269 | * If this pinned group hasn't been scheduled, | 1261 | * If this pinned group hasn't been scheduled, |
| @@ -1291,15 +1283,9 @@ __perf_event_sched_in(struct perf_event_context *ctx, | |||
| 1291 | if (event->cpu != -1 && event->cpu != cpu) | 1283 | if (event->cpu != -1 && event->cpu != cpu) |
| 1292 | continue; | 1284 | continue; |
| 1293 | 1285 | ||
| 1294 | if (event != event->group_leader) { | 1286 | if (group_can_go_on(event, cpuctx, can_add_hw)) |
| 1295 | if (event_sched_in(event, cpuctx, ctx, cpu)) | 1287 | if (group_sched_in(event, cpuctx, ctx, cpu)) |
| 1296 | can_add_hw = 0; | 1288 | can_add_hw = 0; |
| 1297 | } else { | ||
| 1298 | if (group_can_go_on(event, cpuctx, can_add_hw)) { | ||
| 1299 | if (group_sched_in(event, cpuctx, ctx, cpu)) | ||
| 1300 | can_add_hw = 0; | ||
| 1301 | } | ||
| 1302 | } | ||
| 1303 | } | 1289 | } |
| 1304 | perf_enable(); | 1290 | perf_enable(); |
| 1305 | out: | 1291 | out: |
| @@ -4781,9 +4767,7 @@ int perf_event_init_task(struct task_struct *child) | |||
| 4781 | * We dont have to disable NMIs - we are only looking at | 4767 | * We dont have to disable NMIs - we are only looking at |
| 4782 | * the list, not manipulating it: | 4768 | * the list, not manipulating it: |
| 4783 | */ | 4769 | */ |
| 4784 | list_for_each_entry_rcu(event, &parent_ctx->event_list, event_entry) { | 4770 | list_for_each_entry(event, &parent_ctx->group_list, group_entry) { |
| 4785 | if (event != event->group_leader) | ||
| 4786 | continue; | ||
| 4787 | 4771 | ||
| 4788 | if (!event->attr.inherit) { | 4772 | if (!event->attr.inherit) { |
| 4789 | inherited_all = 0; | 4773 | inherited_all = 0; |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 46592feab5a6..3724756e41ca 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -225,7 +225,11 @@ static void ftrace_update_pid_func(void) | |||
| 225 | if (ftrace_trace_function == ftrace_stub) | 225 | if (ftrace_trace_function == ftrace_stub) |
| 226 | return; | 226 | return; |
| 227 | 227 | ||
| 228 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
| 228 | func = ftrace_trace_function; | 229 | func = ftrace_trace_function; |
| 230 | #else | ||
| 231 | func = __ftrace_trace_function; | ||
| 232 | #endif | ||
| 229 | 233 | ||
| 230 | if (ftrace_pid_trace) { | 234 | if (ftrace_pid_trace) { |
| 231 | set_ftrace_pid_function(func); | 235 | set_ftrace_pid_function(func); |
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c index 81b1645c8549..a91da69f153a 100644 --- a/kernel/trace/kmemtrace.c +++ b/kernel/trace/kmemtrace.c | |||
| @@ -501,7 +501,7 @@ static int __init init_kmem_tracer(void) | |||
| 501 | return 1; | 501 | return 1; |
| 502 | } | 502 | } |
| 503 | 503 | ||
| 504 | if (!register_tracer(&kmem_tracer)) { | 504 | if (register_tracer(&kmem_tracer) != 0) { |
| 505 | pr_warning("Warning: could not register the kmem tracer\n"); | 505 | pr_warning("Warning: could not register the kmem tracer\n"); |
| 506 | return 1; | 506 | return 1; |
| 507 | } | 507 | } |
diff --git a/mm/Kconfig b/mm/Kconfig index edd300aca173..57963c6063d1 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
| @@ -224,7 +224,9 @@ config KSM | |||
| 224 | the many instances by a single resident page with that content, so | 224 | the many instances by a single resident page with that content, so |
| 225 | saving memory until one or another app needs to modify the content. | 225 | saving memory until one or another app needs to modify the content. |
| 226 | Recommended for use with KVM, or with other duplicative applications. | 226 | Recommended for use with KVM, or with other duplicative applications. |
| 227 | See Documentation/vm/ksm.txt for more information. | 227 | See Documentation/vm/ksm.txt for more information: KSM is inactive |
| 228 | until a program has madvised that an area is MADV_MERGEABLE, and | ||
| 229 | root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set). | ||
| 228 | 230 | ||
| 229 | config DEFAULT_MMAP_MIN_ADDR | 231 | config DEFAULT_MMAP_MIN_ADDR |
| 230 | int "Low address space to protect from user allocation" | 232 | int "Low address space to protect from user allocation" |
| @@ -184,11 +184,6 @@ static DEFINE_SPINLOCK(ksm_mmlist_lock); | |||
| 184 | sizeof(struct __struct), __alignof__(struct __struct),\ | 184 | sizeof(struct __struct), __alignof__(struct __struct),\ |
| 185 | (__flags), NULL) | 185 | (__flags), NULL) |
| 186 | 186 | ||
| 187 | static void __init ksm_init_max_kernel_pages(void) | ||
| 188 | { | ||
| 189 | ksm_max_kernel_pages = nr_free_buffer_pages() / 4; | ||
| 190 | } | ||
| 191 | |||
| 192 | static int __init ksm_slab_init(void) | 187 | static int __init ksm_slab_init(void) |
| 193 | { | 188 | { |
| 194 | rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0); | 189 | rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0); |
| @@ -1673,7 +1668,7 @@ static int __init ksm_init(void) | |||
| 1673 | struct task_struct *ksm_thread; | 1668 | struct task_struct *ksm_thread; |
| 1674 | int err; | 1669 | int err; |
| 1675 | 1670 | ||
| 1676 | ksm_init_max_kernel_pages(); | 1671 | ksm_max_kernel_pages = totalram_pages / 4; |
| 1677 | 1672 | ||
| 1678 | err = ksm_slab_init(); | 1673 | err = ksm_slab_init(); |
| 1679 | if (err) | 1674 | if (err) |
| @@ -1697,6 +1692,9 @@ static int __init ksm_init(void) | |||
| 1697 | kthread_stop(ksm_thread); | 1692 | kthread_stop(ksm_thread); |
| 1698 | goto out_free2; | 1693 | goto out_free2; |
| 1699 | } | 1694 | } |
| 1695 | #else | ||
| 1696 | ksm_run = KSM_RUN_MERGE; /* no way for user to start it */ | ||
| 1697 | |||
| 1700 | #endif /* CONFIG_SYSFS */ | 1698 | #endif /* CONFIG_SYSFS */ |
| 1701 | 1699 | ||
| 1702 | return 0; | 1700 | return 0; |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 69511e663234..2f7c9d75c552 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
| @@ -25,7 +25,6 @@ | |||
| 25 | #include <linux/rcupdate.h> | 25 | #include <linux/rcupdate.h> |
| 26 | #include <linux/pfn.h> | 26 | #include <linux/pfn.h> |
| 27 | #include <linux/kmemleak.h> | 27 | #include <linux/kmemleak.h> |
| 28 | #include <linux/highmem.h> | ||
| 29 | #include <asm/atomic.h> | 28 | #include <asm/atomic.h> |
| 30 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
| 31 | #include <asm/tlbflush.h> | 30 | #include <asm/tlbflush.h> |
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 821d30918cfc..427ded841224 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
| @@ -366,13 +366,13 @@ static ssize_t wireless_show(struct device *d, char *buf, | |||
| 366 | const struct iw_statistics *iw; | 366 | const struct iw_statistics *iw; |
| 367 | ssize_t ret = -EINVAL; | 367 | ssize_t ret = -EINVAL; |
| 368 | 368 | ||
| 369 | read_lock(&dev_base_lock); | 369 | rtnl_lock(); |
| 370 | if (dev_isalive(dev)) { | 370 | if (dev_isalive(dev)) { |
| 371 | iw = get_wireless_stats(dev); | 371 | iw = get_wireless_stats(dev); |
| 372 | if (iw) | 372 | if (iw) |
| 373 | ret = (*format)(iw, buf); | 373 | ret = (*format)(iw, buf); |
| 374 | } | 374 | } |
| 375 | read_unlock(&dev_base_lock); | 375 | rtnl_unlock(); |
| 376 | 376 | ||
| 377 | return ret; | 377 | return ret; |
| 378 | } | 378 | } |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index b69455217ed6..86acdba0a97d 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
| @@ -964,7 +964,7 @@ static ssize_t pktgen_if_write(struct file *file, | |||
| 964 | if (value == 0x7FFFFFFF) | 964 | if (value == 0x7FFFFFFF) |
| 965 | pkt_dev->delay = ULLONG_MAX; | 965 | pkt_dev->delay = ULLONG_MAX; |
| 966 | else | 966 | else |
| 967 | pkt_dev->delay = (u64)value * NSEC_PER_USEC; | 967 | pkt_dev->delay = (u64)value; |
| 968 | 968 | ||
| 969 | sprintf(pg_result, "OK: delay=%llu", | 969 | sprintf(pg_result, "OK: delay=%llu", |
| 970 | (unsigned long long) pkt_dev->delay); | 970 | (unsigned long long) pkt_dev->delay); |
| @@ -2212,7 +2212,7 @@ static void set_cur_queue_map(struct pktgen_dev *pkt_dev) | |||
| 2212 | if (pkt_dev->flags & F_QUEUE_MAP_CPU) | 2212 | if (pkt_dev->flags & F_QUEUE_MAP_CPU) |
| 2213 | pkt_dev->cur_queue_map = smp_processor_id(); | 2213 | pkt_dev->cur_queue_map = smp_processor_id(); |
| 2214 | 2214 | ||
| 2215 | else if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) { | 2215 | else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) { |
| 2216 | __u16 t; | 2216 | __u16 t; |
| 2217 | if (pkt_dev->flags & F_QUEUE_MAP_RND) { | 2217 | if (pkt_dev->flags & F_QUEUE_MAP_RND) { |
| 2218 | t = random32() % | 2218 | t = random32() % |
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index e92f1fd28aa5..5df2f6a0b0f0 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
| @@ -1077,12 +1077,16 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, | |||
| 1077 | ip_mc_up(in_dev); | 1077 | ip_mc_up(in_dev); |
| 1078 | /* fall through */ | 1078 | /* fall through */ |
| 1079 | case NETDEV_CHANGEADDR: | 1079 | case NETDEV_CHANGEADDR: |
| 1080 | if (IN_DEV_ARP_NOTIFY(in_dev)) | 1080 | /* Send gratuitous ARP to notify of link change */ |
| 1081 | arp_send(ARPOP_REQUEST, ETH_P_ARP, | 1081 | if (IN_DEV_ARP_NOTIFY(in_dev)) { |
| 1082 | in_dev->ifa_list->ifa_address, | 1082 | struct in_ifaddr *ifa = in_dev->ifa_list; |
| 1083 | dev, | 1083 | |
| 1084 | in_dev->ifa_list->ifa_address, | 1084 | if (ifa) |
| 1085 | NULL, dev->dev_addr, NULL); | 1085 | arp_send(ARPOP_REQUEST, ETH_P_ARP, |
| 1086 | ifa->ifa_address, dev, | ||
| 1087 | ifa->ifa_address, NULL, | ||
| 1088 | dev->dev_addr, NULL); | ||
| 1089 | } | ||
| 1086 | break; | 1090 | break; |
| 1087 | case NETDEV_DOWN: | 1091 | case NETDEV_DOWN: |
| 1088 | ip_mc_down(in_dev); | 1092 | ip_mc_down(in_dev); |
diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt index 1c2ed3090cce..a7910099d6fd 100644 --- a/tools/perf/Documentation/perf-timechart.txt +++ b/tools/perf/Documentation/perf-timechart.txt | |||
| @@ -31,6 +31,9 @@ OPTIONS | |||
| 31 | -w:: | 31 | -w:: |
| 32 | --width=:: | 32 | --width=:: |
| 33 | Select the width of the SVG file (default: 1000) | 33 | Select the width of the SVG file (default: 1000) |
| 34 | -p:: | ||
| 35 | --power-only:: | ||
| 36 | Only output the CPU power section of the diagram | ||
| 34 | 37 | ||
| 35 | 38 | ||
| 36 | SEE ALSO | 39 | SEE ALSO |
diff --git a/tools/perf/Makefile b/tools/perf/Makefile index b5f1953b6144..5881943f0c34 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile | |||
| @@ -728,7 +728,7 @@ $(BUILT_INS): perf$X | |||
| 728 | common-cmds.h: util/generate-cmdlist.sh command-list.txt | 728 | common-cmds.h: util/generate-cmdlist.sh command-list.txt |
| 729 | 729 | ||
| 730 | common-cmds.h: $(wildcard Documentation/perf-*.txt) | 730 | common-cmds.h: $(wildcard Documentation/perf-*.txt) |
| 731 | $(QUIET_GEN)util/generate-cmdlist.sh > $@+ && mv $@+ $@ | 731 | $(QUIET_GEN). util/generate-cmdlist.sh > $@+ && mv $@+ $@ |
| 732 | 732 | ||
| 733 | $(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh | 733 | $(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh |
| 734 | $(QUIET_GEN)$(RM) $@ $@+ && \ | 734 | $(QUIET_GEN)$(RM) $@ $@+ && \ |
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index 4405681b3134..702d8fe58fbc 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c | |||
| @@ -46,6 +46,8 @@ static u64 turbo_frequency; | |||
| 46 | 46 | ||
| 47 | static u64 first_time, last_time; | 47 | static u64 first_time, last_time; |
| 48 | 48 | ||
| 49 | static int power_only; | ||
| 50 | |||
| 49 | 51 | ||
| 50 | static struct perf_header *header; | 52 | static struct perf_header *header; |
| 51 | 53 | ||
| @@ -547,7 +549,7 @@ static void end_sample_processing(void) | |||
| 547 | u64 cpu; | 549 | u64 cpu; |
| 548 | struct power_event *pwr; | 550 | struct power_event *pwr; |
| 549 | 551 | ||
| 550 | for (cpu = 0; cpu < numcpus; cpu++) { | 552 | for (cpu = 0; cpu <= numcpus; cpu++) { |
| 551 | pwr = malloc(sizeof(struct power_event)); | 553 | pwr = malloc(sizeof(struct power_event)); |
| 552 | if (!pwr) | 554 | if (!pwr) |
| 553 | return; | 555 | return; |
| @@ -871,7 +873,7 @@ static int determine_display_tasks(u64 threshold) | |||
| 871 | /* no exit marker, task kept running to the end */ | 873 | /* no exit marker, task kept running to the end */ |
| 872 | if (p->end_time == 0) | 874 | if (p->end_time == 0) |
| 873 | p->end_time = last_time; | 875 | p->end_time = last_time; |
| 874 | if (p->total_time >= threshold) | 876 | if (p->total_time >= threshold && !power_only) |
| 875 | p->display = 1; | 877 | p->display = 1; |
| 876 | 878 | ||
| 877 | c = p->all; | 879 | c = p->all; |
| @@ -882,7 +884,7 @@ static int determine_display_tasks(u64 threshold) | |||
| 882 | if (c->start_time == 1) | 884 | if (c->start_time == 1) |
| 883 | c->start_time = first_time; | 885 | c->start_time = first_time; |
| 884 | 886 | ||
| 885 | if (c->total_time >= threshold) { | 887 | if (c->total_time >= threshold && !power_only) { |
| 886 | c->display = 1; | 888 | c->display = 1; |
| 887 | count++; | 889 | count++; |
| 888 | } | 890 | } |
| @@ -1134,6 +1136,8 @@ static const struct option options[] = { | |||
| 1134 | "output file name"), | 1136 | "output file name"), |
| 1135 | OPT_INTEGER('w', "width", &svg_page_width, | 1137 | OPT_INTEGER('w', "width", &svg_page_width, |
| 1136 | "page width"), | 1138 | "page width"), |
| 1139 | OPT_BOOLEAN('p', "power-only", &power_only, | ||
| 1140 | "output power data only"), | ||
| 1137 | OPT_END() | 1141 | OPT_END() |
| 1138 | }; | 1142 | }; |
| 1139 | 1143 | ||
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 1ca88896eee4..37512e936235 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c | |||
| @@ -782,6 +782,7 @@ static const char *skip_symbols[] = { | |||
| 782 | "exit_idle", | 782 | "exit_idle", |
| 783 | "mwait_idle", | 783 | "mwait_idle", |
| 784 | "mwait_idle_with_hints", | 784 | "mwait_idle_with_hints", |
| 785 | "poll_idle", | ||
| 785 | "ppc64_runlatch_off", | 786 | "ppc64_runlatch_off", |
| 786 | "pseries_dedicated_idle_sleep", | 787 | "pseries_dedicated_idle_sleep", |
| 787 | NULL | 788 | NULL |
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c index a778fd0f4ae4..856655d8b0b8 100644 --- a/tools/perf/util/svghelper.c +++ b/tools/perf/util/svghelper.c | |||
| @@ -28,7 +28,7 @@ static u64 turbo_frequency, max_freq; | |||
| 28 | 28 | ||
| 29 | int svg_page_width = 1000; | 29 | int svg_page_width = 1000; |
| 30 | 30 | ||
| 31 | #define MIN_TEXT_SIZE 0.001 | 31 | #define MIN_TEXT_SIZE 0.01 |
| 32 | 32 | ||
| 33 | static u64 total_height; | 33 | static u64 total_height; |
| 34 | static FILE *svgfile; | 34 | static FILE *svgfile; |
| @@ -217,6 +217,18 @@ static char *cpu_model(void) | |||
| 217 | } | 217 | } |
| 218 | fclose(file); | 218 | fclose(file); |
| 219 | } | 219 | } |
| 220 | |||
| 221 | /* CPU type */ | ||
| 222 | file = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies", "r"); | ||
| 223 | if (file) { | ||
| 224 | while (fgets(buf, 255, file)) { | ||
| 225 | unsigned int freq; | ||
| 226 | freq = strtoull(buf, NULL, 10); | ||
| 227 | if (freq > max_freq) | ||
| 228 | max_freq = freq; | ||
| 229 | } | ||
| 230 | fclose(file); | ||
| 231 | } | ||
| 220 | return cpu_m; | 232 | return cpu_m; |
| 221 | } | 233 | } |
| 222 | 234 | ||
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index e79c54034bcd..b7c78a403dc2 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
| @@ -850,6 +850,19 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn, | |||
| 850 | 850 | ||
| 851 | } | 851 | } |
| 852 | 852 | ||
| 853 | static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, | ||
| 854 | struct mm_struct *mm, | ||
| 855 | unsigned long address, | ||
| 856 | pte_t pte) | ||
| 857 | { | ||
| 858 | struct kvm *kvm = mmu_notifier_to_kvm(mn); | ||
| 859 | |||
| 860 | spin_lock(&kvm->mmu_lock); | ||
| 861 | kvm->mmu_notifier_seq++; | ||
| 862 | kvm_set_spte_hva(kvm, address, pte); | ||
| 863 | spin_unlock(&kvm->mmu_lock); | ||
| 864 | } | ||
| 865 | |||
| 853 | static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, | 866 | static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, |
| 854 | struct mm_struct *mm, | 867 | struct mm_struct *mm, |
| 855 | unsigned long start, | 868 | unsigned long start, |
| @@ -929,6 +942,7 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { | |||
| 929 | .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, | 942 | .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, |
| 930 | .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, | 943 | .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, |
| 931 | .clear_flush_young = kvm_mmu_notifier_clear_flush_young, | 944 | .clear_flush_young = kvm_mmu_notifier_clear_flush_young, |
| 945 | .change_pte = kvm_mmu_notifier_change_pte, | ||
| 932 | .release = kvm_mmu_notifier_release, | 946 | .release = kvm_mmu_notifier_release, |
| 933 | }; | 947 | }; |
| 934 | #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ | 948 | #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ |
